author    Ingo Molnar <mingo@kernel.org>  2015-05-28 06:21:47 -0400
committer Ingo Molnar <mingo@kernel.org>  2015-06-02 01:57:48 -0400
commit    131484c8da97ed600c18dd9d03b661e8ae052df6
tree      18293a131e8a40a9a339734259a74d33cbba1186
parent    cdeb6048940fa4bfb429e2f1cba0d28a11e20cd5
x86/debug: Remove perpetually broken, unmaintainable dwarf annotations
So the dwarf2 annotations in low level assembly code have become an
increasing hindrance: unreadable, messy macros mixed into some of the
most security sensitive code paths of the Linux kernel.

These debug info annotations don't even buy the upstream kernel
anything: dwarf driven stack unwinding has caused problems in the past
so it's out of tree, and the upstream kernel only uses the much more
robust framepointers based stack unwinding method.

In addition to that there's a steady, slow bitrot going on with these
annotations, requiring frequent fixups. There's no tooling and no
functionality upstream that keeps it correct.

So burn down the sick forest, allowing new, healthier growth:

   27 files changed, 350 insertions(+), 1101 deletions(-)

Someone who has the willingness and time to do this properly can
attempt to reintroduce dwarf debuginfo in x86 assembly code plus dwarf
unwinding from first principles, with the following conditions:

 - it should be maximally readable, and maximally low-key to 'ordinary'
   code reading and maintenance.

 - find a build time method to insert dwarf annotations automatically
   in the most common cases, for pop/push instructions that manipulate
   the stack pointer. This could be done for example via a
   preprocessing step that just looks for common patterns - plus
   special annotations for the few cases where we want to depart from
   the default. We have hundreds of CFI annotations, so automating
   most of that makes sense.

 - it should come with build tooling checks that ensure that CFI
   annotations are sensible. We've seen such efforts from the
   framepointer side, and there's no reason it couldn't be done on the
   dwarf side.

Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Frédéric Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jan Beulich <JBeulich@suse.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
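To make the annotation burden concrete, here is a minimal sketch of what the
commit removes. The macro bodies quoted in the comments are the actual
definitions from the asm/dwarf2.h file deleted below; the function name
"example" is hypothetical, for illustration only:

	/* Before: every stack operation pairs with CFI bookkeeping */
	ENTRY(example)
		CFI_STARTPROC
		pushq_cfi_reg rbp	/* = pushq %rbp; CFI_ADJUST_CFA_OFFSET 8; CFI_REL_OFFSET rbp, 0 */
		movq %rsp, %rbp
		popq_cfi_reg rbp	/* = popq %rbp; CFI_ADJUST_CFA_OFFSET -8; CFI_RESTORE rbp */
		ret
		CFI_ENDPROC
	END(example)

	/* After: the same function with the annotations burned away */
	ENTRY(example)
		pushq %rbp
		movq %rsp, %rbp
		popq %rbp
		ret
	END(example)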
Diffstat:
-rw-r--r--  arch/x86/Makefile                |   10
-rw-r--r--  arch/x86/ia32/ia32entry.S        |  133
-rw-r--r--  arch/x86/include/asm/calling.h   |   94
-rw-r--r--  arch/x86/include/asm/dwarf2.h    |  170
-rw-r--r--  arch/x86/include/asm/frame.h     |    7
-rw-r--r--  arch/x86/kernel/entry_32.S       |  368
-rw-r--r--  arch/x86/kernel/entry_64.S       |  288
-rw-r--r--  arch/x86/lib/atomic64_386_32.S   |    7
-rw-r--r--  arch/x86/lib/atomic64_cx8_32.S   |   61
-rw-r--r--  arch/x86/lib/checksum_32.S       |   52
-rw-r--r--  arch/x86/lib/clear_page_64.S     |    7
-rw-r--r--  arch/x86/lib/cmpxchg16b_emu.S    |   12
-rw-r--r--  arch/x86/lib/cmpxchg8b_emu.S     |   11
-rw-r--r--  arch/x86/lib/copy_page_64.S      |   11
-rw-r--r--  arch/x86/lib/copy_user_64.S      |   15
-rw-r--r--  arch/x86/lib/csum-copy_64.S      |   17
-rw-r--r--  arch/x86/lib/getuser.S           |   13
-rw-r--r--  arch/x86/lib/iomap_copy_64.S     |    3
-rw-r--r--  arch/x86/lib/memcpy_64.S         |    3
-rw-r--r--  arch/x86/lib/memmove_64.S        |    3
-rw-r--r--  arch/x86/lib/memset_64.S         |    5
-rw-r--r--  arch/x86/lib/msr-reg.S           |   44
-rw-r--r--  arch/x86/lib/putuser.S           |    8
-rw-r--r--  arch/x86/lib/rwsem.S             |   49
-rw-r--r--  arch/x86/lib/thunk_32.S          |   15
-rw-r--r--  arch/x86/lib/thunk_64.S          |   44
-rw-r--r--  arch/x86/net/bpf_jit.S           |    1
27 files changed, 350 insertions(+), 1101 deletions(-)
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 57996ee840dd..43e8328a23e4 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -149,12 +149,6 @@ endif
 sp-$(CONFIG_X86_32) := esp
 sp-$(CONFIG_X86_64) := rsp
 
-# do binutils support CFI?
-cfi := $(call as-instr,.cfi_startproc\n.cfi_rel_offset $(sp-y)$(comma)0\n.cfi_endproc,-DCONFIG_AS_CFI=1)
-# is .cfi_signal_frame supported too?
-cfi-sigframe := $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1)
-cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTIONS=1)
-
 # does binutils support specific instructions?
 asinstr := $(call as-instr,fxsaveq (%rax),-DCONFIG_AS_FXSAVEQ=1)
 asinstr += $(call as-instr,pshufb %xmm0$(comma)%xmm0,-DCONFIG_AS_SSSE3=1)
@@ -162,8 +156,8 @@ asinstr += $(call as-instr,crc32l %eax$(comma)%eax,-DCONFIG_AS_CRC32=1)
 avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1)
 avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1)
 
-KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr)
-KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr)
+KBUILD_AFLAGS += $(asinstr) $(avx_instr) $(avx2_instr)
+KBUILD_CFLAGS += $(asinstr) $(avx_instr) $(avx2_instr)
 
 LDFLAGS := -m elf_$(UTS_MACHINE)
 
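For context: as-instr is the Kbuild probe helper from scripts/Kbuild.include;
it assembles a test snippet and expands to the given -D flag only on success.
A rough sketch of the mechanism (the exact definition varies by kernel
version, so treat this as an approximation):

	# as-instr: if $(CC) can assemble the snippet in $(1), expand to $(2), else $(3)
	as-instr = $(call try-run,\
		printf "%b\n" "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3))

In other words, CONFIG_AS_CFI was only defined when the assembler accepted
.cfi_startproc/.cfi_rel_offset/.cfi_endproc; with the probes deleted above,
the conditional CFI macros lose their purpose, which is why asm/dwarf2.h is
removed wholesale below.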
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 63450a596800..2be23c734db5 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -4,7 +4,6 @@
  * Copyright 2000-2002 Andi Kleen, SuSE Labs.
  */
 
-#include <asm/dwarf2.h>
 #include <asm/calling.h>
 #include <asm/asm-offsets.h>
 #include <asm/current.h>
@@ -60,17 +59,6 @@
 	movl %eax,%eax /* zero extension */
 	.endm
 
-	.macro CFI_STARTPROC32 simple
-	CFI_STARTPROC \simple
-	CFI_UNDEFINED r8
-	CFI_UNDEFINED r9
-	CFI_UNDEFINED r10
-	CFI_UNDEFINED r11
-	CFI_UNDEFINED r12
-	CFI_UNDEFINED r13
-	CFI_UNDEFINED r14
-	CFI_UNDEFINED r15
-	.endm
 
 #ifdef CONFIG_PARAVIRT
 ENTRY(native_usergs_sysret32)
@@ -102,11 +90,6 @@ ENDPROC(native_usergs_sysret32)
  * with the int 0x80 path.
  */
 ENTRY(ia32_sysenter_target)
-	CFI_STARTPROC32 simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA rsp,0
-	CFI_REGISTER rsp,rbp
-
 	/*
 	 * Interrupts are off on entry.
 	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
@@ -121,25 +104,21 @@ ENTRY(ia32_sysenter_target)
 	movl %eax, %eax
 
 	movl ASM_THREAD_INFO(TI_sysenter_return, %rsp, 0), %r10d
-	CFI_REGISTER rip,r10
 
 	/* Construct struct pt_regs on stack */
-	pushq_cfi $__USER32_DS		/* pt_regs->ss */
-	pushq_cfi %rbp			/* pt_regs->sp */
-	CFI_REL_OFFSET rsp,0
-	pushfq_cfi			/* pt_regs->flags */
-	pushq_cfi $__USER32_CS		/* pt_regs->cs */
-	pushq_cfi %r10			/* pt_regs->ip = thread_info->sysenter_return */
-	CFI_REL_OFFSET rip,0
-	pushq_cfi_reg rax		/* pt_regs->orig_ax */
-	pushq_cfi_reg rdi		/* pt_regs->di */
-	pushq_cfi_reg rsi		/* pt_regs->si */
-	pushq_cfi_reg rdx		/* pt_regs->dx */
-	pushq_cfi_reg rcx		/* pt_regs->cx */
-	pushq_cfi $-ENOSYS		/* pt_regs->ax */
+	pushq $__USER32_DS		/* pt_regs->ss */
+	pushq %rbp			/* pt_regs->sp */
+	pushfq				/* pt_regs->flags */
+	pushq $__USER32_CS		/* pt_regs->cs */
+	pushq %r10			/* pt_regs->ip = thread_info->sysenter_return */
+	pushq %rax			/* pt_regs->orig_ax */
+	pushq %rdi			/* pt_regs->di */
+	pushq %rsi			/* pt_regs->si */
+	pushq %rdx			/* pt_regs->dx */
+	pushq %rcx			/* pt_regs->cx */
+	pushq $-ENOSYS			/* pt_regs->ax */
 	cld
 	sub $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
-	CFI_ADJUST_CFA_OFFSET 10*8
 
 	/*
 	 * no need to do an access_ok check here because rbp has been
@@ -161,8 +140,8 @@ sysenter_flags_fixed:
 
 	orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
 	testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-	CFI_REMEMBER_STATE
 	jnz sysenter_tracesys
+
 sysenter_do_call:
 	/* 32bit syscall -> 64bit C ABI argument conversion */
 	movl %edi,%r8d /* arg5 */
@@ -193,14 +172,12 @@ sysexit_from_sys_call:
 	 */
 	andl $~TS_COMPAT,ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
 	movl RIP(%rsp),%ecx /* User %eip */
-	CFI_REGISTER rip,rcx
 	RESTORE_RSI_RDI
 	xorl %edx,%edx /* avoid info leaks */
 	xorq %r8,%r8
 	xorq %r9,%r9
 	xorq %r10,%r10
 	movl EFLAGS(%rsp),%r11d /* User eflags */
-	/*CFI_RESTORE rflags*/
 	TRACE_IRQS_ON
 
 	/*
@@ -231,8 +208,6 @@ sysexit_from_sys_call:
 	 */
 	USERGS_SYSRET32
 
-	CFI_RESTORE_STATE
-
 #ifdef CONFIG_AUDITSYSCALL
 	.macro auditsys_entry_common
 	movl %esi,%r8d /* 5th arg: 4th syscall arg */
@@ -282,8 +257,8 @@ sysexit_audit:
 #endif
 
 sysenter_fix_flags:
-	pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED)
-	popfq_cfi
+	pushq $(X86_EFLAGS_IF|X86_EFLAGS_FIXED)
+	popfq
 	jmp sysenter_flags_fixed
 
 sysenter_tracesys:
@@ -298,7 +273,6 @@ sysenter_tracesys:
 	LOAD_ARGS32 /* reload args from stack in case ptrace changed it */
 	RESTORE_EXTRA_REGS
 	jmp sysenter_do_call
-	CFI_ENDPROC
 ENDPROC(ia32_sysenter_target)
 
 /*
@@ -332,12 +306,6 @@ ENDPROC(ia32_sysenter_target)
  * with the int 0x80 path.
  */
 ENTRY(ia32_cstar_target)
-	CFI_STARTPROC32 simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA rsp,0
-	CFI_REGISTER rip,rcx
-	/*CFI_REGISTER rflags,r11*/
-
 	/*
 	 * Interrupts are off on entry.
 	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
@@ -345,7 +313,6 @@ ENTRY(ia32_cstar_target)
 	 */
 	SWAPGS_UNSAFE_STACK
 	movl %esp,%r8d
-	CFI_REGISTER rsp,r8
 	movq PER_CPU_VAR(cpu_current_top_of_stack),%rsp
 	ENABLE_INTERRUPTS(CLBR_NONE)
 
@@ -353,22 +320,19 @@ ENTRY(ia32_cstar_target)
 	movl %eax,%eax
 
 	/* Construct struct pt_regs on stack */
-	pushq_cfi $__USER32_DS		/* pt_regs->ss */
-	pushq_cfi %r8			/* pt_regs->sp */
-	CFI_REL_OFFSET rsp,0
-	pushq_cfi %r11			/* pt_regs->flags */
-	pushq_cfi $__USER32_CS		/* pt_regs->cs */
-	pushq_cfi %rcx			/* pt_regs->ip */
-	CFI_REL_OFFSET rip,0
-	pushq_cfi_reg rax		/* pt_regs->orig_ax */
-	pushq_cfi_reg rdi		/* pt_regs->di */
-	pushq_cfi_reg rsi		/* pt_regs->si */
-	pushq_cfi_reg rdx		/* pt_regs->dx */
-	pushq_cfi_reg rbp		/* pt_regs->cx */
+	pushq $__USER32_DS		/* pt_regs->ss */
+	pushq %r8			/* pt_regs->sp */
+	pushq %r11			/* pt_regs->flags */
+	pushq $__USER32_CS		/* pt_regs->cs */
+	pushq %rcx			/* pt_regs->ip */
+	pushq %rax			/* pt_regs->orig_ax */
+	pushq %rdi			/* pt_regs->di */
+	pushq %rsi			/* pt_regs->si */
+	pushq %rdx			/* pt_regs->dx */
+	pushq %rbp			/* pt_regs->cx */
 	movl %ebp,%ecx
-	pushq_cfi $-ENOSYS		/* pt_regs->ax */
+	pushq $-ENOSYS			/* pt_regs->ax */
 	sub $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
-	CFI_ADJUST_CFA_OFFSET 10*8
 
 	/*
 	 * no need to do an access_ok check here because r8 has been
@@ -380,8 +344,8 @@ ENTRY(ia32_cstar_target)
 	ASM_CLAC
 	orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
 	testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-	CFI_REMEMBER_STATE
 	jnz cstar_tracesys
+
 cstar_do_call:
 	/* 32bit syscall -> 64bit C ABI argument conversion */
 	movl %edi,%r8d /* arg5 */
@@ -403,15 +367,12 @@ sysretl_from_sys_call:
 	andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
 	RESTORE_RSI_RDI_RDX
 	movl RIP(%rsp),%ecx
-	CFI_REGISTER rip,rcx
 	movl EFLAGS(%rsp),%r11d
-	/*CFI_REGISTER rflags,r11*/
 	xorq %r10,%r10
 	xorq %r9,%r9
 	xorq %r8,%r8
 	TRACE_IRQS_ON
 	movl RSP(%rsp),%esp
-	CFI_RESTORE rsp
 	/*
 	 * 64bit->32bit SYSRET restores eip from ecx,
 	 * eflags from r11 (but RF and VM bits are forced to 0),
@@ -430,7 +391,6 @@ sysretl_from_sys_call:
 
 #ifdef CONFIG_AUDITSYSCALL
 cstar_auditsys:
-	CFI_RESTORE_STATE
 	movl %r9d,R9(%rsp) /* register to be clobbered by call */
 	auditsys_entry_common
 	movl R9(%rsp),%r9d /* reload 6th syscall arg */
@@ -460,7 +420,6 @@ ia32_badarg:
 	ASM_CLAC
 	movq $-EFAULT,%rax
 	jmp ia32_sysret
-	CFI_ENDPROC
 
 /*
  * Emulated IA32 system calls via int 0x80.
@@ -484,15 +443,6 @@ ia32_badarg:
  */
 
 ENTRY(ia32_syscall)
-	CFI_STARTPROC32 simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA rsp,5*8
-	/*CFI_REL_OFFSET ss,4*8 */
-	CFI_REL_OFFSET rsp,3*8
-	/*CFI_REL_OFFSET rflags,2*8 */
-	/*CFI_REL_OFFSET cs,1*8 */
-	CFI_REL_OFFSET rip,0*8
-
 	/*
 	 * Interrupts are off on entry.
 	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
@@ -506,15 +456,14 @@ ENTRY(ia32_syscall)
 	movl %eax,%eax
 
 	/* Construct struct pt_regs on stack (iret frame is already on stack) */
-	pushq_cfi_reg rax		/* pt_regs->orig_ax */
-	pushq_cfi_reg rdi		/* pt_regs->di */
-	pushq_cfi_reg rsi		/* pt_regs->si */
-	pushq_cfi_reg rdx		/* pt_regs->dx */
-	pushq_cfi_reg rcx		/* pt_regs->cx */
-	pushq_cfi $-ENOSYS		/* pt_regs->ax */
+	pushq %rax			/* pt_regs->orig_ax */
+	pushq %rdi			/* pt_regs->di */
+	pushq %rsi			/* pt_regs->si */
+	pushq %rdx			/* pt_regs->dx */
+	pushq %rcx			/* pt_regs->cx */
+	pushq $-ENOSYS			/* pt_regs->ax */
 	cld
 	sub $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
-	CFI_ADJUST_CFA_OFFSET 10*8
 
 	orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
 	testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
@@ -544,7 +493,6 @@ ia32_tracesys:
 	LOAD_ARGS32 /* reload args from stack in case ptrace changed it */
 	RESTORE_EXTRA_REGS
 	jmp ia32_do_call
-	CFI_ENDPROC
 END(ia32_syscall)
 
 	.macro PTREGSCALL label, func
@@ -554,8 +502,6 @@ GLOBAL(\label)
 	jmp ia32_ptregs_common
 	.endm
 
-	CFI_STARTPROC32
-
 	PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn
 	PTREGSCALL stub32_sigreturn, sys32_sigreturn
 	PTREGSCALL stub32_fork, sys_fork
@@ -569,23 +515,8 @@ GLOBAL(stub32_clone)
 
 	ALIGN
 ia32_ptregs_common:
-	CFI_ENDPROC
-	CFI_STARTPROC32 simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA rsp,SIZEOF_PTREGS
-	CFI_REL_OFFSET rax,RAX
-	CFI_REL_OFFSET rcx,RCX
-	CFI_REL_OFFSET rdx,RDX
-	CFI_REL_OFFSET rsi,RSI
-	CFI_REL_OFFSET rdi,RDI
-	CFI_REL_OFFSET rip,RIP
-/*	CFI_REL_OFFSET cs,CS*/
-/*	CFI_REL_OFFSET rflags,EFLAGS*/
-	CFI_REL_OFFSET rsp,RSP
-/*	CFI_REL_OFFSET ss,SS*/
 	SAVE_EXTRA_REGS 8
 	call *%rax
 	RESTORE_EXTRA_REGS 8
 	ret
-	CFI_ENDPROC
 END(ia32_ptregs_common)
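A note on the "Construct struct pt_regs on stack" sequences above: the frame
is built top-down, so the pushes appear in the reverse of struct pt_regs
member order. A sketch of the resulting frame, with offsets matching the
SAVE_*/RESTORE_* macros in asm/calling.h below (assuming the usual x86-64
pt_regs layout from asm/ptrace.h):

	/* lowest address = %rsp after the 'sub $(10*8),%rsp' */
	/*  0*8.. 9*8  r15 r14 r13 r12 rbp rbx r11 r10 r9 r8 -- space reserved, not written */
	/* 10*8..14*8  rax rcx rdx rsi rdi                    -- pushed last */
	/* 15*8        orig_ax (syscall number)               */
	/* 16*8..20*8  rip cs eflags rsp ss                   -- iret frame, pushed first */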
diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
index 1c8b50edb2db..0d76accde45b 100644
--- a/arch/x86/include/asm/calling.h
+++ b/arch/x86/include/asm/calling.h
@@ -46,8 +46,6 @@ For 32-bit we have the following conventions - kernel is built with
 
 */
 
-#include <asm/dwarf2.h>
-
 #ifdef CONFIG_X86_64
 
 /*
@@ -92,27 +90,26 @@ For 32-bit we have the following conventions - kernel is built with
 
 	.macro ALLOC_PT_GPREGS_ON_STACK addskip=0
 	subq $15*8+\addskip, %rsp
-	CFI_ADJUST_CFA_OFFSET 15*8+\addskip
 	.endm
 
 	.macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
 	.if \r11
-	movq_cfi r11, 6*8+\offset
+	movq %r11, 6*8+\offset(%rsp)
 	.endif
 	.if \r8910
-	movq_cfi r10, 7*8+\offset
-	movq_cfi r9, 8*8+\offset
-	movq_cfi r8, 9*8+\offset
+	movq %r10, 7*8+\offset(%rsp)
+	movq %r9, 8*8+\offset(%rsp)
+	movq %r8, 9*8+\offset(%rsp)
 	.endif
 	.if \rax
-	movq_cfi rax, 10*8+\offset
+	movq %rax, 10*8+\offset(%rsp)
 	.endif
 	.if \rcx
-	movq_cfi rcx, 11*8+\offset
+	movq %rcx, 11*8+\offset(%rsp)
 	.endif
-	movq_cfi rdx, 12*8+\offset
-	movq_cfi rsi, 13*8+\offset
-	movq_cfi rdi, 14*8+\offset
+	movq %rdx, 12*8+\offset(%rsp)
+	movq %rsi, 13*8+\offset(%rsp)
+	movq %rdi, 14*8+\offset(%rsp)
 	.endm
 	.macro SAVE_C_REGS offset=0
 	SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
@@ -131,24 +128,24 @@ For 32-bit we have the following conventions - kernel is built with
 	.endm
 
 	.macro SAVE_EXTRA_REGS offset=0
-	movq_cfi r15, 0*8+\offset
-	movq_cfi r14, 1*8+\offset
-	movq_cfi r13, 2*8+\offset
-	movq_cfi r12, 3*8+\offset
-	movq_cfi rbp, 4*8+\offset
-	movq_cfi rbx, 5*8+\offset
+	movq %r15, 0*8+\offset(%rsp)
+	movq %r14, 1*8+\offset(%rsp)
+	movq %r13, 2*8+\offset(%rsp)
+	movq %r12, 3*8+\offset(%rsp)
+	movq %rbp, 4*8+\offset(%rsp)
+	movq %rbx, 5*8+\offset(%rsp)
 	.endm
 	.macro SAVE_EXTRA_REGS_RBP offset=0
-	movq_cfi rbp, 4*8+\offset
+	movq %rbp, 4*8+\offset(%rsp)
 	.endm
 
 	.macro RESTORE_EXTRA_REGS offset=0
-	movq_cfi_restore 0*8+\offset, r15
-	movq_cfi_restore 1*8+\offset, r14
-	movq_cfi_restore 2*8+\offset, r13
-	movq_cfi_restore 3*8+\offset, r12
-	movq_cfi_restore 4*8+\offset, rbp
-	movq_cfi_restore 5*8+\offset, rbx
+	movq 0*8+\offset(%rsp), %r15
+	movq 1*8+\offset(%rsp), %r14
+	movq 2*8+\offset(%rsp), %r13
+	movq 3*8+\offset(%rsp), %r12
+	movq 4*8+\offset(%rsp), %rbp
+	movq 5*8+\offset(%rsp), %rbx
 	.endm
 
 	.macro ZERO_EXTRA_REGS
@@ -162,24 +159,24 @@ For 32-bit we have the following conventions - kernel is built with
 
 	.macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1
 	.if \rstor_r11
-	movq_cfi_restore 6*8, r11
+	movq 6*8(%rsp), %r11
 	.endif
 	.if \rstor_r8910
-	movq_cfi_restore 7*8, r10
-	movq_cfi_restore 8*8, r9
-	movq_cfi_restore 9*8, r8
+	movq 7*8(%rsp), %r10
+	movq 8*8(%rsp), %r9
+	movq 9*8(%rsp), %r8
 	.endif
 	.if \rstor_rax
-	movq_cfi_restore 10*8, rax
+	movq 10*8(%rsp), %rax
 	.endif
 	.if \rstor_rcx
-	movq_cfi_restore 11*8, rcx
+	movq 11*8(%rsp), %rcx
 	.endif
 	.if \rstor_rdx
-	movq_cfi_restore 12*8, rdx
+	movq 12*8(%rsp), %rdx
 	.endif
-	movq_cfi_restore 13*8, rsi
-	movq_cfi_restore 14*8, rdi
+	movq 13*8(%rsp), %rsi
+	movq 14*8(%rsp), %rdi
 	.endm
 	.macro RESTORE_C_REGS
 	RESTORE_C_REGS_HELPER 1,1,1,1,1
@@ -205,7 +202,6 @@ For 32-bit we have the following conventions - kernel is built with
 
 	.macro REMOVE_PT_GPREGS_FROM_STACK addskip=0
 	addq $15*8+\addskip, %rsp
-	CFI_ADJUST_CFA_OFFSET -(15*8+\addskip)
 	.endm
 
 	.macro icebp
@@ -224,23 +220,23 @@ For 32-bit we have the following conventions - kernel is built with
 	 */
 
 	.macro SAVE_ALL
-	pushl_cfi_reg eax
-	pushl_cfi_reg ebp
-	pushl_cfi_reg edi
-	pushl_cfi_reg esi
-	pushl_cfi_reg edx
-	pushl_cfi_reg ecx
-	pushl_cfi_reg ebx
+	pushl %eax
+	pushl %ebp
+	pushl %edi
+	pushl %esi
+	pushl %edx
+	pushl %ecx
+	pushl %ebx
 	.endm
 
 	.macro RESTORE_ALL
-	popl_cfi_reg ebx
-	popl_cfi_reg ecx
-	popl_cfi_reg edx
-	popl_cfi_reg esi
-	popl_cfi_reg edi
-	popl_cfi_reg ebp
-	popl_cfi_reg eax
+	popl %ebx
+	popl %ecx
+	popl %edx
+	popl %esi
+	popl %edi
+	popl %ebp
+	popl %eax
 	.endm
 
 #endif /* CONFIG_X86_64 */
diff --git a/arch/x86/include/asm/dwarf2.h b/arch/x86/include/asm/dwarf2.h
deleted file mode 100644
index de1cdaf4d743..000000000000
--- a/arch/x86/include/asm/dwarf2.h
+++ /dev/null
@@ -1,170 +0,0 @@
-#ifndef _ASM_X86_DWARF2_H
-#define _ASM_X86_DWARF2_H
-
-#ifndef __ASSEMBLY__
-#warning "asm/dwarf2.h should be only included in pure assembly files"
-#endif
-
-/*
- * Macros for dwarf2 CFI unwind table entries.
- * See "as.info" for details on these pseudo ops. Unfortunately
- * they are only supported in very new binutils, so define them
- * away for older version.
- */
-
-#ifdef CONFIG_AS_CFI
-
-#define CFI_STARTPROC .cfi_startproc
-#define CFI_ENDPROC .cfi_endproc
-#define CFI_DEF_CFA .cfi_def_cfa
-#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register
-#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
-#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
-#define CFI_OFFSET .cfi_offset
-#define CFI_REL_OFFSET .cfi_rel_offset
-#define CFI_REGISTER .cfi_register
-#define CFI_RESTORE .cfi_restore
-#define CFI_REMEMBER_STATE .cfi_remember_state
-#define CFI_RESTORE_STATE .cfi_restore_state
-#define CFI_UNDEFINED .cfi_undefined
-#define CFI_ESCAPE .cfi_escape
-
-#ifdef CONFIG_AS_CFI_SIGNAL_FRAME
-#define CFI_SIGNAL_FRAME .cfi_signal_frame
-#else
-#define CFI_SIGNAL_FRAME
-#endif
-
-#if defined(CONFIG_AS_CFI_SECTIONS) && defined(__ASSEMBLY__)
-	/*
-	 * Emit CFI data in .debug_frame sections, not .eh_frame sections.
-	 * The latter we currently just discard since we don't do DWARF
-	 * unwinding at runtime. So only the offline DWARF information is
-	 * useful to anyone. Note we should not use this directive if this
-	 * file is used in the vDSO assembly, or if vmlinux.lds.S gets
-	 * changed so it doesn't discard .eh_frame.
-	 */
-	.cfi_sections .debug_frame
-#endif
-
-#else
-
-/*
- * Due to the structure of pre-exisiting code, don't use assembler line
- * comment character # to ignore the arguments. Instead, use a dummy macro.
- */
-.macro cfi_ignore a=0, b=0, c=0, d=0
-.endm
-
-#define CFI_STARTPROC cfi_ignore
-#define CFI_ENDPROC cfi_ignore
-#define CFI_DEF_CFA cfi_ignore
-#define CFI_DEF_CFA_REGISTER cfi_ignore
-#define CFI_DEF_CFA_OFFSET cfi_ignore
-#define CFI_ADJUST_CFA_OFFSET cfi_ignore
-#define CFI_OFFSET cfi_ignore
-#define CFI_REL_OFFSET cfi_ignore
-#define CFI_REGISTER cfi_ignore
-#define CFI_RESTORE cfi_ignore
-#define CFI_REMEMBER_STATE cfi_ignore
-#define CFI_RESTORE_STATE cfi_ignore
-#define CFI_UNDEFINED cfi_ignore
-#define CFI_ESCAPE cfi_ignore
-#define CFI_SIGNAL_FRAME cfi_ignore
-
-#endif
-
-/*
- * An attempt to make CFI annotations more or less
- * correct and shorter. It is implied that you know
- * what you're doing if you use them.
- */
-#ifdef __ASSEMBLY__
-#ifdef CONFIG_X86_64
-	.macro pushq_cfi reg
-	pushq \reg
-	CFI_ADJUST_CFA_OFFSET 8
-	.endm
-
-	.macro pushq_cfi_reg reg
-	pushq %\reg
-	CFI_ADJUST_CFA_OFFSET 8
-	CFI_REL_OFFSET \reg, 0
-	.endm
-
-	.macro popq_cfi reg
-	popq \reg
-	CFI_ADJUST_CFA_OFFSET -8
-	.endm
-
-	.macro popq_cfi_reg reg
-	popq %\reg
-	CFI_ADJUST_CFA_OFFSET -8
-	CFI_RESTORE \reg
-	.endm
-
-	.macro pushfq_cfi
-	pushfq
-	CFI_ADJUST_CFA_OFFSET 8
-	.endm
-
-	.macro popfq_cfi
-	popfq
-	CFI_ADJUST_CFA_OFFSET -8
-	.endm
-
-	.macro movq_cfi reg offset=0
-	movq %\reg, \offset(%rsp)
-	CFI_REL_OFFSET \reg, \offset
-	.endm
-
-	.macro movq_cfi_restore offset reg
-	movq \offset(%rsp), %\reg
-	CFI_RESTORE \reg
-	.endm
-#else /*!CONFIG_X86_64*/
-	.macro pushl_cfi reg
-	pushl \reg
-	CFI_ADJUST_CFA_OFFSET 4
-	.endm
-
-	.macro pushl_cfi_reg reg
-	pushl %\reg
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET \reg, 0
-	.endm
-
-	.macro popl_cfi reg
-	popl \reg
-	CFI_ADJUST_CFA_OFFSET -4
-	.endm
-
-	.macro popl_cfi_reg reg
-	popl %\reg
-	CFI_ADJUST_CFA_OFFSET -4
-	CFI_RESTORE \reg
-	.endm
-
-	.macro pushfl_cfi
-	pushfl
-	CFI_ADJUST_CFA_OFFSET 4
-	.endm
-
-	.macro popfl_cfi
-	popfl
-	CFI_ADJUST_CFA_OFFSET -4
-	.endm
-
-	.macro movl_cfi reg offset=0
-	movl %\reg, \offset(%esp)
-	CFI_REL_OFFSET \reg, \offset
-	.endm
-
-	.macro movl_cfi_restore offset reg
-	movl \offset(%esp), %\reg
-	CFI_RESTORE \reg
-	.endm
-#endif /*!CONFIG_X86_64*/
-#endif /*__ASSEMBLY__*/
-
-#endif /* _ASM_X86_DWARF2_H */
diff --git a/arch/x86/include/asm/frame.h b/arch/x86/include/asm/frame.h
index 3b629f47eb65..793179cf8e21 100644
--- a/arch/x86/include/asm/frame.h
+++ b/arch/x86/include/asm/frame.h
@@ -1,20 +1,17 @@
 #ifdef __ASSEMBLY__
 
 #include <asm/asm.h>
-#include <asm/dwarf2.h>
 
 /* The annotation hides the frame from the unwinder and makes it look
    like a ordinary ebp save/restore. This avoids some special cases for
    frame pointer later */
 #ifdef CONFIG_FRAME_POINTER
 	.macro FRAME
-	__ASM_SIZE(push,_cfi) %__ASM_REG(bp)
-	CFI_REL_OFFSET __ASM_REG(bp), 0
+	__ASM_SIZE(push,) %__ASM_REG(bp)
 	__ASM_SIZE(mov) %__ASM_REG(sp), %__ASM_REG(bp)
 	.endm
 	.macro ENDFRAME
-	__ASM_SIZE(pop,_cfi) %__ASM_REG(bp)
-	CFI_RESTORE __ASM_REG(bp)
+	__ASM_SIZE(pop,) %__ASM_REG(bp)
 	.endm
 #else
 	.macro FRAME
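With the CFI lines gone, FRAME/ENDFRAME under CONFIG_FRAME_POINTER reduce to
the classic frame-pointer prologue and epilogue. Roughly, the 64-bit
expansion is now just:

	/* .macro FRAME  (64-bit expansion) */
	pushq %rbp
	movq %rsp, %rbp
	/* .macro ENDFRAME */
	popq %rbp

which is exactly the ordinary rbp save/restore pattern the kernel's
framepointer-based unwinder already understands, so no separate debug
annotation is needed.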
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 1c309763e321..0ac73de925d1 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -50,7 +50,6 @@
 #include <asm/smp.h>
 #include <asm/page_types.h>
 #include <asm/percpu.h>
-#include <asm/dwarf2.h>
 #include <asm/processor-flags.h>
 #include <asm/ftrace.h>
 #include <asm/irq_vectors.h>
@@ -113,11 +112,10 @@
 
 	/* unfortunately push/pop can't be no-op */
 .macro PUSH_GS
-	pushl_cfi $0
+	pushl $0
 .endm
 .macro POP_GS pop=0
 	addl $(4 + \pop), %esp
-	CFI_ADJUST_CFA_OFFSET -(4 + \pop)
 .endm
 .macro POP_GS_EX
 .endm
@@ -137,16 +135,13 @@
 #else /* CONFIG_X86_32_LAZY_GS */
 
 .macro PUSH_GS
-	pushl_cfi %gs
-	/*CFI_REL_OFFSET gs, 0*/
+	pushl %gs
 .endm
 
 .macro POP_GS pop=0
-98:	popl_cfi %gs
-	/*CFI_RESTORE gs*/
+98:	popl %gs
 	.if \pop <> 0
 	add $\pop, %esp
-	CFI_ADJUST_CFA_OFFSET -\pop
 	.endif
 .endm
 .macro POP_GS_EX
@@ -170,11 +165,9 @@
 
 .macro GS_TO_REG reg
 	movl %gs, \reg
-	/*CFI_REGISTER gs, \reg*/
 .endm
 .macro REG_TO_PTGS reg
 	movl \reg, PT_GS(%esp)
-	/*CFI_REL_OFFSET gs, PT_GS*/
 .endm
 .macro SET_KERNEL_GS reg
 	movl $(__KERNEL_STACK_CANARY), \reg
@@ -186,26 +179,16 @@
 .macro SAVE_ALL
 	cld
 	PUSH_GS
-	pushl_cfi %fs
-	/*CFI_REL_OFFSET fs, 0;*/
-	pushl_cfi %es
-	/*CFI_REL_OFFSET es, 0;*/
-	pushl_cfi %ds
-	/*CFI_REL_OFFSET ds, 0;*/
-	pushl_cfi %eax
-	CFI_REL_OFFSET eax, 0
-	pushl_cfi %ebp
-	CFI_REL_OFFSET ebp, 0
-	pushl_cfi %edi
-	CFI_REL_OFFSET edi, 0
-	pushl_cfi %esi
-	CFI_REL_OFFSET esi, 0
-	pushl_cfi %edx
-	CFI_REL_OFFSET edx, 0
-	pushl_cfi %ecx
-	CFI_REL_OFFSET ecx, 0
-	pushl_cfi %ebx
-	CFI_REL_OFFSET ebx, 0
+	pushl %fs
+	pushl %es
+	pushl %ds
+	pushl %eax
+	pushl %ebp
+	pushl %edi
+	pushl %esi
+	pushl %edx
+	pushl %ecx
+	pushl %ebx
 	movl $(__USER_DS), %edx
 	movl %edx, %ds
 	movl %edx, %es
@@ -215,30 +198,20 @@
 .endm
 
 .macro RESTORE_INT_REGS
-	popl_cfi %ebx
-	CFI_RESTORE ebx
-	popl_cfi %ecx
-	CFI_RESTORE ecx
-	popl_cfi %edx
-	CFI_RESTORE edx
-	popl_cfi %esi
-	CFI_RESTORE esi
-	popl_cfi %edi
-	CFI_RESTORE edi
-	popl_cfi %ebp
-	CFI_RESTORE ebp
-	popl_cfi %eax
-	CFI_RESTORE eax
+	popl %ebx
+	popl %ecx
+	popl %edx
+	popl %esi
+	popl %edi
+	popl %ebp
+	popl %eax
 .endm
 
 .macro RESTORE_REGS pop=0
 	RESTORE_INT_REGS
-1:	popl_cfi %ds
-	/*CFI_RESTORE ds;*/
-2:	popl_cfi %es
-	/*CFI_RESTORE es;*/
-3:	popl_cfi %fs
-	/*CFI_RESTORE fs;*/
+1:	popl %ds
+2:	popl %es
+3:	popl %fs
 	POP_GS \pop
 .pushsection .fixup, "ax"
 4:	movl $0, (%esp)
@@ -254,64 +227,27 @@
 	POP_GS_EX
 .endm
 
-.macro RING0_INT_FRAME
-	CFI_STARTPROC simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA esp, 3*4
-	/*CFI_OFFSET cs, -2*4;*/
-	CFI_OFFSET eip, -3*4
-.endm
-
-.macro RING0_EC_FRAME
-	CFI_STARTPROC simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA esp, 4*4
-	/*CFI_OFFSET cs, -2*4;*/
-	CFI_OFFSET eip, -3*4
-.endm
-
-.macro RING0_PTREGS_FRAME
-	CFI_STARTPROC simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA esp, PT_OLDESP-PT_EBX
-	/*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/
-	CFI_OFFSET eip, PT_EIP-PT_OLDESP
-	/*CFI_OFFSET es, PT_ES-PT_OLDESP;*/
-	/*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/
-	CFI_OFFSET eax, PT_EAX-PT_OLDESP
-	CFI_OFFSET ebp, PT_EBP-PT_OLDESP
-	CFI_OFFSET edi, PT_EDI-PT_OLDESP
-	CFI_OFFSET esi, PT_ESI-PT_OLDESP
-	CFI_OFFSET edx, PT_EDX-PT_OLDESP
-	CFI_OFFSET ecx, PT_ECX-PT_OLDESP
-	CFI_OFFSET ebx, PT_EBX-PT_OLDESP
-.endm
-
 ENTRY(ret_from_fork)
-	CFI_STARTPROC
-	pushl_cfi %eax
+	pushl %eax
 	call schedule_tail
 	GET_THREAD_INFO(%ebp)
-	popl_cfi %eax
-	pushl_cfi $0x0202 # Reset kernel eflags
-	popfl_cfi
+	popl %eax
+	pushl $0x0202 # Reset kernel eflags
+	popfl
 	jmp syscall_exit
-	CFI_ENDPROC
 END(ret_from_fork)
 
 ENTRY(ret_from_kernel_thread)
-	CFI_STARTPROC
-	pushl_cfi %eax
+	pushl %eax
 	call schedule_tail
 	GET_THREAD_INFO(%ebp)
-	popl_cfi %eax
-	pushl_cfi $0x0202 # Reset kernel eflags
-	popfl_cfi
+	popl %eax
+	pushl $0x0202 # Reset kernel eflags
+	popfl
 	movl PT_EBP(%esp),%eax
 	call *PT_EBX(%esp)
 	movl $0,PT_EAX(%esp)
 	jmp syscall_exit
-	CFI_ENDPROC
 ENDPROC(ret_from_kernel_thread)
 
 /*
@@ -323,7 +259,6 @@ ENDPROC(ret_from_kernel_thread)
 
 	# userspace resumption stub bypassing syscall exit tracing
 	ALIGN
-	RING0_PTREGS_FRAME
 ret_from_exception:
 	preempt_stop(CLBR_ANY)
 ret_from_intr:
@@ -367,17 +302,12 @@ need_resched:
 	jmp need_resched
 END(resume_kernel)
 #endif
-	CFI_ENDPROC
 
 /* SYSENTER_RETURN points to after the "sysenter" instruction in
    the vsyscall page. See vsyscall-sysentry.S, which defines the symbol. */
 
 	# sysenter call handler stub
 ENTRY(ia32_sysenter_target)
-	CFI_STARTPROC simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA esp, 0
-	CFI_REGISTER esp, ebp
 	movl TSS_sysenter_sp0(%esp),%esp
 sysenter_past_esp:
 	/*
@@ -385,14 +315,11 @@ sysenter_past_esp:
 	 * enough kernel state to call TRACE_IRQS_OFF can be called - but
 	 * we immediately enable interrupts at that point anyway.
 	 */
-	pushl_cfi $__USER_DS
-	/*CFI_REL_OFFSET ss, 0*/
-	pushl_cfi %ebp
-	CFI_REL_OFFSET esp, 0
-	pushfl_cfi
+	pushl $__USER_DS
+	pushl %ebp
+	pushfl
 	orl $X86_EFLAGS_IF, (%esp)
-	pushl_cfi $__USER_CS
-	/*CFI_REL_OFFSET cs, 0*/
+	pushl $__USER_CS
 	/*
 	 * Push current_thread_info()->sysenter_return to the stack.
 	 * A tiny bit of offset fixup is necessary: TI_sysenter_return
@@ -401,10 +328,9 @@ sysenter_past_esp:
 	 * TOP_OF_KERNEL_STACK_PADDING takes us to the top of the stack;
 	 * and THREAD_SIZE takes us to the bottom.
 	 */
-	pushl_cfi ((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp)
-	CFI_REL_OFFSET eip, 0
+	pushl ((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp)
 
-	pushl_cfi %eax
+	pushl %eax
 	SAVE_ALL
 	ENABLE_INTERRUPTS(CLBR_NONE)
 
@@ -453,11 +379,11 @@ sysenter_audit:
 	/* movl PT_EAX(%esp), %eax already set, syscall number: 1st arg to audit */
 	movl PT_EBX(%esp), %edx /* ebx/a0: 2nd arg to audit */
 	/* movl PT_ECX(%esp), %ecx already set, a1: 3nd arg to audit */
-	pushl_cfi PT_ESI(%esp) /* a3: 5th arg */
-	pushl_cfi PT_EDX+4(%esp) /* a2: 4th arg */
+	pushl PT_ESI(%esp) /* a3: 5th arg */
+	pushl PT_EDX+4(%esp) /* a2: 4th arg */
 	call __audit_syscall_entry
-	popl_cfi %ecx /* get that remapped edx off the stack */
-	popl_cfi %ecx /* get that remapped esi off the stack */
+	popl %ecx /* get that remapped edx off the stack */
+	popl %ecx /* get that remapped esi off the stack */
 	movl PT_EAX(%esp),%eax /* reload syscall number */
 	jmp sysenter_do_call
 
@@ -480,7 +406,6 @@ sysexit_audit:
 	jmp sysenter_exit
 #endif
 
-	CFI_ENDPROC
.pushsection .fixup,"ax"
 2:	movl $0,PT_FS(%esp)
 	jmp 1b
@@ -491,9 +416,8 @@ ENDPROC(ia32_sysenter_target)
 
 	# system call handler stub
 ENTRY(system_call)
-	RING0_INT_FRAME # can't unwind into user space anyway
 	ASM_CLAC
-	pushl_cfi %eax # save orig_eax
+	pushl %eax # save orig_eax
 	SAVE_ALL
 	GET_THREAD_INFO(%ebp)
 	# system call tracing in operation / emulation
@@ -527,7 +451,6 @@ restore_all_notrace:
 	movb PT_CS(%esp), %al
 	andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
 	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
-	CFI_REMEMBER_STATE
 	je ldt_ss # returning to user-space with LDT SS
 #endif
 restore_nocheck:
@@ -543,7 +466,6 @@ ENTRY(iret_exc)
 	_ASM_EXTABLE(irq_return,iret_exc)
 
 #ifdef CONFIG_X86_ESPFIX32
-	CFI_RESTORE_STATE
 ldt_ss:
 #ifdef CONFIG_PARAVIRT
 	/*
@@ -577,22 +499,19 @@ ldt_ss:
 	shr $16, %edx
 	mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
 	mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
-	pushl_cfi $__ESPFIX_SS
-	pushl_cfi %eax /* new kernel esp */
+	pushl $__ESPFIX_SS
+	pushl %eax /* new kernel esp */
 	/* Disable interrupts, but do not irqtrace this section: we
 	 * will soon execute iret and the tracer was already set to
 	 * the irqstate after the iret */
 	DISABLE_INTERRUPTS(CLBR_EAX)
 	lss (%esp), %esp /* switch to espfix segment */
-	CFI_ADJUST_CFA_OFFSET -8
 	jmp restore_nocheck
 #endif
-	CFI_ENDPROC
 ENDPROC(system_call)
 
 	# perform work that needs to be done immediately before resumption
 	ALIGN
-	RING0_PTREGS_FRAME # can't unwind into user space anyway
 work_pending:
 	testb $_TIF_NEED_RESCHED, %cl
 	jz work_notifysig
@@ -634,9 +553,9 @@ work_notifysig: # deal with pending signals and
 #ifdef CONFIG_VM86
 	ALIGN
 work_notifysig_v86:
-	pushl_cfi %ecx # save ti_flags for do_notify_resume
+	pushl %ecx # save ti_flags for do_notify_resume
 	call save_v86_state # %eax contains pt_regs pointer
-	popl_cfi %ecx
+	popl %ecx
 	movl %eax, %esp
 	jmp 1b
 #endif
@@ -666,9 +585,7 @@ syscall_exit_work:
 	call syscall_trace_leave
 	jmp resume_userspace
 END(syscall_exit_work)
-	CFI_ENDPROC
 
-	RING0_INT_FRAME # can't unwind into user space anyway
 syscall_fault:
 	ASM_CLAC
 	GET_THREAD_INFO(%ebp)
@@ -685,7 +602,6 @@ sysenter_badsys:
 	movl $-ENOSYS,%eax
 	jmp sysenter_after_call
 END(sysenter_badsys)
-	CFI_ENDPROC
 
 .macro FIXUP_ESPFIX_STACK
 /*
@@ -701,10 +617,9 @@ END(sysenter_badsys)
 	mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
 	shl $16, %eax
 	addl %esp, %eax /* the adjusted stack pointer */
-	pushl_cfi $__KERNEL_DS
-	pushl_cfi %eax
+	pushl $__KERNEL_DS
+	pushl %eax
 	lss (%esp), %esp /* switch to the normal stack segment */
-	CFI_ADJUST_CFA_OFFSET -8
 #endif
 .endm
 .macro UNWIND_ESPFIX_STACK
@@ -728,13 +643,11 @@ END(sysenter_badsys)
 */
 	.align 8
 ENTRY(irq_entries_start)
-	RING0_INT_FRAME
 	vector=FIRST_EXTERNAL_VECTOR
 	.rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
-	pushl_cfi $(~vector+0x80) /* Note: always in signed byte range */
+	pushl $(~vector+0x80) /* Note: always in signed byte range */
 	vector=vector+1
 	jmp common_interrupt
-	CFI_ADJUST_CFA_OFFSET -4
 	.align 8
 	.endr
 END(irq_entries_start)
@@ -753,19 +666,16 @@ common_interrupt:
 	call do_IRQ
 	jmp ret_from_intr
 ENDPROC(common_interrupt)
-	CFI_ENDPROC
 
 #define BUILD_INTERRUPT3(name, nr, fn) \
 ENTRY(name) \
-	RING0_INT_FRAME; \
 	ASM_CLAC; \
-	pushl_cfi $~(nr); \
+	pushl $~(nr); \
 	SAVE_ALL; \
 	TRACE_IRQS_OFF \
 	movl %esp,%eax; \
 	call fn; \
 	jmp ret_from_intr; \
-	CFI_ENDPROC; \
 ENDPROC(name)
 
 
@@ -784,37 +694,31 @@ ENDPROC(name)
 #include <asm/entry_arch.h>
 
 ENTRY(coprocessor_error)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $0
-	pushl_cfi $do_coprocessor_error
+	pushl $0
+	pushl $do_coprocessor_error
 	jmp error_code
-	CFI_ENDPROC
 END(coprocessor_error)
 
 ENTRY(simd_coprocessor_error)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $0
+	pushl $0
 #ifdef CONFIG_X86_INVD_BUG
 	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
-	ALTERNATIVE "pushl_cfi $do_general_protection", \
+	ALTERNATIVE "pushl $do_general_protection", \
 		"pushl $do_simd_coprocessor_error", \
 		X86_FEATURE_XMM
 #else
-	pushl_cfi $do_simd_coprocessor_error
+	pushl $do_simd_coprocessor_error
 #endif
 	jmp error_code
-	CFI_ENDPROC
 END(simd_coprocessor_error)
 
 ENTRY(device_not_available)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $-1 # mark this as an int
-	pushl_cfi $do_device_not_available
+	pushl $-1 # mark this as an int
+	pushl $do_device_not_available
 	jmp error_code
-	CFI_ENDPROC
 END(device_not_available)
 
 #ifdef CONFIG_PARAVIRT
@@ -830,115 +734,89 @@ END(native_irq_enable_sysexit)
 #endif
 
 ENTRY(overflow)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $0
-	pushl_cfi $do_overflow
+	pushl $0
+	pushl $do_overflow
 	jmp error_code
-	CFI_ENDPROC
 END(overflow)
 
 ENTRY(bounds)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $0
-	pushl_cfi $do_bounds
+	pushl $0
+	pushl $do_bounds
 	jmp error_code
-	CFI_ENDPROC
 END(bounds)
 
 ENTRY(invalid_op)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $0
-	pushl_cfi $do_invalid_op
+	pushl $0
+	pushl $do_invalid_op
 	jmp error_code
-	CFI_ENDPROC
 END(invalid_op)
 
 ENTRY(coprocessor_segment_overrun)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $0
-	pushl_cfi $do_coprocessor_segment_overrun
+	pushl $0
+	pushl $do_coprocessor_segment_overrun
 	jmp error_code
-	CFI_ENDPROC
 END(coprocessor_segment_overrun)
 
 ENTRY(invalid_TSS)
-	RING0_EC_FRAME
 	ASM_CLAC
-	pushl_cfi $do_invalid_TSS
+	pushl $do_invalid_TSS
 	jmp error_code
-	CFI_ENDPROC
 END(invalid_TSS)
 
 ENTRY(segment_not_present)
-	RING0_EC_FRAME
 	ASM_CLAC
-	pushl_cfi $do_segment_not_present
+	pushl $do_segment_not_present
 	jmp error_code
-	CFI_ENDPROC
 END(segment_not_present)
 
 ENTRY(stack_segment)
-	RING0_EC_FRAME
 	ASM_CLAC
-	pushl_cfi $do_stack_segment
+	pushl $do_stack_segment
 	jmp error_code
-	CFI_ENDPROC
 END(stack_segment)
 
 ENTRY(alignment_check)
-	RING0_EC_FRAME
 	ASM_CLAC
-	pushl_cfi $do_alignment_check
+	pushl $do_alignment_check
 	jmp error_code
-	CFI_ENDPROC
 END(alignment_check)
 
 ENTRY(divide_error)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $0 # no error code
-	pushl_cfi $do_divide_error
+	pushl $0 # no error code
+	pushl $do_divide_error
 	jmp error_code
-	CFI_ENDPROC
 END(divide_error)
 
 #ifdef CONFIG_X86_MCE
 ENTRY(machine_check)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $0
-	pushl_cfi machine_check_vector
+	pushl $0
+	pushl machine_check_vector
 	jmp error_code
-	CFI_ENDPROC
 END(machine_check)
 #endif
 
 ENTRY(spurious_interrupt_bug)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $0
-	pushl_cfi $do_spurious_interrupt_bug
+	pushl $0
+	pushl $do_spurious_interrupt_bug
 	jmp error_code
-	CFI_ENDPROC
 END(spurious_interrupt_bug)
 
 #ifdef CONFIG_XEN
 /* Xen doesn't set %esp to be precisely what the normal sysenter
    entrypoint expects, so fix it up before using the normal path. */
 ENTRY(xen_sysenter_target)
-	RING0_INT_FRAME
 	addl $5*4, %esp /* remove xen-provided frame */
-	CFI_ADJUST_CFA_OFFSET -5*4
 	jmp sysenter_past_esp
-	CFI_ENDPROC
 
 ENTRY(xen_hypervisor_callback)
-	CFI_STARTPROC
-	pushl_cfi $-1 /* orig_ax = -1 => not a system call */
+	pushl $-1 /* orig_ax = -1 => not a system call */
 	SAVE_ALL
 	TRACE_IRQS_OFF
 
@@ -962,7 +840,6 @@ ENTRY(xen_do_upcall)
 	call xen_maybe_preempt_hcall
 #endif
 	jmp ret_from_intr
-	CFI_ENDPROC
 ENDPROC(xen_hypervisor_callback)
 
 # Hypervisor uses this for application faults while it executes.
@@ -976,8 +853,7 @@ ENDPROC(xen_hypervisor_callback)
 # to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 # We distinguish between categories by maintaining a status value in EAX.
 ENTRY(xen_failsafe_callback)
-	CFI_STARTPROC
-	pushl_cfi %eax
+	pushl %eax
 	movl $1,%eax
 1:	mov 4(%esp),%ds
 2:	mov 8(%esp),%es
@@ -986,15 +862,13 @@ ENTRY(xen_failsafe_callback)
 	/* EAX == 0 => Category 1 (Bad segment)
 	   EAX != 0 => Category 2 (Bad IRET) */
 	testl %eax,%eax
-	popl_cfi %eax
+	popl %eax
 	lea 16(%esp),%esp
-	CFI_ADJUST_CFA_OFFSET -16
 	jz 5f
 	jmp iret_exc
-5:	pushl_cfi $-1 /* orig_ax = -1 => not a system call */
+5:	pushl $-1 /* orig_ax = -1 => not a system call */
 	SAVE_ALL
 	jmp ret_from_exception
-	CFI_ENDPROC
 
 .section .fixup,"ax"
 6:	xorl %eax,%eax
@@ -1195,34 +1069,28 @@ return_to_handler:
 
 #ifdef CONFIG_TRACING
 ENTRY(trace_page_fault)
-	RING0_EC_FRAME
 	ASM_CLAC
-	pushl_cfi $trace_do_page_fault
+	pushl $trace_do_page_fault
 	jmp error_code
-	CFI_ENDPROC
 END(trace_page_fault)
 #endif
 
 ENTRY(page_fault)
-	RING0_EC_FRAME
 	ASM_CLAC
-	pushl_cfi $do_page_fault
+	pushl $do_page_fault
 	ALIGN
 error_code:
 	/* the function address is in %gs's slot on the stack */
-	pushl_cfi %fs
-	/*CFI_REL_OFFSET fs, 0*/
-	pushl_cfi %es
-	/*CFI_REL_OFFSET es, 0*/
-	pushl_cfi %ds
-	/*CFI_REL_OFFSET ds, 0*/
-	pushl_cfi_reg eax
-	pushl_cfi_reg ebp
-	pushl_cfi_reg edi
-	pushl_cfi_reg esi
-	pushl_cfi_reg edx
-	pushl_cfi_reg ecx
-	pushl_cfi_reg ebx
+	pushl %fs
+	pushl %es
+	pushl %ds
+	pushl %eax
+	pushl %ebp
+	pushl %edi
+	pushl %esi
+	pushl %edx
+	pushl %ecx
+	pushl %ebx
 	cld
 	movl $(__KERNEL_PERCPU), %ecx
 	movl %ecx, %fs
@@ -1240,7 +1108,6 @@ error_code:
 	movl %esp,%eax # pt_regs pointer
 	call *%edi
 	jmp ret_from_exception
-	CFI_ENDPROC
 END(page_fault)
 
 /*
@@ -1261,29 +1128,24 @@ END(page_fault)
 	jne \ok
 \label:
 	movl TSS_sysenter_sp0 + \offset(%esp), %esp
-	CFI_DEF_CFA esp, 0
-	CFI_UNDEFINED eip
-	pushfl_cfi
-	pushl_cfi $__KERNEL_CS
-	pushl_cfi $sysenter_past_esp
-	CFI_REL_OFFSET eip, 0
+	pushfl
+	pushl $__KERNEL_CS
+	pushl $sysenter_past_esp
 .endm
 
 ENTRY(debug)
-	RING0_INT_FRAME
 	ASM_CLAC
 	cmpl $ia32_sysenter_target,(%esp)
 	jne debug_stack_correct
 	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
 debug_stack_correct:
-	pushl_cfi $-1 # mark this as an int
+	pushl $-1 # mark this as an int
 	SAVE_ALL
 	TRACE_IRQS_OFF
 	xorl %edx,%edx # error code 0
 	movl %esp,%eax # pt_regs pointer
 	call do_debug
 	jmp ret_from_exception
-	CFI_ENDPROC
 END(debug)
 
 /*
@@ -1295,45 +1157,40 @@ END(debug)
 * fault happened on the sysenter path.
 */
 ENTRY(nmi)
-	RING0_INT_FRAME
 	ASM_CLAC
 #ifdef CONFIG_X86_ESPFIX32
-	pushl_cfi %eax
+	pushl %eax
 	movl %ss, %eax
 	cmpw $__ESPFIX_SS, %ax
-	popl_cfi %eax
+	popl %eax
 	je nmi_espfix_stack
 #endif
 	cmpl $ia32_sysenter_target,(%esp)
 	je nmi_stack_fixup
-	pushl_cfi %eax
+	pushl %eax
 	movl %esp,%eax
 	/* Do not access memory above the end of our stack page,
 	 * it might not exist.
 	 */
 	andl $(THREAD_SIZE-1),%eax
 	cmpl $(THREAD_SIZE-20),%eax
-	popl_cfi %eax
+	popl %eax
 	jae nmi_stack_correct
 	cmpl $ia32_sysenter_target,12(%esp)
 	je nmi_debug_stack_check
 nmi_stack_correct:
-	/* We have a RING0_INT_FRAME here */
-	pushl_cfi %eax
+	pushl %eax
 	SAVE_ALL
 	xorl %edx,%edx # zero error code
 	movl %esp,%eax # pt_regs pointer
 	call do_nmi
 	jmp restore_all_notrace
-	CFI_ENDPROC
 
 nmi_stack_fixup:
-	RING0_INT_FRAME
 	FIX_STACK 12, nmi_stack_correct, 1
 	jmp nmi_stack_correct
 
 nmi_debug_stack_check:
-	/* We have a RING0_INT_FRAME here */
 	cmpw $__KERNEL_CS,16(%esp)
 	jne nmi_stack_correct
 	cmpl $debug,(%esp)
@@ -1345,57 +1202,48 @@ nmi_debug_stack_check:
 
 #ifdef CONFIG_X86_ESPFIX32
 nmi_espfix_stack:
-	/* We have a RING0_INT_FRAME here.
-	 *
+	/*
 	 * create the pointer to lss back
 	 */
-	pushl_cfi %ss
-	pushl_cfi %esp
+	pushl %ss
+	pushl %esp
 	addl $4, (%esp)
 	/* copy the iret frame of 12 bytes */
 	.rept 3
-	pushl_cfi 16(%esp)
+	pushl 16(%esp)
 	.endr
-	pushl_cfi %eax
+	pushl %eax
 	SAVE_ALL
 	FIXUP_ESPFIX_STACK # %eax == %esp
 	xorl %edx,%edx # zero error code
 	call do_nmi
 	RESTORE_REGS
 	lss 12+4(%esp), %esp # back to espfix stack
-	CFI_ADJUST_CFA_OFFSET -24
 	jmp irq_return
 #endif
-	CFI_ENDPROC
 END(nmi)
 
 ENTRY(int3)
-	RING0_INT_FRAME
 	ASM_CLAC
-	pushl_cfi $-1 # mark this as an int
+	pushl $-1 # mark this as an int
 	SAVE_ALL
 	TRACE_IRQS_OFF
 	xorl %edx,%edx # zero error code
 	movl %esp,%eax # pt_regs pointer
 	call do_int3
 	jmp ret_from_exception
-	CFI_ENDPROC
 END(int3)
 
 ENTRY(general_protection)
-	RING0_EC_FRAME
-	pushl_cfi $do_general_protection
+	pushl $do_general_protection
 	jmp error_code
-	CFI_ENDPROC
 END(general_protection)
 
 #ifdef CONFIG_KVM_GUEST
 ENTRY(async_page_fault)
1394 RING0_EC_FRAME
1395 ASM_CLAC 1244 ASM_CLAC
1396 pushl_cfi $do_async_page_fault 1245 pushl $do_async_page_fault
1397 jmp error_code 1246 jmp error_code
1398 CFI_ENDPROC
1399END(async_page_fault) 1247END(async_page_fault)
1400#endif 1248#endif
1401 1249
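Almost every change in this file is mechanical, because the deleted asm/dwarf2.h defined the *_cfi pseudo-instructions as thin wrappers around real instructions plus .cfi_* directives. Roughly (from memory of that header; the CONFIG_AS_CFI guards and the full macro set are elided):

    #define CFI_STARTPROC           .cfi_startproc
    #define CFI_ENDPROC             .cfi_endproc
    #define CFI_ADJUST_CFA_OFFSET   .cfi_adjust_cfa_offset
    #define CFI_REL_OFFSET          .cfi_rel_offset
    /*
     * ...plus assembler macros along the lines of:
     *
     *      .macro pushl_cfi reg
     *              pushl \reg
     *              CFI_ADJUST_CFA_OFFSET 4
     *      .endm
     *
     * which is why every pushl_cfi/popl_cfi/pushq_cfi in the left
     * column collapses to a plain push/pop in the right one
     * (pushl_cfi_reg additionally emitted a CFI_REL_OFFSET).
     */
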
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 47b95813dc37..b84cec50c8cf 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -19,8 +19,6 @@
19 * at the top of the kernel process stack. 19 * at the top of the kernel process stack.
20 * 20 *
21 * Some macro usage: 21 * Some macro usage:
22 * - CFI macros are used to generate dwarf2 unwind information for better
23 * backtraces. They don't change any code.
24 * - ENTRY/END Define functions in the symbol table. 22 * - ENTRY/END Define functions in the symbol table.
25 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging. 23 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
26 * - idtentry - Define exception entry points. 24 * - idtentry - Define exception entry points.
@@ -30,7 +28,6 @@
30#include <asm/segment.h> 28#include <asm/segment.h>
31#include <asm/cache.h> 29#include <asm/cache.h>
32#include <asm/errno.h> 30#include <asm/errno.h>
33#include <asm/dwarf2.h>
34#include <asm/calling.h> 31#include <asm/calling.h>
35#include <asm/asm-offsets.h> 32#include <asm/asm-offsets.h>
36#include <asm/msr.h> 33#include <asm/msr.h>
@@ -113,61 +110,6 @@ ENDPROC(native_usergs_sysret64)
113#endif 110#endif
114 111
115/* 112/*
116 * empty frame
117 */
118 .macro EMPTY_FRAME start=1 offset=0
119 .if \start
120 CFI_STARTPROC simple
121 CFI_SIGNAL_FRAME
122 CFI_DEF_CFA rsp,8+\offset
123 .else
124 CFI_DEF_CFA_OFFSET 8+\offset
125 .endif
126 .endm
127
128/*
129 * initial frame state for interrupts (and exceptions without error code)
130 */
131 .macro INTR_FRAME start=1 offset=0
132 EMPTY_FRAME \start, 5*8+\offset
133 /*CFI_REL_OFFSET ss, 4*8+\offset*/
134 CFI_REL_OFFSET rsp, 3*8+\offset
135 /*CFI_REL_OFFSET rflags, 2*8+\offset*/
136 /*CFI_REL_OFFSET cs, 1*8+\offset*/
137 CFI_REL_OFFSET rip, 0*8+\offset
138 .endm
139
140/*
141 * initial frame state for exceptions with error code (and interrupts
142 * with vector already pushed)
143 */
144 .macro XCPT_FRAME start=1 offset=0
145 INTR_FRAME \start, 1*8+\offset
146 .endm
147
148/*
149 * frame that enables passing a complete pt_regs to a C function.
150 */
151 .macro DEFAULT_FRAME start=1 offset=0
152 XCPT_FRAME \start, ORIG_RAX+\offset
153 CFI_REL_OFFSET rdi, RDI+\offset
154 CFI_REL_OFFSET rsi, RSI+\offset
155 CFI_REL_OFFSET rdx, RDX+\offset
156 CFI_REL_OFFSET rcx, RCX+\offset
157 CFI_REL_OFFSET rax, RAX+\offset
158 CFI_REL_OFFSET r8, R8+\offset
159 CFI_REL_OFFSET r9, R9+\offset
160 CFI_REL_OFFSET r10, R10+\offset
161 CFI_REL_OFFSET r11, R11+\offset
162 CFI_REL_OFFSET rbx, RBX+\offset
163 CFI_REL_OFFSET rbp, RBP+\offset
164 CFI_REL_OFFSET r12, R12+\offset
165 CFI_REL_OFFSET r13, R13+\offset
166 CFI_REL_OFFSET r14, R14+\offset
167 CFI_REL_OFFSET r15, R15+\offset
168 .endm
169
170/*
171 * 64bit SYSCALL instruction entry. Up to 6 arguments in registers. 113 * 64bit SYSCALL instruction entry. Up to 6 arguments in registers.
172 * 114 *
173 * 64bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11, 115 * 64bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
@@ -196,12 +138,6 @@ ENDPROC(native_usergs_sysret64)
196 */ 138 */
197 139
198ENTRY(system_call) 140ENTRY(system_call)
199 CFI_STARTPROC simple
200 CFI_SIGNAL_FRAME
201 CFI_DEF_CFA rsp,0
202 CFI_REGISTER rip,rcx
203 /*CFI_REGISTER rflags,r11*/
204
205 /* 141 /*
206 * Interrupts are off on entry. 142 * Interrupts are off on entry.
207 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON, 143 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
@@ -219,8 +155,8 @@ GLOBAL(system_call_after_swapgs)
219 movq PER_CPU_VAR(cpu_current_top_of_stack),%rsp 155 movq PER_CPU_VAR(cpu_current_top_of_stack),%rsp
220 156
221 /* Construct struct pt_regs on stack */ 157 /* Construct struct pt_regs on stack */
222 pushq_cfi $__USER_DS /* pt_regs->ss */ 158 pushq $__USER_DS /* pt_regs->ss */
223 pushq_cfi PER_CPU_VAR(rsp_scratch) /* pt_regs->sp */ 159 pushq PER_CPU_VAR(rsp_scratch) /* pt_regs->sp */
224 /* 160 /*
225 * Re-enable interrupts. 161 * Re-enable interrupts.
226 * We use 'rsp_scratch' as a scratch space, hence irq-off block above 162 * We use 'rsp_scratch' as a scratch space, hence irq-off block above
@@ -229,22 +165,20 @@ GLOBAL(system_call_after_swapgs)
229 * with using rsp_scratch: 165 * with using rsp_scratch:
230 */ 166 */
231 ENABLE_INTERRUPTS(CLBR_NONE) 167 ENABLE_INTERRUPTS(CLBR_NONE)
232 pushq_cfi %r11 /* pt_regs->flags */ 168 pushq %r11 /* pt_regs->flags */
233 pushq_cfi $__USER_CS /* pt_regs->cs */ 169 pushq $__USER_CS /* pt_regs->cs */
234 pushq_cfi %rcx /* pt_regs->ip */ 170 pushq %rcx /* pt_regs->ip */
235 CFI_REL_OFFSET rip,0 171 pushq %rax /* pt_regs->orig_ax */
236 pushq_cfi_reg rax /* pt_regs->orig_ax */ 172 pushq %rdi /* pt_regs->di */
237 pushq_cfi_reg rdi /* pt_regs->di */ 173 pushq %rsi /* pt_regs->si */
238 pushq_cfi_reg rsi /* pt_regs->si */ 174 pushq %rdx /* pt_regs->dx */
239 pushq_cfi_reg rdx /* pt_regs->dx */ 175 pushq %rcx /* pt_regs->cx */
240 pushq_cfi_reg rcx /* pt_regs->cx */ 176 pushq $-ENOSYS /* pt_regs->ax */
241 pushq_cfi $-ENOSYS /* pt_regs->ax */ 177 pushq %r8 /* pt_regs->r8 */
242 pushq_cfi_reg r8 /* pt_regs->r8 */ 178 pushq %r9 /* pt_regs->r9 */
243 pushq_cfi_reg r9 /* pt_regs->r9 */ 179 pushq %r10 /* pt_regs->r10 */
244 pushq_cfi_reg r10 /* pt_regs->r10 */ 180 pushq %r11 /* pt_regs->r11 */
245 pushq_cfi_reg r11 /* pt_regs->r11 */
246 sub $(6*8),%rsp /* pt_regs->bp,bx,r12-15 not saved */ 181 sub $(6*8),%rsp /* pt_regs->bp,bx,r12-15 not saved */
247 CFI_ADJUST_CFA_OFFSET 6*8
248 182
249 testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) 183 testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
250 jnz tracesys 184 jnz tracesys
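
The push sequence above is easiest to check against the structure it builds. A sketch, assuming the 4.1-era arch/x86/include/asm/ptrace.h layout (lowest address first; note the final sub $(6*8),%rsp only allocates the first six slots, it never writes them on this fast path):

    struct pt_regs64_sketch {
        unsigned long r15, r14, r13, r12, bp, bx; /* allocated, not saved   */
        unsigned long r11, r10, r9, r8;
        unsigned long ax;                         /* preset to -ENOSYS      */
        unsigned long cx, dx, si, di;             /* syscall arguments      */
        unsigned long orig_ax;                    /* syscall number (%rax)  */
        unsigned long ip;                         /* from %rcx, per SYSCALL */
        unsigned long cs, flags;                  /* flags from %r11        */
        unsigned long sp;                         /* from rsp_scratch       */
        unsigned long ss;
    };
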
@@ -282,13 +216,9 @@ system_call_fastpath:
282 testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) 216 testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
283 jnz int_ret_from_sys_call_irqs_off /* Go to the slow path */ 217 jnz int_ret_from_sys_call_irqs_off /* Go to the slow path */
284 218
285 CFI_REMEMBER_STATE
286
287 RESTORE_C_REGS_EXCEPT_RCX_R11 219 RESTORE_C_REGS_EXCEPT_RCX_R11
288 movq RIP(%rsp),%rcx 220 movq RIP(%rsp),%rcx
289 CFI_REGISTER rip,rcx
290 movq EFLAGS(%rsp),%r11 221 movq EFLAGS(%rsp),%r11
291 /*CFI_REGISTER rflags,r11*/
292 movq RSP(%rsp),%rsp 222 movq RSP(%rsp),%rsp
293 /* 223 /*
294 * 64bit SYSRET restores rip from rcx, 224 * 64bit SYSRET restores rip from rcx,
@@ -307,8 +237,6 @@ system_call_fastpath:
307 */ 237 */
308 USERGS_SYSRET64 238 USERGS_SYSRET64
309 239
310 CFI_RESTORE_STATE
311
312 /* Do syscall entry tracing */ 240 /* Do syscall entry tracing */
313tracesys: 241tracesys:
314 movq %rsp, %rdi 242 movq %rsp, %rdi
@@ -374,9 +302,9 @@ int_careful:
374 jnc int_very_careful 302 jnc int_very_careful
375 TRACE_IRQS_ON 303 TRACE_IRQS_ON
376 ENABLE_INTERRUPTS(CLBR_NONE) 304 ENABLE_INTERRUPTS(CLBR_NONE)
377 pushq_cfi %rdi 305 pushq %rdi
378 SCHEDULE_USER 306 SCHEDULE_USER
379 popq_cfi %rdi 307 popq %rdi
380 DISABLE_INTERRUPTS(CLBR_NONE) 308 DISABLE_INTERRUPTS(CLBR_NONE)
381 TRACE_IRQS_OFF 309 TRACE_IRQS_OFF
382 jmp int_with_check 310 jmp int_with_check
@@ -389,10 +317,10 @@ int_very_careful:
389 /* Check for syscall exit trace */ 317 /* Check for syscall exit trace */
390 testl $_TIF_WORK_SYSCALL_EXIT,%edx 318 testl $_TIF_WORK_SYSCALL_EXIT,%edx
391 jz int_signal 319 jz int_signal
392 pushq_cfi %rdi 320 pushq %rdi
393 leaq 8(%rsp),%rdi # &ptregs -> arg1 321 leaq 8(%rsp),%rdi # &ptregs -> arg1
394 call syscall_trace_leave 322 call syscall_trace_leave
395 popq_cfi %rdi 323 popq %rdi
396 andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi 324 andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
397 jmp int_restore_rest 325 jmp int_restore_rest
398 326
@@ -475,27 +403,21 @@ syscall_return:
475 * perf profiles. Nothing jumps here. 403 * perf profiles. Nothing jumps here.
476 */ 404 */
477syscall_return_via_sysret: 405syscall_return_via_sysret:
478 CFI_REMEMBER_STATE
479 /* rcx and r11 are already restored (see code above) */ 406 /* rcx and r11 are already restored (see code above) */
480 RESTORE_C_REGS_EXCEPT_RCX_R11 407 RESTORE_C_REGS_EXCEPT_RCX_R11
481 movq RSP(%rsp),%rsp 408 movq RSP(%rsp),%rsp
482 USERGS_SYSRET64 409 USERGS_SYSRET64
483 CFI_RESTORE_STATE
484 410
485opportunistic_sysret_failed: 411opportunistic_sysret_failed:
486 SWAPGS 412 SWAPGS
487 jmp restore_c_regs_and_iret 413 jmp restore_c_regs_and_iret
488 CFI_ENDPROC
489END(system_call) 414END(system_call)
490 415
491 416
492 .macro FORK_LIKE func 417 .macro FORK_LIKE func
493ENTRY(stub_\func) 418ENTRY(stub_\func)
494 CFI_STARTPROC
495 DEFAULT_FRAME 0, 8 /* offset 8: return address */
496 SAVE_EXTRA_REGS 8 419 SAVE_EXTRA_REGS 8
497 jmp sys_\func 420 jmp sys_\func
498 CFI_ENDPROC
499END(stub_\func) 421END(stub_\func)
500 .endm 422 .endm
501 423
@@ -504,8 +426,6 @@ END(stub_\func)
504 FORK_LIKE vfork 426 FORK_LIKE vfork
505 427
506ENTRY(stub_execve) 428ENTRY(stub_execve)
507 CFI_STARTPROC
508 DEFAULT_FRAME 0, 8
509 call sys_execve 429 call sys_execve
510return_from_execve: 430return_from_execve:
511 testl %eax, %eax 431 testl %eax, %eax
@@ -515,11 +435,9 @@ return_from_execve:
5151: 4351:
516 /* must use IRET code path (pt_regs->cs may have changed) */ 436 /* must use IRET code path (pt_regs->cs may have changed) */
517 addq $8, %rsp 437 addq $8, %rsp
518 CFI_ADJUST_CFA_OFFSET -8
519 ZERO_EXTRA_REGS 438 ZERO_EXTRA_REGS
520 movq %rax,RAX(%rsp) 439 movq %rax,RAX(%rsp)
521 jmp int_ret_from_sys_call 440 jmp int_ret_from_sys_call
522 CFI_ENDPROC
523END(stub_execve) 441END(stub_execve)
524/* 442/*
525 * Remaining execve stubs are only 7 bytes long. 443 * Remaining execve stubs are only 7 bytes long.
@@ -527,32 +445,23 @@ END(stub_execve)
527 */ 445 */
528 .align 8 446 .align 8
529GLOBAL(stub_execveat) 447GLOBAL(stub_execveat)
530 CFI_STARTPROC
531 DEFAULT_FRAME 0, 8
532 call sys_execveat 448 call sys_execveat
533 jmp return_from_execve 449 jmp return_from_execve
534 CFI_ENDPROC
535END(stub_execveat) 450END(stub_execveat)
536 451
537#if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION) 452#if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
538 .align 8 453 .align 8
539GLOBAL(stub_x32_execve) 454GLOBAL(stub_x32_execve)
540GLOBAL(stub32_execve) 455GLOBAL(stub32_execve)
541 CFI_STARTPROC
542 DEFAULT_FRAME 0, 8
543 call compat_sys_execve 456 call compat_sys_execve
544 jmp return_from_execve 457 jmp return_from_execve
545 CFI_ENDPROC
546END(stub32_execve) 458END(stub32_execve)
547END(stub_x32_execve) 459END(stub_x32_execve)
548 .align 8 460 .align 8
549GLOBAL(stub_x32_execveat) 461GLOBAL(stub_x32_execveat)
550GLOBAL(stub32_execveat) 462GLOBAL(stub32_execveat)
551 CFI_STARTPROC
552 DEFAULT_FRAME 0, 8
553 call compat_sys_execveat 463 call compat_sys_execveat
554 jmp return_from_execve 464 jmp return_from_execve
555 CFI_ENDPROC
556END(stub32_execveat) 465END(stub32_execveat)
557END(stub_x32_execveat) 466END(stub_x32_execveat)
558#endif 467#endif
@@ -562,8 +471,6 @@ END(stub_x32_execveat)
562 * This cannot be done with SYSRET, so use the IRET return path instead. 471 * This cannot be done with SYSRET, so use the IRET return path instead.
563 */ 472 */
564ENTRY(stub_rt_sigreturn) 473ENTRY(stub_rt_sigreturn)
565 CFI_STARTPROC
566 DEFAULT_FRAME 0, 8
567 /* 474 /*
568 * SAVE_EXTRA_REGS result is not normally needed: 475 * SAVE_EXTRA_REGS result is not normally needed:
569 * sigreturn overwrites all pt_regs->GPREGS. 476 * sigreturn overwrites all pt_regs->GPREGS.
@@ -575,21 +482,16 @@ ENTRY(stub_rt_sigreturn)
575 call sys_rt_sigreturn 482 call sys_rt_sigreturn
576return_from_stub: 483return_from_stub:
577 addq $8, %rsp 484 addq $8, %rsp
578 CFI_ADJUST_CFA_OFFSET -8
579 RESTORE_EXTRA_REGS 485 RESTORE_EXTRA_REGS
580 movq %rax,RAX(%rsp) 486 movq %rax,RAX(%rsp)
581 jmp int_ret_from_sys_call 487 jmp int_ret_from_sys_call
582 CFI_ENDPROC
583END(stub_rt_sigreturn) 488END(stub_rt_sigreturn)
584 489
585#ifdef CONFIG_X86_X32_ABI 490#ifdef CONFIG_X86_X32_ABI
586ENTRY(stub_x32_rt_sigreturn) 491ENTRY(stub_x32_rt_sigreturn)
587 CFI_STARTPROC
588 DEFAULT_FRAME 0, 8
589 SAVE_EXTRA_REGS 8 492 SAVE_EXTRA_REGS 8
590 call sys32_x32_rt_sigreturn 493 call sys32_x32_rt_sigreturn
591 jmp return_from_stub 494 jmp return_from_stub
592 CFI_ENDPROC
593END(stub_x32_rt_sigreturn) 495END(stub_x32_rt_sigreturn)
594#endif 496#endif
595 497
@@ -599,12 +501,11 @@ END(stub_x32_rt_sigreturn)
599 * rdi: prev task we switched from 501 * rdi: prev task we switched from
600 */ 502 */
601ENTRY(ret_from_fork) 503ENTRY(ret_from_fork)
602 DEFAULT_FRAME
603 504
604 LOCK ; btr $TIF_FORK,TI_flags(%r8) 505 LOCK ; btr $TIF_FORK,TI_flags(%r8)
605 506
606 pushq_cfi $0x0002 507 pushq $0x0002
607 popfq_cfi # reset kernel eflags 508 popfq # reset kernel eflags
608 509
609 call schedule_tail # rdi: 'prev' task parameter 510 call schedule_tail # rdi: 'prev' task parameter
610 511
@@ -628,7 +529,6 @@ ENTRY(ret_from_fork)
628 movl $0, RAX(%rsp) 529 movl $0, RAX(%rsp)
629 RESTORE_EXTRA_REGS 530 RESTORE_EXTRA_REGS
630 jmp int_ret_from_sys_call 531 jmp int_ret_from_sys_call
631 CFI_ENDPROC
632END(ret_from_fork) 532END(ret_from_fork)
633 533
634/* 534/*
@@ -637,16 +537,13 @@ END(ret_from_fork)
637 */ 537 */
638 .align 8 538 .align 8
639ENTRY(irq_entries_start) 539ENTRY(irq_entries_start)
640 INTR_FRAME
641 vector=FIRST_EXTERNAL_VECTOR 540 vector=FIRST_EXTERNAL_VECTOR
642 .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR) 541 .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
643 pushq_cfi $(~vector+0x80) /* Note: always in signed byte range */ 542 pushq $(~vector+0x80) /* Note: always in signed byte range */
644 vector=vector+1 543 vector=vector+1
645 jmp common_interrupt 544 jmp common_interrupt
646 CFI_ADJUST_CFA_OFFSET -8
647 .align 8 545 .align 8
648 .endr 546 .endr
649 CFI_ENDPROC
650END(irq_entries_start) 547END(irq_entries_start)
651 548
652/* 549/*
@@ -688,17 +585,7 @@ END(irq_entries_start)
688 movq %rsp, %rsi 585 movq %rsp, %rsi
689 incl PER_CPU_VAR(irq_count) 586 incl PER_CPU_VAR(irq_count)
690 cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp 587 cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
691 CFI_DEF_CFA_REGISTER rsi
692 pushq %rsi 588 pushq %rsi
693 /*
694 * For debugger:
695 * "CFA (Current Frame Address) is the value on stack + offset"
696 */
697 CFI_ESCAPE 0x0f /* DW_CFA_def_cfa_expression */, 6, \
698 0x77 /* DW_OP_breg7 (rsp) */, 0, \
699 0x06 /* DW_OP_deref */, \
700 0x08 /* DW_OP_const1u */, SIZEOF_PTREGS-RBP, \
701 0x22 /* DW_OP_plus */
702 /* We entered an interrupt context - irqs are off: */ 589 /* We entered an interrupt context - irqs are off: */
703 TRACE_IRQS_OFF 590 TRACE_IRQS_OFF
704 591
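The CFI_ESCAPE deleted here was the one genuinely non-trivial annotation in the file: after the switch to the irq stack, the CFA is no longer rsp plus a constant, so a DWARF expression recomputed it through the %rsi backlink pushed just above. Decoded into C (a sketch of the unwinder's arithmetic, not kernel code; SIZEOF_PTREGS and RBP are the asm-offsets constants used nearby):

    unsigned long irq_stack_cfa(unsigned long rsp)
    {
        /* DW_OP_breg7(rsp)+0 then DW_OP_deref: fetch the saved
         * process-stack pointer stored by "pushq %rsi" above */
        unsigned long saved_rsi = *(unsigned long *)rsp;

        /* DW_OP_const1u SIZEOF_PTREGS-RBP, DW_OP_plus: skip over
         * the partial pt_regs to the caller's frame */
        return saved_rsi + (SIZEOF_PTREGS - RBP);
    }
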
@@ -711,7 +598,6 @@ END(irq_entries_start)
711 */ 598 */
712 .p2align CONFIG_X86_L1_CACHE_SHIFT 599 .p2align CONFIG_X86_L1_CACHE_SHIFT
713common_interrupt: 600common_interrupt:
714 XCPT_FRAME
715 ASM_CLAC 601 ASM_CLAC
716 addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */ 602 addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */
717 interrupt do_IRQ 603 interrupt do_IRQ
@@ -723,11 +609,8 @@ ret_from_intr:
723 609
724 /* Restore saved previous stack */ 610 /* Restore saved previous stack */
725 popq %rsi 611 popq %rsi
726 CFI_DEF_CFA rsi,SIZEOF_PTREGS-RBP /* reg/off reset after def_cfa_expr */
727 /* return code expects complete pt_regs - adjust rsp accordingly: */ 612 /* return code expects complete pt_regs - adjust rsp accordingly: */
728 leaq -RBP(%rsi),%rsp 613 leaq -RBP(%rsi),%rsp
729 CFI_DEF_CFA_REGISTER rsp
730 CFI_ADJUST_CFA_OFFSET RBP
731 614
732 testb $3, CS(%rsp) 615 testb $3, CS(%rsp)
733 jz retint_kernel 616 jz retint_kernel
@@ -743,7 +626,6 @@ retint_check:
743 LOCKDEP_SYS_EXIT_IRQ 626 LOCKDEP_SYS_EXIT_IRQ
744 movl TI_flags(%rcx),%edx 627 movl TI_flags(%rcx),%edx
745 andl %edi,%edx 628 andl %edi,%edx
746 CFI_REMEMBER_STATE
747 jnz retint_careful 629 jnz retint_careful
748 630
749retint_swapgs: /* return to user-space */ 631retint_swapgs: /* return to user-space */
@@ -807,8 +689,8 @@ native_irq_return_iret:
807 689
808#ifdef CONFIG_X86_ESPFIX64 690#ifdef CONFIG_X86_ESPFIX64
809native_irq_return_ldt: 691native_irq_return_ldt:
810 pushq_cfi %rax 692 pushq %rax
811 pushq_cfi %rdi 693 pushq %rdi
812 SWAPGS 694 SWAPGS
813 movq PER_CPU_VAR(espfix_waddr),%rdi 695 movq PER_CPU_VAR(espfix_waddr),%rdi
814 movq %rax,(0*8)(%rdi) /* RAX */ 696 movq %rax,(0*8)(%rdi) /* RAX */
@@ -823,24 +705,23 @@ native_irq_return_ldt:
823 movq (5*8)(%rsp),%rax /* RSP */ 705 movq (5*8)(%rsp),%rax /* RSP */
824 movq %rax,(4*8)(%rdi) 706 movq %rax,(4*8)(%rdi)
825 andl $0xffff0000,%eax 707 andl $0xffff0000,%eax
826 popq_cfi %rdi 708 popq %rdi
827 orq PER_CPU_VAR(espfix_stack),%rax 709 orq PER_CPU_VAR(espfix_stack),%rax
828 SWAPGS 710 SWAPGS
829 movq %rax,%rsp 711 movq %rax,%rsp
830 popq_cfi %rax 712 popq %rax
831 jmp native_irq_return_iret 713 jmp native_irq_return_iret
832#endif 714#endif
833 715
834 /* edi: workmask, edx: work */ 716 /* edi: workmask, edx: work */
835retint_careful: 717retint_careful:
836 CFI_RESTORE_STATE
837 bt $TIF_NEED_RESCHED,%edx 718 bt $TIF_NEED_RESCHED,%edx
838 jnc retint_signal 719 jnc retint_signal
839 TRACE_IRQS_ON 720 TRACE_IRQS_ON
840 ENABLE_INTERRUPTS(CLBR_NONE) 721 ENABLE_INTERRUPTS(CLBR_NONE)
841 pushq_cfi %rdi 722 pushq %rdi
842 SCHEDULE_USER 723 SCHEDULE_USER
843 popq_cfi %rdi 724 popq %rdi
844 GET_THREAD_INFO(%rcx) 725 GET_THREAD_INFO(%rcx)
845 DISABLE_INTERRUPTS(CLBR_NONE) 726 DISABLE_INTERRUPTS(CLBR_NONE)
846 TRACE_IRQS_OFF 727 TRACE_IRQS_OFF
@@ -862,7 +743,6 @@ retint_signal:
862 GET_THREAD_INFO(%rcx) 743 GET_THREAD_INFO(%rcx)
863 jmp retint_with_reschedule 744 jmp retint_with_reschedule
864 745
865 CFI_ENDPROC
866END(common_interrupt) 746END(common_interrupt)
867 747
868/* 748/*
@@ -870,13 +750,11 @@ END(common_interrupt)
870 */ 750 */
871.macro apicinterrupt3 num sym do_sym 751.macro apicinterrupt3 num sym do_sym
872ENTRY(\sym) 752ENTRY(\sym)
873 INTR_FRAME
874 ASM_CLAC 753 ASM_CLAC
875 pushq_cfi $~(\num) 754 pushq $~(\num)
876.Lcommon_\sym: 755.Lcommon_\sym:
877 interrupt \do_sym 756 interrupt \do_sym
878 jmp ret_from_intr 757 jmp ret_from_intr
879 CFI_ENDPROC
880END(\sym) 758END(\sym)
881.endm 759.endm
882 760
@@ -959,24 +837,17 @@ ENTRY(\sym)
959 .error "using shift_ist requires paranoid=1" 837 .error "using shift_ist requires paranoid=1"
960 .endif 838 .endif
961 839
962 .if \has_error_code
963 XCPT_FRAME
964 .else
965 INTR_FRAME
966 .endif
967
968 ASM_CLAC 840 ASM_CLAC
969 PARAVIRT_ADJUST_EXCEPTION_FRAME 841 PARAVIRT_ADJUST_EXCEPTION_FRAME
970 842
971 .ifeq \has_error_code 843 .ifeq \has_error_code
972 pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ 844 pushq $-1 /* ORIG_RAX: no syscall to restart */
973 .endif 845 .endif
974 846
975 ALLOC_PT_GPREGS_ON_STACK 847 ALLOC_PT_GPREGS_ON_STACK
976 848
977 .if \paranoid 849 .if \paranoid
978 .if \paranoid == 1 850 .if \paranoid == 1
979 CFI_REMEMBER_STATE
980 testb $3, CS(%rsp) /* If coming from userspace, switch */ 851 testb $3, CS(%rsp) /* If coming from userspace, switch */
981 jnz 1f /* stacks. */ 852 jnz 1f /* stacks. */
982 .endif 853 .endif
@@ -986,8 +857,6 @@ ENTRY(\sym)
986 .endif 857 .endif
987 /* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */ 858 /* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */
988 859
989 DEFAULT_FRAME 0
990
991 .if \paranoid 860 .if \paranoid
992 .if \shift_ist != -1 861 .if \shift_ist != -1
993 TRACE_IRQS_OFF_DEBUG /* reload IDT in case of recursion */ 862 TRACE_IRQS_OFF_DEBUG /* reload IDT in case of recursion */
@@ -1023,7 +892,6 @@ ENTRY(\sym)
1023 .endif 892 .endif
1024 893
1025 .if \paranoid == 1 894 .if \paranoid == 1
1026 CFI_RESTORE_STATE
1027 /* 895 /*
1028 * Paranoid entry from userspace. Switch stacks and treat it 896 * Paranoid entry from userspace. Switch stacks and treat it
1029 * as a normal entry. This means that paranoid handlers 897 * as a normal entry. This means that paranoid handlers
@@ -1032,7 +900,6 @@ ENTRY(\sym)
10321: 9001:
1033 call error_entry 901 call error_entry
1034 902
1035 DEFAULT_FRAME 0
1036 903
1037 movq %rsp,%rdi /* pt_regs pointer */ 904 movq %rsp,%rdi /* pt_regs pointer */
1038 call sync_regs 905 call sync_regs
@@ -1051,8 +918,6 @@ ENTRY(\sym)
1051 918
1052 jmp error_exit /* %ebx: no swapgs flag */ 919 jmp error_exit /* %ebx: no swapgs flag */
1053 .endif 920 .endif
1054
1055 CFI_ENDPROC
1056END(\sym) 921END(\sym)
1057.endm 922.endm
1058 923
@@ -1085,17 +950,15 @@ idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0
1085 /* Reload gs selector with exception handling */ 950 /* Reload gs selector with exception handling */
1086 /* edi: new selector */ 951 /* edi: new selector */
1087ENTRY(native_load_gs_index) 952ENTRY(native_load_gs_index)
1088 CFI_STARTPROC 953 pushfq
1089 pushfq_cfi
1090 DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI) 954 DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
1091 SWAPGS 955 SWAPGS
1092gs_change: 956gs_change:
1093 movl %edi,%gs 957 movl %edi,%gs
10942: mfence /* workaround */ 9582: mfence /* workaround */
1095 SWAPGS 959 SWAPGS
1096 popfq_cfi 960 popfq
1097 ret 961 ret
1098 CFI_ENDPROC
1099END(native_load_gs_index) 962END(native_load_gs_index)
1100 963
1101 _ASM_EXTABLE(gs_change,bad_gs) 964 _ASM_EXTABLE(gs_change,bad_gs)
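
native_load_gs_index wraps the selector load in pushfq/cli/popfq because nothing may observe the window between the two SWAPGS instructions. A C-level model of the sequence (a sketch; local_irq_save stands in for pushfq/cli, and the gs_change fault fixup via bad_gs is elided):

    static void load_gs_index_model(unsigned int selector)
    {
        unsigned long flags;

        local_irq_save(flags);                   /* pushfq; cli         */
        native_swapgs();                         /* expose user gs base */
        asm volatile("movl %0, %%gs" :: "r" (selector)); /* may fault   */
        native_swapgs();                         /* back to kernel base */
        local_irq_restore(flags);                /* popfq               */
    }
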
@@ -1110,22 +973,15 @@ bad_gs:
1110 973
1111/* Call softirq on interrupt stack. Interrupts are off. */ 974/* Call softirq on interrupt stack. Interrupts are off. */
1112ENTRY(do_softirq_own_stack) 975ENTRY(do_softirq_own_stack)
1113 CFI_STARTPROC 976 pushq %rbp
1114 pushq_cfi %rbp
1115 CFI_REL_OFFSET rbp,0
1116 mov %rsp,%rbp 977 mov %rsp,%rbp
1117 CFI_DEF_CFA_REGISTER rbp
1118 incl PER_CPU_VAR(irq_count) 978 incl PER_CPU_VAR(irq_count)
1119 cmove PER_CPU_VAR(irq_stack_ptr),%rsp 979 cmove PER_CPU_VAR(irq_stack_ptr),%rsp
1120 push %rbp # backlink for old unwinder 980 push %rbp # backlink for old unwinder
1121 call __do_softirq 981 call __do_softirq
1122 leaveq 982 leaveq
1123 CFI_RESTORE rbp
1124 CFI_DEF_CFA_REGISTER rsp
1125 CFI_ADJUST_CFA_OFFSET -8
1126 decl PER_CPU_VAR(irq_count) 983 decl PER_CPU_VAR(irq_count)
1127 ret 984 ret
1128 CFI_ENDPROC
1129END(do_softirq_own_stack) 985END(do_softirq_own_stack)
1130 986
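The "backlink for old unwinder" push is the mechanism this commit bets on kernel-wide: with frame pointers, every frame starts with the saved %rbp followed by its return address, so a backtrace needs no DWARF at all. A minimal sketch of such a walk (struct stack_frame mirrors the kernel's asm/stacktrace.h; the real walker adds on-stack bounds checks):

    struct stack_frame {
        struct stack_frame *next_frame;
        unsigned long return_address;
    };

    static void walk_frames(unsigned long bp)
    {
        struct stack_frame *frame = (struct stack_frame *)bp;

        while (frame) {         /* real code validates the pointer range */
            pr_info("  %pS\n", (void *)frame->return_address);
            frame = frame->next_frame;
        }
    }
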
1131#ifdef CONFIG_XEN 987#ifdef CONFIG_XEN
@@ -1145,28 +1001,22 @@ idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
1145 * activation and restart the handler using the previous one. 1001 * activation and restart the handler using the previous one.
1146 */ 1002 */
1147ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs) 1003ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
1148 CFI_STARTPROC
1149/* 1004/*
1150 * Since we don't modify %rdi, evtchn_do_upcall(struct pt_regs *) will 1005 * Since we don't modify %rdi, evtchn_do_upcall(struct pt_regs *) will
1151 * see the correct pointer to the pt_regs 1006 * see the correct pointer to the pt_regs
1152 */ 1007 */
1153 movq %rdi, %rsp # we don't return, adjust the stack frame 1008 movq %rdi, %rsp # we don't return, adjust the stack frame
1154 CFI_ENDPROC
1155 DEFAULT_FRAME
115611: incl PER_CPU_VAR(irq_count) 100911: incl PER_CPU_VAR(irq_count)
1157 movq %rsp,%rbp 1010 movq %rsp,%rbp
1158 CFI_DEF_CFA_REGISTER rbp
1159 cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp 1011 cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
1160 pushq %rbp # backlink for old unwinder 1012 pushq %rbp # backlink for old unwinder
1161 call xen_evtchn_do_upcall 1013 call xen_evtchn_do_upcall
1162 popq %rsp 1014 popq %rsp
1163 CFI_DEF_CFA_REGISTER rsp
1164 decl PER_CPU_VAR(irq_count) 1015 decl PER_CPU_VAR(irq_count)
1165#ifndef CONFIG_PREEMPT 1016#ifndef CONFIG_PREEMPT
1166 call xen_maybe_preempt_hcall 1017 call xen_maybe_preempt_hcall
1167#endif 1018#endif
1168 jmp error_exit 1019 jmp error_exit
1169 CFI_ENDPROC
1170END(xen_do_hypervisor_callback) 1020END(xen_do_hypervisor_callback)
1171 1021
1172/* 1022/*
@@ -1183,16 +1033,8 @@ END(xen_do_hypervisor_callback)
1183 * with its current contents: any discrepancy means we are in category 1. 1033 * with its current contents: any discrepancy means we are in category 1.
1184 */ 1034 */
1185ENTRY(xen_failsafe_callback) 1035ENTRY(xen_failsafe_callback)
1186 INTR_FRAME 1 (6*8)
1187 /*CFI_REL_OFFSET gs,GS*/
1188 /*CFI_REL_OFFSET fs,FS*/
1189 /*CFI_REL_OFFSET es,ES*/
1190 /*CFI_REL_OFFSET ds,DS*/
1191 CFI_REL_OFFSET r11,8
1192 CFI_REL_OFFSET rcx,0
1193 movl %ds,%ecx 1036 movl %ds,%ecx
1194 cmpw %cx,0x10(%rsp) 1037 cmpw %cx,0x10(%rsp)
1195 CFI_REMEMBER_STATE
1196 jne 1f 1038 jne 1f
1197 movl %es,%ecx 1039 movl %es,%ecx
1198 cmpw %cx,0x18(%rsp) 1040 cmpw %cx,0x18(%rsp)
@@ -1205,29 +1047,21 @@ ENTRY(xen_failsafe_callback)
1205 jne 1f 1047 jne 1f
1206 /* All segments match their saved values => Category 2 (Bad IRET). */ 1048 /* All segments match their saved values => Category 2 (Bad IRET). */
1207 movq (%rsp),%rcx 1049 movq (%rsp),%rcx
1208 CFI_RESTORE rcx
1209 movq 8(%rsp),%r11 1050 movq 8(%rsp),%r11
1210 CFI_RESTORE r11
1211 addq $0x30,%rsp 1051 addq $0x30,%rsp
1212 CFI_ADJUST_CFA_OFFSET -0x30 1052 pushq $0 /* RIP */
1213 pushq_cfi $0 /* RIP */ 1053 pushq %r11
1214 pushq_cfi %r11 1054 pushq %rcx
1215 pushq_cfi %rcx
1216 jmp general_protection 1055 jmp general_protection
1217 CFI_RESTORE_STATE
12181: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */ 10561: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
1219 movq (%rsp),%rcx 1057 movq (%rsp),%rcx
1220 CFI_RESTORE rcx
1221 movq 8(%rsp),%r11 1058 movq 8(%rsp),%r11
1222 CFI_RESTORE r11
1223 addq $0x30,%rsp 1059 addq $0x30,%rsp
1224 CFI_ADJUST_CFA_OFFSET -0x30 1060 pushq $-1 /* orig_ax = -1 => not a system call */
1225 pushq_cfi $-1 /* orig_ax = -1 => not a system call */
1226 ALLOC_PT_GPREGS_ON_STACK 1061 ALLOC_PT_GPREGS_ON_STACK
1227 SAVE_C_REGS 1062 SAVE_C_REGS
1228 SAVE_EXTRA_REGS 1063 SAVE_EXTRA_REGS
1229 jmp error_exit 1064 jmp error_exit
1230 CFI_ENDPROC
1231END(xen_failsafe_callback) 1065END(xen_failsafe_callback)
1232 1066
1233apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ 1067apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
@@ -1263,7 +1097,6 @@ idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector(
1263 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise 1097 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
1264 */ 1098 */
1265ENTRY(paranoid_entry) 1099ENTRY(paranoid_entry)
1266 XCPT_FRAME 1 15*8
1267 cld 1100 cld
1268 SAVE_C_REGS 8 1101 SAVE_C_REGS 8
1269 SAVE_EXTRA_REGS 8 1102 SAVE_EXTRA_REGS 8
@@ -1275,7 +1108,6 @@ ENTRY(paranoid_entry)
1275 SWAPGS 1108 SWAPGS
1276 xorl %ebx,%ebx 1109 xorl %ebx,%ebx
12771: ret 11101: ret
1278 CFI_ENDPROC
1279END(paranoid_entry) 1111END(paranoid_entry)
1280 1112
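The ebx contract in the comment above ("Return: ebx=0: need swapgs on exit, ebx=1: otherwise") comes from an MSR_GS_BASE test in the partly elided body. As a C sketch: a negative base lies in the kernel half of the address space, meaning swapgs already happened.

    static int paranoid_entry_model(void)
    {
        u64 gsbase;

        rdmsrl(MSR_GS_BASE, gsbase);
        if ((s64)gsbase < 0)
            return 1;       /* ebx=1: kernel gs base, no swapgs on exit */
        native_swapgs();
        return 0;           /* ebx=0: swapgs again on exit */
    }
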
1281/* 1113/*
@@ -1290,7 +1122,6 @@ END(paranoid_entry)
1290 */ 1122 */
1291/* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */ 1123/* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */
1292ENTRY(paranoid_exit) 1124ENTRY(paranoid_exit)
1293 DEFAULT_FRAME
1294 DISABLE_INTERRUPTS(CLBR_NONE) 1125 DISABLE_INTERRUPTS(CLBR_NONE)
1295 TRACE_IRQS_OFF_DEBUG 1126 TRACE_IRQS_OFF_DEBUG
1296 testl %ebx,%ebx /* swapgs needed? */ 1127 testl %ebx,%ebx /* swapgs needed? */
@@ -1305,7 +1136,6 @@ paranoid_exit_restore:
1305 RESTORE_C_REGS 1136 RESTORE_C_REGS
1306 REMOVE_PT_GPREGS_FROM_STACK 8 1137 REMOVE_PT_GPREGS_FROM_STACK 8
1307 INTERRUPT_RETURN 1138 INTERRUPT_RETURN
1308 CFI_ENDPROC
1309END(paranoid_exit) 1139END(paranoid_exit)
1310 1140
1311/* 1141/*
@@ -1313,7 +1143,6 @@ END(paranoid_exit)
1313 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise 1143 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
1314 */ 1144 */
1315ENTRY(error_entry) 1145ENTRY(error_entry)
1316 XCPT_FRAME 1 15*8
1317 cld 1146 cld
1318 SAVE_C_REGS 8 1147 SAVE_C_REGS 8
1319 SAVE_EXTRA_REGS 8 1148 SAVE_EXTRA_REGS 8
@@ -1333,7 +1162,6 @@ error_sti:
1333 * for these here too. 1162 * for these here too.
1334 */ 1163 */
1335error_kernelspace: 1164error_kernelspace:
1336 CFI_REL_OFFSET rcx, RCX+8
1337 incl %ebx 1165 incl %ebx
1338 leaq native_irq_return_iret(%rip),%rcx 1166 leaq native_irq_return_iret(%rip),%rcx
1339 cmpq %rcx,RIP+8(%rsp) 1167 cmpq %rcx,RIP+8(%rsp)
@@ -1357,13 +1185,11 @@ error_bad_iret:
1357 mov %rax,%rsp 1185 mov %rax,%rsp
1358 decl %ebx /* Return to usergs */ 1186 decl %ebx /* Return to usergs */
1359 jmp error_sti 1187 jmp error_sti
1360 CFI_ENDPROC
1361END(error_entry) 1188END(error_entry)
1362 1189
1363 1190
1364/* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */ 1191/* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */
1365ENTRY(error_exit) 1192ENTRY(error_exit)
1366 DEFAULT_FRAME
1367 movl %ebx,%eax 1193 movl %ebx,%eax
1368 RESTORE_EXTRA_REGS 1194 RESTORE_EXTRA_REGS
1369 DISABLE_INTERRUPTS(CLBR_NONE) 1195 DISABLE_INTERRUPTS(CLBR_NONE)
@@ -1377,12 +1203,10 @@ ENTRY(error_exit)
1377 andl %edi,%edx 1203 andl %edi,%edx
1378 jnz retint_careful 1204 jnz retint_careful
1379 jmp retint_swapgs 1205 jmp retint_swapgs
1380 CFI_ENDPROC
1381END(error_exit) 1206END(error_exit)
1382 1207
1383/* Runs on exception stack */ 1208/* Runs on exception stack */
1384ENTRY(nmi) 1209ENTRY(nmi)
1385 INTR_FRAME
1386 PARAVIRT_ADJUST_EXCEPTION_FRAME 1210 PARAVIRT_ADJUST_EXCEPTION_FRAME
1387 /* 1211 /*
1388 * We allow breakpoints in NMIs. If a breakpoint occurs, then 1212 * We allow breakpoints in NMIs. If a breakpoint occurs, then
@@ -1417,8 +1241,7 @@ ENTRY(nmi)
1417 */ 1241 */
1418 1242
1419 /* Use %rdx as our temp variable throughout */ 1243 /* Use %rdx as our temp variable throughout */
1420 pushq_cfi %rdx 1244 pushq %rdx
1421 CFI_REL_OFFSET rdx, 0
1422 1245
1423 /* 1246 /*
1424 * If %cs was not the kernel segment, then the NMI triggered in user 1247 * If %cs was not the kernel segment, then the NMI triggered in user
@@ -1452,8 +1275,6 @@ ENTRY(nmi)
1452 jb first_nmi 1275 jb first_nmi
1453 /* Ah, it is within the NMI stack, treat it as nested */ 1276 /* Ah, it is within the NMI stack, treat it as nested */
1454 1277
1455 CFI_REMEMBER_STATE
1456
1457nested_nmi: 1278nested_nmi:
1458 /* 1279 /*
1459 * Do nothing if we interrupted the fixup in repeat_nmi. 1280 * Do nothing if we interrupted the fixup in repeat_nmi.
@@ -1471,26 +1292,22 @@ nested_nmi:
1471 /* Set up the interrupted NMI's stack to jump to repeat_nmi */ 1292 /* Set up the interrupted NMI's stack to jump to repeat_nmi */
1472 leaq -1*8(%rsp), %rdx 1293 leaq -1*8(%rsp), %rdx
1473 movq %rdx, %rsp 1294 movq %rdx, %rsp
1474 CFI_ADJUST_CFA_OFFSET 1*8
1475 leaq -10*8(%rsp), %rdx 1295 leaq -10*8(%rsp), %rdx
1476 pushq_cfi $__KERNEL_DS 1296 pushq $__KERNEL_DS
1477 pushq_cfi %rdx 1297 pushq %rdx
1478 pushfq_cfi 1298 pushfq
1479 pushq_cfi $__KERNEL_CS 1299 pushq $__KERNEL_CS
1480 pushq_cfi $repeat_nmi 1300 pushq $repeat_nmi
1481 1301
1482 /* Put stack back */ 1302 /* Put stack back */
1483 addq $(6*8), %rsp 1303 addq $(6*8), %rsp
1484 CFI_ADJUST_CFA_OFFSET -6*8
1485 1304
1486nested_nmi_out: 1305nested_nmi_out:
1487 popq_cfi %rdx 1306 popq %rdx
1488 CFI_RESTORE rdx
1489 1307
1490 /* No need to check faults here */ 1308 /* No need to check faults here */
1491 INTERRUPT_RETURN 1309 INTERRUPT_RETURN
1492 1310
1493 CFI_RESTORE_STATE
1494first_nmi: 1311first_nmi:
1495 /* 1312 /*
1496 * Because nested NMIs will use the pushed location that we 1313 * Because nested NMIs will use the pushed location that we
@@ -1529,22 +1346,19 @@ first_nmi:
1529 */ 1346 */
1530 /* Do not pop rdx, nested NMIs will corrupt that part of the stack */ 1347 /* Do not pop rdx, nested NMIs will corrupt that part of the stack */
1531 movq (%rsp), %rdx 1348 movq (%rsp), %rdx
1532 CFI_RESTORE rdx
1533 1349
1534 /* Set the NMI executing variable on the stack. */ 1350 /* Set the NMI executing variable on the stack. */
1535 pushq_cfi $1 1351 pushq $1
1536 1352
1537 /* 1353 /*
1538 * Leave room for the "copied" frame 1354 * Leave room for the "copied" frame
1539 */ 1355 */
1540 subq $(5*8), %rsp 1356 subq $(5*8), %rsp
1541 CFI_ADJUST_CFA_OFFSET 5*8
1542 1357
1543 /* Copy the stack frame to the Saved frame */ 1358 /* Copy the stack frame to the Saved frame */
1544 .rept 5 1359 .rept 5
1545 pushq_cfi 11*8(%rsp) 1360 pushq 11*8(%rsp)
1546 .endr 1361 .endr
1547 CFI_DEF_CFA_OFFSET 5*8
1548 1362
1549 /* Everything up to here is safe from nested NMIs */ 1363 /* Everything up to here is safe from nested NMIs */
1550 1364
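The copying dance is easier to follow as a picture of the NMI stack top once first_nmi has set it up. A sketch read directly from the pushes in this hunk (lowest address first); treat it as an aid rather than authoritative layout documentation:

    struct iret_frame { u64 rip, cs, rflags, rsp, ss; };

    struct nmi_stack_top_sketch {
        struct iret_frame saved;     /* pristine "Saved frame"; repeat_nmi
                                        re-arms the copy from it          */
        struct iret_frame copied;    /* frame the final iret consumes; a
                                        nested NMI may rewrite it         */
        u64 nmi_executing;           /* the "pushq $1" above              */
        u64 scratch_rdx;             /* from the initial "pushq %rdx"     */
        struct iret_frame original;  /* pushed by the CPU on delivery     */
    };
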
@@ -1567,12 +1381,10 @@ repeat_nmi:
1567 1381
1568 /* Make another copy, this one may be modified by nested NMIs */ 1382 /* Make another copy, this one may be modified by nested NMIs */
1569 addq $(10*8), %rsp 1383 addq $(10*8), %rsp
1570 CFI_ADJUST_CFA_OFFSET -10*8
1571 .rept 5 1384 .rept 5
1572 pushq_cfi -6*8(%rsp) 1385 pushq -6*8(%rsp)
1573 .endr 1386 .endr
1574 subq $(5*8), %rsp 1387 subq $(5*8), %rsp
1575 CFI_DEF_CFA_OFFSET 5*8
1576end_repeat_nmi: 1388end_repeat_nmi:
1577 1389
1578 /* 1390 /*
@@ -1580,7 +1392,7 @@ end_repeat_nmi:
1580 * NMI if the first NMI took an exception and reset our iret stack 1392 * NMI if the first NMI took an exception and reset our iret stack
1581 * so that we repeat another NMI. 1393 * so that we repeat another NMI.
1582 */ 1394 */
1583 pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ 1395 pushq $-1 /* ORIG_RAX: no syscall to restart */
1584 ALLOC_PT_GPREGS_ON_STACK 1396 ALLOC_PT_GPREGS_ON_STACK
1585 1397
1586 /* 1398 /*
@@ -1591,7 +1403,6 @@ end_repeat_nmi:
1591 * exceptions might do. 1403 * exceptions might do.
1592 */ 1404 */
1593 call paranoid_entry 1405 call paranoid_entry
1594 DEFAULT_FRAME 0
1595 1406
1596 /* 1407 /*
1597 * Save off the CR2 register. If we take a page fault in the NMI then 1408 * Save off the CR2 register. If we take a page fault in the NMI then
@@ -1628,13 +1439,10 @@ nmi_restore:
1628 /* Clear the NMI executing stack variable */ 1439 /* Clear the NMI executing stack variable */
1629 movq $0, 5*8(%rsp) 1440 movq $0, 5*8(%rsp)
1630 jmp irq_return 1441 jmp irq_return
1631 CFI_ENDPROC
1632END(nmi) 1442END(nmi)
1633 1443
1634ENTRY(ignore_sysret) 1444ENTRY(ignore_sysret)
1635 CFI_STARTPROC
1636 mov $-ENOSYS,%eax 1445 mov $-ENOSYS,%eax
1637 sysret 1446 sysret
1638 CFI_ENDPROC
1639END(ignore_sysret) 1447END(ignore_sysret)
1640 1448
diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
index 00933d5e992f..9b0ca8fe80fc 100644
--- a/arch/x86/lib/atomic64_386_32.S
+++ b/arch/x86/lib/atomic64_386_32.S
@@ -11,26 +11,23 @@
11 11
12#include <linux/linkage.h> 12#include <linux/linkage.h>
13#include <asm/alternative-asm.h> 13#include <asm/alternative-asm.h>
14#include <asm/dwarf2.h>
15 14
16/* if you want SMP support, implement these with real spinlocks */ 15/* if you want SMP support, implement these with real spinlocks */
17.macro LOCK reg 16.macro LOCK reg
18 pushfl_cfi 17 pushfl
19 cli 18 cli
20.endm 19.endm
21 20
22.macro UNLOCK reg 21.macro UNLOCK reg
23 popfl_cfi 22 popfl
24.endm 23.endm
25 24
26#define BEGIN(op) \ 25#define BEGIN(op) \
27.macro endp; \ 26.macro endp; \
28 CFI_ENDPROC; \
29ENDPROC(atomic64_##op##_386); \ 27ENDPROC(atomic64_##op##_386); \
30.purgem endp; \ 28.purgem endp; \
31.endm; \ 29.endm; \
32ENTRY(atomic64_##op##_386); \ 30ENTRY(atomic64_##op##_386); \
33 CFI_STARTPROC; \
34 LOCK v; 31 LOCK v;
35 32
36#define ENDP endp 33#define ENDP endp
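
The LOCK/UNLOCK pair is the whole trick of the 386 fallback: those CPUs lack cmpxchg8b, so 64-bit atomicity is provided only against local interrupts (the file's own comment notes SMP would need real spinlocks). A C model of any one of the generated operations:

    static long long atomic64_read_386_model(const long long *v)
    {
        unsigned long flags;
        long long val;

        local_irq_save(flags);          /* LOCK:   pushfl; cli        */
        val = *v;                       /* two 32-bit accesses in asm */
        local_irq_restore(flags);       /* UNLOCK: popfl              */
        return val;
    }
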
diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
index 082a85167a5b..db3ae85440ff 100644
--- a/arch/x86/lib/atomic64_cx8_32.S
+++ b/arch/x86/lib/atomic64_cx8_32.S
@@ -11,7 +11,6 @@
11 11
12#include <linux/linkage.h> 12#include <linux/linkage.h>
13#include <asm/alternative-asm.h> 13#include <asm/alternative-asm.h>
14#include <asm/dwarf2.h>
15 14
16.macro read64 reg 15.macro read64 reg
17 movl %ebx, %eax 16 movl %ebx, %eax
@@ -22,16 +21,11 @@
22.endm 21.endm
23 22
24ENTRY(atomic64_read_cx8) 23ENTRY(atomic64_read_cx8)
25 CFI_STARTPROC
26
27 read64 %ecx 24 read64 %ecx
28 ret 25 ret
29 CFI_ENDPROC
30ENDPROC(atomic64_read_cx8) 26ENDPROC(atomic64_read_cx8)
31 27
32ENTRY(atomic64_set_cx8) 28ENTRY(atomic64_set_cx8)
33 CFI_STARTPROC
34
351: 291:
36/* we don't need LOCK_PREFIX since aligned 64-bit writes 30/* we don't need LOCK_PREFIX since aligned 64-bit writes
37 * are atomic on 586 and newer */ 31 * are atomic on 586 and newer */
@@ -39,28 +33,23 @@ ENTRY(atomic64_set_cx8)
39 jne 1b 33 jne 1b
40 34
41 ret 35 ret
42 CFI_ENDPROC
43ENDPROC(atomic64_set_cx8) 36ENDPROC(atomic64_set_cx8)
44 37
45ENTRY(atomic64_xchg_cx8) 38ENTRY(atomic64_xchg_cx8)
46 CFI_STARTPROC
47
481: 391:
49 LOCK_PREFIX 40 LOCK_PREFIX
50 cmpxchg8b (%esi) 41 cmpxchg8b (%esi)
51 jne 1b 42 jne 1b
52 43
53 ret 44 ret
54 CFI_ENDPROC
55ENDPROC(atomic64_xchg_cx8) 45ENDPROC(atomic64_xchg_cx8)
56 46
57.macro addsub_return func ins insc 47.macro addsub_return func ins insc
58ENTRY(atomic64_\func\()_return_cx8) 48ENTRY(atomic64_\func\()_return_cx8)
59 CFI_STARTPROC 49 pushl %ebp
60 pushl_cfi_reg ebp 50 pushl %ebx
61 pushl_cfi_reg ebx 51 pushl %esi
62 pushl_cfi_reg esi 52 pushl %edi
63 pushl_cfi_reg edi
64 53
65 movl %eax, %esi 54 movl %eax, %esi
66 movl %edx, %edi 55 movl %edx, %edi
@@ -79,12 +68,11 @@ ENTRY(atomic64_\func\()_return_cx8)
7910: 6810:
80 movl %ebx, %eax 69 movl %ebx, %eax
81 movl %ecx, %edx 70 movl %ecx, %edx
82 popl_cfi_reg edi 71 popl %edi
83 popl_cfi_reg esi 72 popl %esi
84 popl_cfi_reg ebx 73 popl %ebx
85 popl_cfi_reg ebp 74 popl %ebp
86 ret 75 ret
87 CFI_ENDPROC
88ENDPROC(atomic64_\func\()_return_cx8) 76ENDPROC(atomic64_\func\()_return_cx8)
89.endm 77.endm
90 78
@@ -93,8 +81,7 @@ addsub_return sub sub sbb
93 81
94.macro incdec_return func ins insc 82.macro incdec_return func ins insc
95ENTRY(atomic64_\func\()_return_cx8) 83ENTRY(atomic64_\func\()_return_cx8)
96 CFI_STARTPROC 84 pushl %ebx
97 pushl_cfi_reg ebx
98 85
99 read64 %esi 86 read64 %esi
1001: 871:
@@ -109,9 +96,8 @@ ENTRY(atomic64_\func\()_return_cx8)
10910: 9610:
110 movl %ebx, %eax 97 movl %ebx, %eax
111 movl %ecx, %edx 98 movl %ecx, %edx
112 popl_cfi_reg ebx 99 popl %ebx
113 ret 100 ret
114 CFI_ENDPROC
115ENDPROC(atomic64_\func\()_return_cx8) 101ENDPROC(atomic64_\func\()_return_cx8)
116.endm 102.endm
117 103
@@ -119,8 +105,7 @@ incdec_return inc add adc
119incdec_return dec sub sbb 105incdec_return dec sub sbb
120 106
121ENTRY(atomic64_dec_if_positive_cx8) 107ENTRY(atomic64_dec_if_positive_cx8)
122 CFI_STARTPROC 108 pushl %ebx
123 pushl_cfi_reg ebx
124 109
125 read64 %esi 110 read64 %esi
1261: 1111:
@@ -136,18 +121,16 @@ ENTRY(atomic64_dec_if_positive_cx8)
1362: 1212:
137 movl %ebx, %eax 122 movl %ebx, %eax
138 movl %ecx, %edx 123 movl %ecx, %edx
139 popl_cfi_reg ebx 124 popl %ebx
140 ret 125 ret
141 CFI_ENDPROC
142ENDPROC(atomic64_dec_if_positive_cx8) 126ENDPROC(atomic64_dec_if_positive_cx8)
143 127
144ENTRY(atomic64_add_unless_cx8) 128ENTRY(atomic64_add_unless_cx8)
145 CFI_STARTPROC 129 pushl %ebp
146 pushl_cfi_reg ebp 130 pushl %ebx
147 pushl_cfi_reg ebx
148/* these just push these two parameters on the stack */ 131/* these just push these two parameters on the stack */
149 pushl_cfi_reg edi 132 pushl %edi
150 pushl_cfi_reg ecx 133 pushl %ecx
151 134
152 movl %eax, %ebp 135 movl %eax, %ebp
153 movl %edx, %edi 136 movl %edx, %edi
@@ -168,21 +151,18 @@ ENTRY(atomic64_add_unless_cx8)
168 movl $1, %eax 151 movl $1, %eax
1693: 1523:
170 addl $8, %esp 153 addl $8, %esp
171 CFI_ADJUST_CFA_OFFSET -8 154 popl %ebx
172 popl_cfi_reg ebx 155 popl %ebp
173 popl_cfi_reg ebp
174 ret 156 ret
1754: 1574:
176 cmpl %edx, 4(%esp) 158 cmpl %edx, 4(%esp)
177 jne 2b 159 jne 2b
178 xorl %eax, %eax 160 xorl %eax, %eax
179 jmp 3b 161 jmp 3b
180 CFI_ENDPROC
181ENDPROC(atomic64_add_unless_cx8) 162ENDPROC(atomic64_add_unless_cx8)
182 163
183ENTRY(atomic64_inc_not_zero_cx8) 164ENTRY(atomic64_inc_not_zero_cx8)
184 CFI_STARTPROC 165 pushl %ebx
185 pushl_cfi_reg ebx
186 166
187 read64 %esi 167 read64 %esi
1881: 1681:
@@ -199,7 +179,6 @@ ENTRY(atomic64_inc_not_zero_cx8)
199 179
200 movl $1, %eax 180 movl $1, %eax
2013: 1813:
202 popl_cfi_reg ebx 182 popl %ebx
203 ret 183 ret
204 CFI_ENDPROC
205ENDPROC(atomic64_inc_not_zero_cx8) 184ENDPROC(atomic64_inc_not_zero_cx8)
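
All of the *_return_cx8 helpers above share one shape: snapshot the 64-bit value, compute the result, and retry with cmpxchg8b until no other CPU raced. A sketch using the kernel's cmpxchg64 (the asm's read64 obtains its atomic snapshot via a cmpxchg8b trick rather than two plain loads):

    static long long add_return_cx8_model(long long i, long long *v)
    {
        long long old, new;

        do {
            old = *v;                   /* read64: atomic in the asm */
            new = old + i;
        } while (cmpxchg64(v, old, new) != old);

        return new;
    }
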
diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
index 9bc944a91274..c1e623209853 100644
--- a/arch/x86/lib/checksum_32.S
+++ b/arch/x86/lib/checksum_32.S
@@ -26,7 +26,6 @@
26 */ 26 */
27 27
28#include <linux/linkage.h> 28#include <linux/linkage.h>
29#include <asm/dwarf2.h>
30#include <asm/errno.h> 29#include <asm/errno.h>
31#include <asm/asm.h> 30#include <asm/asm.h>
32 31
@@ -50,9 +49,8 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
50 * alignment for the unrolled loop. 49 * alignment for the unrolled loop.
51 */ 50 */
52ENTRY(csum_partial) 51ENTRY(csum_partial)
53 CFI_STARTPROC 52 pushl %esi
54 pushl_cfi_reg esi 53 pushl %ebx
55 pushl_cfi_reg ebx
56 movl 20(%esp),%eax # Function arg: unsigned int sum 54 movl 20(%esp),%eax # Function arg: unsigned int sum
57 movl 16(%esp),%ecx # Function arg: int len 55 movl 16(%esp),%ecx # Function arg: int len
58 movl 12(%esp),%esi # Function arg: unsigned char *buff 56 movl 12(%esp),%esi # Function arg: unsigned char *buff
@@ -129,10 +127,9 @@ ENTRY(csum_partial)
129 jz 8f 127 jz 8f
130 roll $8, %eax 128 roll $8, %eax
1318: 1298:
132 popl_cfi_reg ebx 130 popl %ebx
133 popl_cfi_reg esi 131 popl %esi
134 ret 132 ret
135 CFI_ENDPROC
136ENDPROC(csum_partial) 133ENDPROC(csum_partial)
137 134
138#else 135#else
@@ -140,9 +137,8 @@ ENDPROC(csum_partial)
140/* Version for PentiumII/PPro */ 137/* Version for PentiumII/PPro */
141 138
142ENTRY(csum_partial) 139ENTRY(csum_partial)
143 CFI_STARTPROC 140 pushl %esi
144 pushl_cfi_reg esi 141 pushl %ebx
145 pushl_cfi_reg ebx
146 movl 20(%esp),%eax # Function arg: unsigned int sum 142 movl 20(%esp),%eax # Function arg: unsigned int sum
147 movl 16(%esp),%ecx # Function arg: int len 143 movl 16(%esp),%ecx # Function arg: int len
148 movl 12(%esp),%esi # Function arg: const unsigned char *buf 144 movl 12(%esp),%esi # Function arg: const unsigned char *buf
@@ -249,10 +245,9 @@ ENTRY(csum_partial)
249 jz 90f 245 jz 90f
250 roll $8, %eax 246 roll $8, %eax
25190: 24790:
252 popl_cfi_reg ebx 248 popl %ebx
253 popl_cfi_reg esi 249 popl %esi
254 ret 250 ret
255 CFI_ENDPROC
256ENDPROC(csum_partial) 251ENDPROC(csum_partial)
257 252
258#endif 253#endif
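
Both csum_partial variants compute the same function; the unrolling and adcl chains are just scheduling for different CPUs. A simplified reference model (one's-complement accumulation, 32 bits at a time; alignment handling and the 1-3 trailing bytes are elided, and the final 16-bit fold is left to csum_fold as in the real code):

    static unsigned int csum_partial_model(const void *buff, int len,
                                           unsigned int sum)
    {
        const unsigned int *p = buff;
        unsigned long long acc = sum;

        for (; len >= 4; len -= 4)
            acc += *p++;                        /* the addl/adcl chain */

        /* fold accumulated carries back in (end-around carry) */
        acc = (acc & 0xffffffffULL) + (acc >> 32);
        acc = (acc & 0xffffffffULL) + (acc >> 32);
        return (unsigned int)acc;
    }
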
@@ -287,12 +282,10 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
287#define FP 12 282#define FP 12
288 283
289ENTRY(csum_partial_copy_generic) 284ENTRY(csum_partial_copy_generic)
290 CFI_STARTPROC
291 subl $4,%esp 285 subl $4,%esp
292 CFI_ADJUST_CFA_OFFSET 4 286 pushl %edi
293 pushl_cfi_reg edi 287 pushl %esi
294 pushl_cfi_reg esi 288 pushl %ebx
295 pushl_cfi_reg ebx
296 movl ARGBASE+16(%esp),%eax # sum 289 movl ARGBASE+16(%esp),%eax # sum
297 movl ARGBASE+12(%esp),%ecx # len 290 movl ARGBASE+12(%esp),%ecx # len
298 movl ARGBASE+4(%esp),%esi # src 291 movl ARGBASE+4(%esp),%esi # src
@@ -401,12 +394,11 @@ DST( movb %cl, (%edi) )
401 394
402.previous 395.previous
403 396
404 popl_cfi_reg ebx 397 popl %ebx
405 popl_cfi_reg esi 398 popl %esi
406 popl_cfi_reg edi 399 popl %edi
407 popl_cfi %ecx # equivalent to addl $4,%esp 400 popl %ecx # equivalent to addl $4,%esp
408 ret 401 ret
409 CFI_ENDPROC
410ENDPROC(csum_partial_copy_generic) 402ENDPROC(csum_partial_copy_generic)
411 403
412#else 404#else
@@ -426,10 +418,9 @@ ENDPROC(csum_partial_copy_generic)
426#define ARGBASE 12 418#define ARGBASE 12
427 419
428ENTRY(csum_partial_copy_generic) 420ENTRY(csum_partial_copy_generic)
429 CFI_STARTPROC 421 pushl %ebx
430 pushl_cfi_reg ebx 422 pushl %edi
431 pushl_cfi_reg edi 423 pushl %esi
432 pushl_cfi_reg esi
433 movl ARGBASE+4(%esp),%esi #src 424 movl ARGBASE+4(%esp),%esi #src
434 movl ARGBASE+8(%esp),%edi #dst 425 movl ARGBASE+8(%esp),%edi #dst
435 movl ARGBASE+12(%esp),%ecx #len 426 movl ARGBASE+12(%esp),%ecx #len
@@ -489,11 +480,10 @@ DST( movb %dl, (%edi) )
489 jmp 7b 480 jmp 7b
490.previous 481.previous
491 482
492 popl_cfi_reg esi 483 popl %esi
493 popl_cfi_reg edi 484 popl %edi
494 popl_cfi_reg ebx 485 popl %ebx
495 ret 486 ret
496 CFI_ENDPROC
497ENDPROC(csum_partial_copy_generic) 487ENDPROC(csum_partial_copy_generic)
498 488
499#undef ROUND 489#undef ROUND
diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
index e67e579c93bd..a2fe51b00cce 100644
--- a/arch/x86/lib/clear_page_64.S
+++ b/arch/x86/lib/clear_page_64.S
@@ -1,5 +1,4 @@
1#include <linux/linkage.h> 1#include <linux/linkage.h>
2#include <asm/dwarf2.h>
3#include <asm/cpufeature.h> 2#include <asm/cpufeature.h>
4#include <asm/alternative-asm.h> 3#include <asm/alternative-asm.h>
5 4
@@ -15,7 +14,6 @@
15 * %rdi - page 14 * %rdi - page
16 */ 15 */
17ENTRY(clear_page) 16ENTRY(clear_page)
18 CFI_STARTPROC
19 17
20 ALTERNATIVE_2 "jmp clear_page_orig", "", X86_FEATURE_REP_GOOD, \ 18 ALTERNATIVE_2 "jmp clear_page_orig", "", X86_FEATURE_REP_GOOD, \
21 "jmp clear_page_c_e", X86_FEATURE_ERMS 19 "jmp clear_page_c_e", X86_FEATURE_ERMS
@@ -24,11 +22,9 @@ ENTRY(clear_page)
24 xorl %eax,%eax 22 xorl %eax,%eax
25 rep stosq 23 rep stosq
26 ret 24 ret
27 CFI_ENDPROC
28ENDPROC(clear_page) 25ENDPROC(clear_page)
29 26
30ENTRY(clear_page_orig) 27ENTRY(clear_page_orig)
31 CFI_STARTPROC
32 28
33 xorl %eax,%eax 29 xorl %eax,%eax
34 movl $4096/64,%ecx 30 movl $4096/64,%ecx
@@ -48,14 +44,11 @@ ENTRY(clear_page_orig)
48 jnz .Lloop 44 jnz .Lloop
49 nop 45 nop
50 ret 46 ret
51 CFI_ENDPROC
52ENDPROC(clear_page_orig) 47ENDPROC(clear_page_orig)
53 48
54ENTRY(clear_page_c_e) 49ENTRY(clear_page_c_e)
55 CFI_STARTPROC
56 movl $4096,%ecx 50 movl $4096,%ecx
57 xorl %eax,%eax 51 xorl %eax,%eax
58 rep stosb 52 rep stosb
59 ret 53 ret
60 CFI_ENDPROC
61ENDPROC(clear_page_c_e) 54ENDPROC(clear_page_c_e)
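
The ALTERNATIVE_2 at the top of clear_page is boot-time code patching rather than a branch, but its runtime effect is a three-way dispatch. As C (a sketch; clear_page_rep_stosq is a hypothetical name for the inline rep stosq body, and the patched kernel pays no test at all):

    static void clear_page_model(void *page)
    {
        if (boot_cpu_has(X86_FEATURE_ERMS))
            clear_page_c_e(page);         /* rep stosb, byte-granular  */
        else if (boot_cpu_has(X86_FEATURE_REP_GOOD))
            clear_page_rep_stosq(page);   /* hypothetical: inline body */
        else
            clear_page_orig(page);        /* unrolled movq loop        */
    }
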
diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
index 40a172541ee2..9b330242e740 100644
--- a/arch/x86/lib/cmpxchg16b_emu.S
+++ b/arch/x86/lib/cmpxchg16b_emu.S
@@ -6,7 +6,6 @@
6 * 6 *
7 */ 7 */
8#include <linux/linkage.h> 8#include <linux/linkage.h>
9#include <asm/dwarf2.h>
10#include <asm/percpu.h> 9#include <asm/percpu.h>
11 10
12.text 11.text
@@ -21,7 +20,6 @@
21 * %al : Operation successful 20 * %al : Operation successful
22 */ 21 */
23ENTRY(this_cpu_cmpxchg16b_emu) 22ENTRY(this_cpu_cmpxchg16b_emu)
24CFI_STARTPROC
25 23
26# 24#
27# Emulate 'cmpxchg16b %gs:(%rsi)' except we return the result in %al not 25# Emulate 'cmpxchg16b %gs:(%rsi)' except we return the result in %al not
@@ -32,7 +30,7 @@ CFI_STARTPROC
32# *atomic* on a single cpu (as provided by the this_cpu_xx class of 30# *atomic* on a single cpu (as provided by the this_cpu_xx class of
33# macros). 31# macros).
34# 32#
35 pushfq_cfi 33 pushfq
36 cli 34 cli
37 35
38 cmpq PER_CPU_VAR((%rsi)), %rax 36 cmpq PER_CPU_VAR((%rsi)), %rax
@@ -43,17 +41,13 @@ CFI_STARTPROC
43 movq %rbx, PER_CPU_VAR((%rsi)) 41 movq %rbx, PER_CPU_VAR((%rsi))
44 movq %rcx, PER_CPU_VAR(8(%rsi)) 42 movq %rcx, PER_CPU_VAR(8(%rsi))
45 43
46 CFI_REMEMBER_STATE 44 popfq
47 popfq_cfi
48 mov $1, %al 45 mov $1, %al
49 ret 46 ret
50 47
51 CFI_RESTORE_STATE
52.Lnot_same: 48.Lnot_same:
53 popfq_cfi 49 popfq
54 xor %al,%al 50 xor %al,%al
55 ret 51 ret
56 52
57CFI_ENDPROC
58
59ENDPROC(this_cpu_cmpxchg16b_emu) 53ENDPROC(this_cpu_cmpxchg16b_emu)
diff --git a/arch/x86/lib/cmpxchg8b_emu.S b/arch/x86/lib/cmpxchg8b_emu.S
index b4807fce5177..ad5349778490 100644
--- a/arch/x86/lib/cmpxchg8b_emu.S
+++ b/arch/x86/lib/cmpxchg8b_emu.S
@@ -7,7 +7,6 @@
7 */ 7 */
8 8
9#include <linux/linkage.h> 9#include <linux/linkage.h>
10#include <asm/dwarf2.h>
11 10
12.text 11.text
13 12
@@ -20,14 +19,13 @@
20 * %ecx : high 32 bits of new value 19 * %ecx : high 32 bits of new value
21 */ 20 */
22ENTRY(cmpxchg8b_emu) 21ENTRY(cmpxchg8b_emu)
23CFI_STARTPROC
24 22
25# 23#
26# Emulate 'cmpxchg8b (%esi)' on UP except we don't 24# Emulate 'cmpxchg8b (%esi)' on UP except we don't
27# set the whole ZF thing (caller will just compare 25# set the whole ZF thing (caller will just compare
28# eax:edx with the expected value) 26# eax:edx with the expected value)
29# 27#
30 pushfl_cfi 28 pushfl
31 cli 29 cli
32 30
33 cmpl (%esi), %eax 31 cmpl (%esi), %eax
@@ -38,18 +36,15 @@ CFI_STARTPROC
38 movl %ebx, (%esi) 36 movl %ebx, (%esi)
39 movl %ecx, 4(%esi) 37 movl %ecx, 4(%esi)
40 38
41 CFI_REMEMBER_STATE 39 popfl
42 popfl_cfi
43 ret 40 ret
44 41
45 CFI_RESTORE_STATE
46.Lnot_same: 42.Lnot_same:
47 movl (%esi), %eax 43 movl (%esi), %eax
48.Lhalf_same: 44.Lhalf_same:
49 movl 4(%esi), %edx 45 movl 4(%esi), %edx
50 46
51 popfl_cfi 47 popfl
52 ret 48 ret
53 49
54CFI_ENDPROC
55ENDPROC(cmpxchg8b_emu) 50ENDPROC(cmpxchg8b_emu)
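
cmpxchg8b_emu (like the per-cpu cmpxchg16b variant above it) implements compare-and-exchange by brute force: with interrupts off, a compare and two stores are atomic on the uniprocessor systems this path serves. A C model of the 8-byte version (the asm returns the current value in edx:eax and deliberately leaves ZF for the caller to compute):

    static unsigned long long cmpxchg8b_model(unsigned long long *ptr,
                                              unsigned long long old,
                                              unsigned long long new)
    {
        unsigned long flags;
        unsigned long long cur;

        local_irq_save(flags);          /* pushfl; cli */
        cur = *ptr;
        if (cur == old)
            *ptr = new;                 /* movl %ebx,(%esi); movl %ecx,4(%esi) */
        local_irq_restore(flags);       /* popfl */

        return cur;                     /* caller compares against 'old' */
    }
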
diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
index 8239dbcbf984..009f98216b7e 100644
--- a/arch/x86/lib/copy_page_64.S
+++ b/arch/x86/lib/copy_page_64.S
@@ -1,7 +1,6 @@
1/* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */ 1/* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */
2 2
3#include <linux/linkage.h> 3#include <linux/linkage.h>
4#include <asm/dwarf2.h>
5#include <asm/cpufeature.h> 4#include <asm/cpufeature.h>
6#include <asm/alternative-asm.h> 5#include <asm/alternative-asm.h>
7 6
@@ -13,22 +12,16 @@
13 */ 12 */
14 ALIGN 13 ALIGN
15ENTRY(copy_page) 14ENTRY(copy_page)
16 CFI_STARTPROC
17 ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD 15 ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD
18 movl $4096/8, %ecx 16 movl $4096/8, %ecx
19 rep movsq 17 rep movsq
20 ret 18 ret
21 CFI_ENDPROC
22ENDPROC(copy_page) 19ENDPROC(copy_page)
23 20
24ENTRY(copy_page_regs) 21ENTRY(copy_page_regs)
25 CFI_STARTPROC
26 subq $2*8, %rsp 22 subq $2*8, %rsp
27 CFI_ADJUST_CFA_OFFSET 2*8
28 movq %rbx, (%rsp) 23 movq %rbx, (%rsp)
29 CFI_REL_OFFSET rbx, 0
30 movq %r12, 1*8(%rsp) 24 movq %r12, 1*8(%rsp)
31 CFI_REL_OFFSET r12, 1*8
32 25
33 movl $(4096/64)-5, %ecx 26 movl $(4096/64)-5, %ecx
34 .p2align 4 27 .p2align 4
@@ -87,11 +80,7 @@ ENTRY(copy_page_regs)
87 jnz .Loop2 80 jnz .Loop2
88 81
89 movq (%rsp), %rbx 82 movq (%rsp), %rbx
90 CFI_RESTORE rbx
91 movq 1*8(%rsp), %r12 83 movq 1*8(%rsp), %r12
92 CFI_RESTORE r12
93 addq $2*8, %rsp 84 addq $2*8, %rsp
94 CFI_ADJUST_CFA_OFFSET -2*8
95 ret 85 ret
96 CFI_ENDPROC
97ENDPROC(copy_page_regs) 86ENDPROC(copy_page_regs)
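
copy_page_regs spills %rbx and %r12 by hand because it wants eight 64-bit registers for a software-pipelined, 64-bytes-per-iteration copy. Its shape in C (a sketch; the asm's first loop also issues prefetcht0 about five cache lines ahead, modelled here with __builtin_prefetch):

    static void copy_page_model(void *to, const void *from)
    {
        const unsigned long *s = from;
        unsigned long *d = to;
        int i;

        for (i = 0; i < 4096 / 64; i++) {
            __builtin_prefetch(s + 5 * 64 / sizeof(*s));
            d[0] = s[0]; d[1] = s[1]; d[2] = s[2]; d[3] = s[3];
            d[4] = s[4]; d[5] = s[5]; d[6] = s[6]; d[7] = s[7];
            s += 8;
            d += 8;
        }
    }
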
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index e4b3beee83bd..982ce34f4a9b 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -7,7 +7,6 @@
7 */ 7 */
8 8
9#include <linux/linkage.h> 9#include <linux/linkage.h>
10#include <asm/dwarf2.h>
11#include <asm/current.h> 10#include <asm/current.h>
12#include <asm/asm-offsets.h> 11#include <asm/asm-offsets.h>
13#include <asm/thread_info.h> 12#include <asm/thread_info.h>
@@ -18,7 +17,6 @@
18 17
19/* Standard copy_to_user with segment limit checking */ 18/* Standard copy_to_user with segment limit checking */
20ENTRY(_copy_to_user) 19ENTRY(_copy_to_user)
21 CFI_STARTPROC
22 GET_THREAD_INFO(%rax) 20 GET_THREAD_INFO(%rax)
23 movq %rdi,%rcx 21 movq %rdi,%rcx
24 addq %rdx,%rcx 22 addq %rdx,%rcx
@@ -30,12 +28,10 @@ ENTRY(_copy_to_user)
30 X86_FEATURE_REP_GOOD, \ 28 X86_FEATURE_REP_GOOD, \
31 "jmp copy_user_enhanced_fast_string", \ 29 "jmp copy_user_enhanced_fast_string", \
32 X86_FEATURE_ERMS 30 X86_FEATURE_ERMS
33 CFI_ENDPROC
34ENDPROC(_copy_to_user) 31ENDPROC(_copy_to_user)
35 32
36/* Standard copy_from_user with segment limit checking */ 33/* Standard copy_from_user with segment limit checking */
37ENTRY(_copy_from_user) 34ENTRY(_copy_from_user)
38 CFI_STARTPROC
39 GET_THREAD_INFO(%rax) 35 GET_THREAD_INFO(%rax)
40 movq %rsi,%rcx 36 movq %rsi,%rcx
41 addq %rdx,%rcx 37 addq %rdx,%rcx
@@ -47,14 +43,12 @@ ENTRY(_copy_from_user)
47 X86_FEATURE_REP_GOOD, \ 43 X86_FEATURE_REP_GOOD, \
48 "jmp copy_user_enhanced_fast_string", \ 44 "jmp copy_user_enhanced_fast_string", \
49 X86_FEATURE_ERMS 45 X86_FEATURE_ERMS
50 CFI_ENDPROC
51ENDPROC(_copy_from_user) 46ENDPROC(_copy_from_user)
52 47
53 .section .fixup,"ax" 48 .section .fixup,"ax"
54 /* must zero dest */ 49 /* must zero dest */
55ENTRY(bad_from_user) 50ENTRY(bad_from_user)
56bad_from_user: 51bad_from_user:
57 CFI_STARTPROC
58 movl %edx,%ecx 52 movl %edx,%ecx
59 xorl %eax,%eax 53 xorl %eax,%eax
60 rep 54 rep
@@ -62,7 +56,6 @@ bad_from_user:
62bad_to_user: 56bad_to_user:
63 movl %edx,%eax 57 movl %edx,%eax
64 ret 58 ret
65 CFI_ENDPROC
66ENDPROC(bad_from_user) 59ENDPROC(bad_from_user)
67 .previous 60 .previous
68 61
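The preamble of _copy_to_user/_copy_from_user is the access_ok check written out: compute the end of the user range, reject carry (wraparound) and anything beyond the task's addr_limit, then let alternatives pick the copy routine. As C (a sketch; copy_user_generic stands for the three feature-selected variants that follow):

    static unsigned long copy_to_user_model(void __user *dst, const void *src,
                                            unsigned long size)
    {
        unsigned long end = (unsigned long)dst + size;

        if (end < (unsigned long)dst)                       /* jc bad_to_user */
            return size;                                    /* nothing copied */
        if (end > current_thread_info()->addr_limit.seg)    /* cmpq; ja       */
            return size;
        return copy_user_generic((__force void *)dst, src, size);
    }
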
@@ -80,7 +73,6 @@ ENDPROC(bad_from_user)
80 * eax uncopied bytes or 0 if successful. 73 * eax uncopied bytes or 0 if successful.
81 */ 74 */
82ENTRY(copy_user_generic_unrolled) 75ENTRY(copy_user_generic_unrolled)
83 CFI_STARTPROC
84 ASM_STAC 76 ASM_STAC
85 cmpl $8,%edx 77 cmpl $8,%edx
86 jb 20f /* less than 8 bytes, go to byte copy loop */ 78 jb 20f /* less than 8 bytes, go to byte copy loop */
@@ -162,7 +154,6 @@ ENTRY(copy_user_generic_unrolled)
162 _ASM_EXTABLE(19b,40b) 154 _ASM_EXTABLE(19b,40b)
163 _ASM_EXTABLE(21b,50b) 155 _ASM_EXTABLE(21b,50b)
164 _ASM_EXTABLE(22b,50b) 156 _ASM_EXTABLE(22b,50b)
165 CFI_ENDPROC
166ENDPROC(copy_user_generic_unrolled) 157ENDPROC(copy_user_generic_unrolled)
167 158
168/* Some CPUs run faster using the string copy instructions. 159/* Some CPUs run faster using the string copy instructions.
@@ -184,7 +175,6 @@ ENDPROC(copy_user_generic_unrolled)
184 * eax uncopied bytes or 0 if successful. 175 * eax uncopied bytes or 0 if successful.
185 */ 176 */
186ENTRY(copy_user_generic_string) 177ENTRY(copy_user_generic_string)
187 CFI_STARTPROC
188 ASM_STAC 178 ASM_STAC
189 cmpl $8,%edx 179 cmpl $8,%edx
190 jb 2f /* less than 8 bytes, go to byte copy loop */ 180 jb 2f /* less than 8 bytes, go to byte copy loop */
@@ -209,7 +199,6 @@ ENTRY(copy_user_generic_string)
209 199
210 _ASM_EXTABLE(1b,11b) 200 _ASM_EXTABLE(1b,11b)
211 _ASM_EXTABLE(3b,12b) 201 _ASM_EXTABLE(3b,12b)
212 CFI_ENDPROC
213ENDPROC(copy_user_generic_string) 202ENDPROC(copy_user_generic_string)
214 203
215/* 204/*
@@ -225,7 +214,6 @@ ENDPROC(copy_user_generic_string)
225 * eax uncopied bytes or 0 if successful. 214 * eax uncopied bytes or 0 if successful.
226 */ 215 */
227ENTRY(copy_user_enhanced_fast_string) 216ENTRY(copy_user_enhanced_fast_string)
228 CFI_STARTPROC
229 ASM_STAC 217 ASM_STAC
230 movl %edx,%ecx 218 movl %edx,%ecx
2311: rep 2191: rep
@@ -240,7 +228,6 @@ ENTRY(copy_user_enhanced_fast_string)
240 .previous 228 .previous
241 229
242 _ASM_EXTABLE(1b,12b) 230 _ASM_EXTABLE(1b,12b)
243 CFI_ENDPROC
244ENDPROC(copy_user_enhanced_fast_string) 231ENDPROC(copy_user_enhanced_fast_string)
245 232
246/* 233/*
@@ -248,7 +235,6 @@ ENDPROC(copy_user_enhanced_fast_string)
248 * This will force destination/source out of cache for more performance. 235 * This will force destination/source out of cache for more performance.
249 */ 236 */
250ENTRY(__copy_user_nocache) 237ENTRY(__copy_user_nocache)
251 CFI_STARTPROC
252 ASM_STAC 238 ASM_STAC
253 cmpl $8,%edx 239 cmpl $8,%edx
254 jb 20f /* less then 8 bytes, go to byte copy loop */ 240 jb 20f /* less then 8 bytes, go to byte copy loop */
@@ -332,5 +318,4 @@ ENTRY(__copy_user_nocache)
332 _ASM_EXTABLE(19b,40b) 318 _ASM_EXTABLE(19b,40b)
333 _ASM_EXTABLE(21b,50b) 319 _ASM_EXTABLE(21b,50b)
334 _ASM_EXTABLE(22b,50b) 320 _ASM_EXTABLE(22b,50b)
335 CFI_ENDPROC
336ENDPROC(__copy_user_nocache) 321ENDPROC(__copy_user_nocache)
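Note what stays behind: every _ASM_EXTABLE entry survives untouched. Exception-table fixups are a separate mechanism from dwarf CFI and do not depend on asm/dwarf2.h. In kernels of this vintage the macro emits, roughly, a pair of relative offsets into the __ex_table section (a sketch from memory, not verbatim):

	_ASM_EXTABLE(1b,12b)
	# roughly expands to:
	#	.pushsection "__ex_table","a"
	#	.balign 8
	#	.long 1b - .	# offset of the faulting instruction
	#	.long 12b - .	# offset of the fixup code
	#	.popsection

So the fault-recovery paths of the user-copy routines keep working with dwarf2.h gone.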
diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
index 9734182966f3..7e48807b2fa1 100644
--- a/arch/x86/lib/csum-copy_64.S
+++ b/arch/x86/lib/csum-copy_64.S
@@ -6,7 +6,6 @@
  * for more details. No warranty for anything given at all.
  */
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/errno.h>
 #include <asm/asm.h>
 
@@ -47,23 +46,16 @@
 
 
 ENTRY(csum_partial_copy_generic)
-	CFI_STARTPROC
 	cmpl	$3*64, %edx
 	jle	.Lignore
 
 .Lignore:
 	subq	$7*8, %rsp
-	CFI_ADJUST_CFA_OFFSET 7*8
 	movq	%rbx, 2*8(%rsp)
-	CFI_REL_OFFSET rbx, 2*8
 	movq	%r12, 3*8(%rsp)
-	CFI_REL_OFFSET r12, 3*8
 	movq	%r14, 4*8(%rsp)
-	CFI_REL_OFFSET r14, 4*8
 	movq	%r13, 5*8(%rsp)
-	CFI_REL_OFFSET r13, 5*8
 	movq	%rbp, 6*8(%rsp)
-	CFI_REL_OFFSET rbp, 6*8
 
 	movq	%r8, (%rsp)
 	movq	%r9, 1*8(%rsp)
@@ -206,22 +198,14 @@ ENTRY(csum_partial_copy_generic)
 	addl	%ebx, %eax
 	adcl	%r9d, %eax	/* carry */
 
-	CFI_REMEMBER_STATE
 .Lende:
 	movq	2*8(%rsp), %rbx
-	CFI_RESTORE rbx
 	movq	3*8(%rsp), %r12
-	CFI_RESTORE r12
 	movq	4*8(%rsp), %r14
-	CFI_RESTORE r14
 	movq	5*8(%rsp), %r13
-	CFI_RESTORE r13
 	movq	6*8(%rsp), %rbp
-	CFI_RESTORE rbp
 	addq	$7*8, %rsp
-	CFI_ADJUST_CFA_OFFSET -7*8
 	ret
-	CFI_RESTORE_STATE
 
 	/* Exception handlers. Very simple, zeroing is done in the wrappers */
 .Lbad_source:
@@ -237,5 +221,4 @@ ENTRY(csum_partial_copy_generic)
 	jz	.Lende
 	movl	$-EFAULT, (%rax)
 	jmp	.Lende
-	CFI_ENDPROC
 ENDPROC(csum_partial_copy_generic)
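csum_partial_copy_generic is the one routine in this batch that needed the state-snapshot macros: its exception handlers sit after the ret, where the callee-saved registers have already been reloaded. In raw directives the deleted pattern is approximately:

	.cfi_remember_state		# snapshot: rbx/r12/r13/r14/rbp still spilled
.Lende:
	movq	2*8(%rsp), %rbx
	.cfi_restore rbx		# from here, %rbx is live in the register again
	...
	ret
	.cfi_restore_state		# cold path below unwinds like the hot body

Each new cold path has to remember to rewind the state by hand, which is where these annotations tended to rot.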
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index a4512359656a..46668cda4ffd 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -26,7 +26,6 @@
  */
 
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/page_types.h>
 #include <asm/errno.h>
 #include <asm/asm-offsets.h>
@@ -36,7 +35,6 @@
 
 	.text
 ENTRY(__get_user_1)
-	CFI_STARTPROC
 	GET_THREAD_INFO(%_ASM_DX)
 	cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
 	jae bad_get_user
@@ -45,11 +43,9 @@ ENTRY(__get_user_1)
 	xor %eax,%eax
 	ASM_CLAC
 	ret
-	CFI_ENDPROC
 ENDPROC(__get_user_1)
 
 ENTRY(__get_user_2)
-	CFI_STARTPROC
 	add $1,%_ASM_AX
 	jc bad_get_user
 	GET_THREAD_INFO(%_ASM_DX)
@@ -60,11 +56,9 @@ ENTRY(__get_user_2)
 	xor %eax,%eax
 	ASM_CLAC
 	ret
-	CFI_ENDPROC
 ENDPROC(__get_user_2)
 
 ENTRY(__get_user_4)
-	CFI_STARTPROC
 	add $3,%_ASM_AX
 	jc bad_get_user
 	GET_THREAD_INFO(%_ASM_DX)
@@ -75,11 +69,9 @@ ENTRY(__get_user_4)
 	xor %eax,%eax
 	ASM_CLAC
 	ret
-	CFI_ENDPROC
 ENDPROC(__get_user_4)
 
 ENTRY(__get_user_8)
-	CFI_STARTPROC
 #ifdef CONFIG_X86_64
 	add $7,%_ASM_AX
 	jc bad_get_user
@@ -104,28 +96,23 @@ ENTRY(__get_user_8)
 	ASM_CLAC
 	ret
 #endif
-	CFI_ENDPROC
 ENDPROC(__get_user_8)
 
 
 bad_get_user:
-	CFI_STARTPROC
 	xor %edx,%edx
 	mov $(-EFAULT),%_ASM_AX
 	ASM_CLAC
 	ret
-	CFI_ENDPROC
 END(bad_get_user)
 
 #ifdef CONFIG_X86_32
 bad_get_user_8:
-	CFI_STARTPROC
 	xor %edx,%edx
 	xor %ecx,%ecx
 	mov $(-EFAULT),%_ASM_AX
 	ASM_CLAC
 	ret
-	CFI_ENDPROC
 END(bad_get_user_8)
 #endif
 
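The getuser routines illustrate the cheapest case: they are leaves that never touch %rsp, so the startproc/endproc pair was their entire dwarf footprint. A minimal sketch, assuming CFI support, of all the unwind state __get_user_1 ever carried on x86-64:

	ENTRY(__get_user_1)
	.cfi_startproc		# default rule: CFA = %rsp+8, return address at CFA-8
	GET_THREAD_INFO(%_ASM_DX)
	...
	ret
	.cfi_endproc
	ENDPROC(__get_user_1)

Nothing in the body moves the stack pointer, so the default rule installed by .cfi_startproc held for the whole function and the deletion costs nothing.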
diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
index 05a95e713da8..33147fef3452 100644
--- a/arch/x86/lib/iomap_copy_64.S
+++ b/arch/x86/lib/iomap_copy_64.S
@@ -16,15 +16,12 @@
  */
 
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 
 /*
  * override generic version in lib/iomap_copy.c
  */
 ENTRY(__iowrite32_copy)
-	CFI_STARTPROC
 	movl %edx,%ecx
 	rep movsd
 	ret
-	CFI_ENDPROC
 ENDPROC(__iowrite32_copy)
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index b046664f5a1c..16698bba87de 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -2,7 +2,6 @@
 
 #include <linux/linkage.h>
 #include <asm/cpufeature.h>
-#include <asm/dwarf2.h>
 #include <asm/alternative-asm.h>
 
 /*
@@ -53,7 +52,6 @@ ENTRY(memcpy_erms)
 ENDPROC(memcpy_erms)
 
 ENTRY(memcpy_orig)
-	CFI_STARTPROC
 	movq %rdi, %rax
 
 	cmpq $0x20, %rdx
@@ -178,5 +176,4 @@ ENTRY(memcpy_orig)
 
 .Lend:
 	retq
-	CFI_ENDPROC
 ENDPROC(memcpy_orig)
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index 0f8a0d0331b9..ca2afdd6d98e 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -6,7 +6,6 @@
  * - Copyright 2011 Fenghua Yu <fenghua.yu@intel.com>
  */
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/cpufeature.h>
 #include <asm/alternative-asm.h>
 
@@ -27,7 +26,6 @@
 
 ENTRY(memmove)
 ENTRY(__memmove)
-	CFI_STARTPROC
 
 	/* Handle more than 32 bytes in a loop */
 	mov %rdi, %rax
@@ -207,6 +205,5 @@ ENTRY(__memmove)
 	movb %r11b, (%rdi)
 13:
 	retq
-	CFI_ENDPROC
 ENDPROC(__memmove)
 ENDPROC(memmove)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 93118fb23976..2661fad05827 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -1,7 +1,6 @@
 /* Copyright 2002 Andi Kleen, SuSE Labs */
 
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/cpufeature.h>
 #include <asm/alternative-asm.h>
 
@@ -66,7 +65,6 @@ ENTRY(memset_erms)
 ENDPROC(memset_erms)
 
 ENTRY(memset_orig)
-	CFI_STARTPROC
 	movq %rdi,%r10
 
 	/* expand byte value */
@@ -78,7 +76,6 @@ ENTRY(memset_orig)
 	movl %edi,%r9d
 	andl $7,%r9d
 	jnz .Lbad_alignment
-	CFI_REMEMBER_STATE
 .Lafter_bad_alignment:
 
 	movq %rdx,%rcx
@@ -128,7 +125,6 @@ ENTRY(memset_orig)
 	movq %r10,%rax
 	ret
 
-	CFI_RESTORE_STATE
 .Lbad_alignment:
 	cmpq $7,%rdx
 	jbe .Lhandle_7
@@ -139,5 +135,4 @@ ENTRY(memset_orig)
 	subq %r8,%rdx
 	jmp .Lafter_bad_alignment
 .Lfinal:
-	CFI_ENDPROC
 ENDPROC(memset_orig)
diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
index 3ca5218fbece..c81556409bbb 100644
--- a/arch/x86/lib/msr-reg.S
+++ b/arch/x86/lib/msr-reg.S
@@ -1,6 +1,5 @@
 #include <linux/linkage.h>
 #include <linux/errno.h>
-#include <asm/dwarf2.h>
 #include <asm/asm.h>
 #include <asm/msr.h>
 
@@ -13,9 +12,8 @@
  */
 .macro op_safe_regs op
 ENTRY(\op\()_safe_regs)
-	CFI_STARTPROC
-	pushq_cfi_reg rbx
-	pushq_cfi_reg rbp
+	pushq %rbx
+	pushq %rbp
 	movq	%rdi, %r10	/* Save pointer */
 	xorl	%r11d, %r11d	/* Return value */
 	movl	(%rdi), %eax
@@ -25,7 +23,6 @@ ENTRY(\op\()_safe_regs)
 	movl	20(%rdi), %ebp
 	movl	24(%rdi), %esi
 	movl	28(%rdi), %edi
-	CFI_REMEMBER_STATE
 1:	\op
 2:	movl	%eax, (%r10)
 	movl	%r11d, %eax	/* Return value */
@@ -35,16 +32,14 @@ ENTRY(\op\()_safe_regs)
 	movl	%ebp, 20(%r10)
 	movl	%esi, 24(%r10)
 	movl	%edi, 28(%r10)
-	popq_cfi_reg rbp
-	popq_cfi_reg rbx
+	popq %rbp
+	popq %rbx
 	ret
 3:
-	CFI_RESTORE_STATE
 	movl	$-EIO, %r11d
 	jmp	2b
 
 	_ASM_EXTABLE(1b, 3b)
-	CFI_ENDPROC
 ENDPROC(\op\()_safe_regs)
 .endm
 
@@ -52,13 +47,12 @@ ENDPROC(\op\()_safe_regs)
 
 .macro op_safe_regs op
 ENTRY(\op\()_safe_regs)
-	CFI_STARTPROC
-	pushl_cfi_reg ebx
-	pushl_cfi_reg ebp
-	pushl_cfi_reg esi
-	pushl_cfi_reg edi
-	pushl_cfi $0		/* Return value */
-	pushl_cfi %eax
+	pushl %ebx
+	pushl %ebp
+	pushl %esi
+	pushl %edi
+	pushl $0		/* Return value */
+	pushl %eax
 	movl	4(%eax), %ecx
 	movl	8(%eax), %edx
 	movl	12(%eax), %ebx
@@ -66,32 +60,28 @@ ENTRY(\op\()_safe_regs)
 	movl	24(%eax), %esi
 	movl	28(%eax), %edi
 	movl	(%eax), %eax
-	CFI_REMEMBER_STATE
 1:	\op
-2:	pushl_cfi %eax
+2:	pushl %eax
 	movl	4(%esp), %eax
-	popl_cfi (%eax)
+	popl (%eax)
 	addl	$4, %esp
-	CFI_ADJUST_CFA_OFFSET -4
 	movl	%ecx, 4(%eax)
 	movl	%edx, 8(%eax)
 	movl	%ebx, 12(%eax)
 	movl	%ebp, 20(%eax)
 	movl	%esi, 24(%eax)
 	movl	%edi, 28(%eax)
-	popl_cfi %eax
-	popl_cfi_reg edi
-	popl_cfi_reg esi
-	popl_cfi_reg ebp
-	popl_cfi_reg ebx
+	popl %eax
+	popl %edi
+	popl %esi
+	popl %ebp
+	popl %ebx
 	ret
 3:
-	CFI_RESTORE_STATE
 	movl	$-EIO, 4(%esp)
 	jmp	2b
 
 	_ASM_EXTABLE(1b, 3b)
-	CFI_ENDPROC
 ENDPROC(\op\()_safe_regs)
 .endm
 
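This file shows the other half of the cleanup: the annotating push/pop wrappers. pushq_cfi_reg bundled the push together with its two lines of bookkeeping — from memory, the removed dwarf2.h definition was approximately:

	.macro pushq_cfi_reg reg
	pushq	%\reg			# the actual push
	CFI_ADJUST_CFA_OFFSET 8		# the CFA is one slot further away
	CFI_REL_OFFSET \reg, 0		# \reg is saved at the new stack top
	.endm

so every pushq_cfi_reg rbx in the old code collapses to a plain pushq %rbx, and likewise for the pushl/popl variants on 32-bit.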
diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
index fc6ba17a7eec..e0817a12d323 100644
--- a/arch/x86/lib/putuser.S
+++ b/arch/x86/lib/putuser.S
@@ -11,7 +11,6 @@
  * return value.
  */
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/thread_info.h>
 #include <asm/errno.h>
 #include <asm/asm.h>
@@ -30,11 +29,9 @@
  * as they get called from within inline assembly.
  */
 
-#define ENTER	CFI_STARTPROC ;			\
-		GET_THREAD_INFO(%_ASM_BX)
+#define ENTER	GET_THREAD_INFO(%_ASM_BX)
 #define EXIT	ASM_CLAC ;			\
-	ret ;					\
-	CFI_ENDPROC
+	ret
 
 .text
 ENTRY(__put_user_1)
@@ -87,7 +84,6 @@ ENTRY(__put_user_8)
 ENDPROC(__put_user_8)
 
 bad_put_user:
-	CFI_STARTPROC
 	movl	$-EFAULT,%eax
 	EXIT
 END(bad_put_user)
diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
index 2322abe4da3b..40027db99140 100644
--- a/arch/x86/lib/rwsem.S
+++ b/arch/x86/lib/rwsem.S
@@ -15,7 +15,6 @@
 
 #include <linux/linkage.h>
 #include <asm/alternative-asm.h>
-#include <asm/dwarf2.h>
 
 #define __ASM_HALF_REG(reg)	__ASM_SEL(reg, e##reg)
 #define __ASM_HALF_SIZE(inst)	__ASM_SEL(inst##w, inst##l)
@@ -34,10 +33,10 @@
  */
 
 #define save_common_regs \
-	pushl_cfi_reg ecx
+	pushl %ecx
 
 #define restore_common_regs \
-	popl_cfi_reg ecx
+	popl %ecx
 
 	/* Avoid uglifying the argument copying x86-64 needs to do. */
 	.macro movq src, dst
@@ -64,50 +63,45 @@
  */
 
 #define save_common_regs \
-	pushq_cfi_reg rdi; \
-	pushq_cfi_reg rsi; \
-	pushq_cfi_reg rcx; \
-	pushq_cfi_reg r8;  \
-	pushq_cfi_reg r9;  \
-	pushq_cfi_reg r10; \
-	pushq_cfi_reg r11
+	pushq %rdi; \
+	pushq %rsi; \
+	pushq %rcx; \
+	pushq %r8;  \
+	pushq %r9;  \
+	pushq %r10; \
+	pushq %r11
 
 #define restore_common_regs \
-	popq_cfi_reg r11; \
-	popq_cfi_reg r10; \
-	popq_cfi_reg r9;  \
-	popq_cfi_reg r8;  \
-	popq_cfi_reg rcx; \
-	popq_cfi_reg rsi; \
-	popq_cfi_reg rdi
+	popq %r11; \
+	popq %r10; \
+	popq %r9;  \
+	popq %r8;  \
+	popq %rcx; \
+	popq %rsi; \
+	popq %rdi
 
 #endif
 
 /* Fix up special calling conventions */
 ENTRY(call_rwsem_down_read_failed)
-	CFI_STARTPROC
 	save_common_regs
-	__ASM_SIZE(push,_cfi_reg) __ASM_REG(dx)
+	__ASM_SIZE(push,) %__ASM_REG(dx)
 	movq %rax,%rdi
 	call rwsem_down_read_failed
-	__ASM_SIZE(pop,_cfi_reg) __ASM_REG(dx)
+	__ASM_SIZE(pop,) %__ASM_REG(dx)
 	restore_common_regs
 	ret
-	CFI_ENDPROC
 ENDPROC(call_rwsem_down_read_failed)
 
 ENTRY(call_rwsem_down_write_failed)
-	CFI_STARTPROC
 	save_common_regs
 	movq %rax,%rdi
 	call rwsem_down_write_failed
 	restore_common_regs
 	ret
-	CFI_ENDPROC
 ENDPROC(call_rwsem_down_write_failed)
 
 ENTRY(call_rwsem_wake)
-	CFI_STARTPROC
 	/* do nothing if still outstanding active readers */
 	__ASM_HALF_SIZE(dec) %__ASM_HALF_REG(dx)
 	jnz 1f
@@ -116,17 +110,14 @@ ENTRY(call_rwsem_wake)
 	call rwsem_wake
 	restore_common_regs
 1:	ret
-	CFI_ENDPROC
 ENDPROC(call_rwsem_wake)
 
 ENTRY(call_rwsem_downgrade_wake)
-	CFI_STARTPROC
 	save_common_regs
-	__ASM_SIZE(push,_cfi_reg) __ASM_REG(dx)
+	__ASM_SIZE(push,) %__ASM_REG(dx)
 	movq %rax,%rdi
 	call rwsem_downgrade_wake
-	__ASM_SIZE(pop,_cfi_reg) __ASM_REG(dx)
+	__ASM_SIZE(pop,) %__ASM_REG(dx)
 	restore_common_regs
 	ret
-	CFI_ENDPROC
 ENDPROC(call_rwsem_downgrade_wake)
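A word on the __ASM_SIZE construct, since the change looks odd at first glance: its second argument is glued onto the mnemonic, so __ASM_SIZE(push,_cfi_reg) used to select pushl_cfi_reg/pushq_cfi_reg, while __ASM_SIZE(push,) now selects the bare instruction. On x86-64, for example:

	__ASM_SIZE(push,) %__ASM_REG(dx)	# -> pushq %rdx
	...
	__ASM_SIZE(pop,)  %__ASM_REG(dx)	# -> popq %rdx

The register also gains an explicit % prefix here because the plain mnemonics take an ordinary register operand, whereas the old wrapper macros took a bare register name.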
diff --git a/arch/x86/lib/thunk_32.S b/arch/x86/lib/thunk_32.S
index 5eb715087b80..e9acf5f4fc92 100644
--- a/arch/x86/lib/thunk_32.S
+++ b/arch/x86/lib/thunk_32.S
@@ -6,16 +6,14 @@
  */
 #include <linux/linkage.h>
 #include <asm/asm.h>
-#include <asm/dwarf2.h>
 
 /* put return address in eax (arg1) */
 .macro THUNK name, func, put_ret_addr_in_eax=0
 .globl \name
 \name:
-	CFI_STARTPROC
-	pushl_cfi_reg eax
-	pushl_cfi_reg ecx
-	pushl_cfi_reg edx
+	pushl %eax
+	pushl %ecx
+	pushl %edx
 
 	.if \put_ret_addr_in_eax
 	/* Place EIP in the arg1 */
@@ -23,11 +21,10 @@
 	.endif
 
 	call \func
-	popl_cfi_reg edx
-	popl_cfi_reg ecx
-	popl_cfi_reg eax
+	popl %edx
+	popl %ecx
+	popl %eax
 	ret
-	CFI_ENDPROC
 	_ASM_NOKPROBE(\name)
 	.endm
 
diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
index f89ba4e93025..10f555e435e1 100644
--- a/arch/x86/lib/thunk_64.S
+++ b/arch/x86/lib/thunk_64.S
@@ -6,7 +6,6 @@
 * Subject to the GNU public license, v.2. No warranty of any kind.
 */
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/calling.h>
 #include <asm/asm.h>
 
@@ -14,27 +13,25 @@
 	.macro THUNK name, func, put_ret_addr_in_rdi=0
 	.globl \name
 \name:
-	CFI_STARTPROC
 
 	/* this one pushes 9 elems, the next one would be %rIP */
-	pushq_cfi_reg rdi
-	pushq_cfi_reg rsi
-	pushq_cfi_reg rdx
-	pushq_cfi_reg rcx
-	pushq_cfi_reg rax
-	pushq_cfi_reg r8
-	pushq_cfi_reg r9
-	pushq_cfi_reg r10
-	pushq_cfi_reg r11
+	pushq %rdi
+	pushq %rsi
+	pushq %rdx
+	pushq %rcx
+	pushq %rax
+	pushq %r8
+	pushq %r9
+	pushq %r10
+	pushq %r11
 
 	.if \put_ret_addr_in_rdi
 	/* 9*8(%rsp) is return addr on stack */
-	movq_cfi_restore 9*8, rdi
+	movq 9*8(%rsp), %rdi
 	.endif
 
 	call \func
 	jmp restore
-	CFI_ENDPROC
 	_ASM_NOKPROBE(\name)
 	.endm
 
@@ -57,19 +54,16 @@
 #if defined(CONFIG_TRACE_IRQFLAGS) \
 	|| defined(CONFIG_DEBUG_LOCK_ALLOC) \
 	|| defined(CONFIG_PREEMPT)
-	CFI_STARTPROC
-	CFI_ADJUST_CFA_OFFSET 9*8
 restore:
-	popq_cfi_reg r11
-	popq_cfi_reg r10
-	popq_cfi_reg r9
-	popq_cfi_reg r8
-	popq_cfi_reg rax
-	popq_cfi_reg rcx
-	popq_cfi_reg rdx
-	popq_cfi_reg rsi
-	popq_cfi_reg rdi
+	popq %r11
+	popq %r10
+	popq %r9
+	popq %r8
+	popq %rax
+	popq %rcx
+	popq %rdx
+	popq %rsi
+	popq %rdi
 	ret
-	CFI_ENDPROC
 	_ASM_NOKPROBE(restore)
 #endif
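The restore stanza deserves a note: it is a bare jump target shared by all the thunks, entered with nine registers already pushed. The deleted CFI_STARTPROC/CFI_ADJUST_CFA_OFFSET 9*8 pair opened a fresh annotation region telling the dwarf unwinder about those outstanding pushes — in raw directives, roughly:

	.cfi_startproc
	.cfi_adjust_cfa_offset 9*8	# nine 8-byte pushes outstanding on entry
restore:
	popq	%r11
	...

A framepointer unwinder needs no such preamble at a jump target, so the label can now stand alone.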
diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
index 6440221ced0d..4093216b3791 100644
--- a/arch/x86/net/bpf_jit.S
+++ b/arch/x86/net/bpf_jit.S
@@ -8,7 +8,6 @@
 * of the License.
 */
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 
 /*
 * Calling convention :