author    Ingo Molnar <mingo@elte.hu>  2008-11-20 04:48:28 -0500
committer Ingo Molnar <mingo@elte.hu>  2008-11-20 04:48:31 -0500
commit    c032a2de4c1a82187e9a754511043be47c8a92b5 (patch)
tree      6d20bfcff683555b641a376ffdffb2dbc1f1599a /arch/x86/kernel/entry_64.S
parent    722024dbb74f3ea316c285c0a71a4512e113b0c4 (diff)
parent    cbe9ee00cea58d1f77b172fe22a51080e90877f2 (diff)
Merge branch 'x86/cleanups' into x86/irq
[ merged x86/cleanups into x86/irq to enable a wider IRQ entry code patch to be applied, which depends on a cleanup patch in x86/cleanups. ]
Diffstat (limited to 'arch/x86/kernel/entry_64.S')
-rw-r--r--	arch/x86/kernel/entry_64.S	190
1 file changed, 95 insertions(+), 95 deletions(-)
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 369de6973c58..dbf06a0ef3d5 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -11,15 +11,15 @@
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: Architecture defined interrupt frame from SS to RIP
 * at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like partial stack frame, but all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
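[ Editor's note: a rough C view of the frame terminology above may help.
  This sketch mirrors the contemporary x86-64 struct pt_regs layout (an
  assumption stated here, not part of the diff); the offsets RIP, RSP,
  RAX etc. used throughout this file index into it. ]

struct pt_regs_sketch {
	/* full stack frame only: callee-saved set, stored by SAVE_REST */
	unsigned long r15, r14, r13, r12, rbp, rbx;
	/* partial stack frame: "registers up to R11", stored by SAVE_ARGS */
	unsigned long r11, r10, r9, r8, rax, rcx, rdx, rsi, rdi;
	unsigned long orig_rax;	/* syscall number or error code */
	/* "top of stack": architecture-defined frame, SS down to RIP */
	unsigned long rip, cs, eflags, rsp, ss;
};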
@@ -142,7 +142,7 @@ END(mcount)

#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
@@ -161,14 +161,14 @@ ENTRY(native_usergs_sysret64)
.endm

/*
 * C code is not supposed to know about undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path, FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */

	/* %rsp: at FRAMEEND */
	.macro FIXUP_TOP_OF_STACK tmp
	movq %gs:pda_oldrsp,\tmp
	movq \tmp,RSP(%rsp)
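[ Editor's note: a hedged C paraphrase of what FIXUP_TOP_OF_STACK
  accomplishes, using the pt_regs_sketch layout from the note above.
  Only the first two assignments are visible in this hunk; the rest
  reflect the contemporary macro body and should be read as an
  assumption. ]

static void fixup_top_of_stack_sketch(struct pt_regs_sketch *regs,
				      unsigned long pda_oldrsp)
{
	regs->rsp    = pda_oldrsp;	/* user stack pointer, parked in the PDA */
	regs->ss     = __USER_DS;	/* segment values SYSCALL never saved */
	regs->cs     = __USER_CS;
	regs->eflags = regs->r11;	/* SYSCALL stashed eflags in r11 */
	regs->rcx    = -1UL;		/* assumption: marker in the rcx slot */
}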
@@ -244,8 +244,8 @@ ENTRY(native_usergs_sysret64)
	.endm
/*
 * A newly forked process directly context switches into this.
 */
/* rdi:	prev */
ENTRY(ret_from_fork)
	CFI_DEFAULT_STACK
	push kernel_eflags(%rip)
@@ -256,7 +256,7 @@ ENTRY(ret_from_fork)
	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
	jnz rff_trace
rff_action:
	RESTORE_REST
	testl $3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
	je int_ret_from_sys_call
@@ -267,7 +267,7 @@ rff_action:
rff_trace:
	movq %rsp,%rdi
	call syscall_trace_leave
	GET_THREAD_INFO(%rcx)
	jmp rff_action
	CFI_ENDPROC
END(ret_from_fork)
@@ -278,20 +278,20 @@ END(ret_from_fork)
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 */

/*
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3	(--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
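[ Editor's note: the register table above is exactly the convention a
  raw SYSCALL from user space must follow. A minimal, runnable
  illustration (userspace C on x86-64 Linux; not part of the patch) —
  note that arg3 travels in r10, because SYSCALL itself consumes rcx
  and r11. ]

int main(void)
{
	const char msg[] = "hello via raw syscall\n";
	long ret;

	/* write(1, msg, len): rax = __NR_write (1), rdi/rsi/rdx = args */
	asm volatile ("syscall"
		      : "=a" (ret)
		      : "a" (1L), "D" (1L), "S" (msg),
			"d" ((long)(sizeof(msg) - 1))
		      : "rcx", "r11", "memory");  /* clobbered by SYSCALL */
	return ret < 0;
}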
@@ -301,7 +301,7 @@ END(ret_from_fork)
 * When the user can change the frames, always force IRET. That is because
 * it deals with uncanonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */

ENTRY(system_call)
	CFI_STARTPROC	simple
@@ -317,7 +317,7 @@ ENTRY(system_call)
 */
ENTRY(system_call_after_swapgs)

	movq %rsp,%gs:pda_oldrsp
	movq %gs:pda_kernelstack,%rsp
	/*
	 * No need to follow this irqs off/on section - it's straight
@@ -325,7 +325,7 @@ ENTRY(system_call_after_swapgs)
	 */
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_ARGS 8,1
	movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
	movq %rcx,RIP-ARGOFFSET(%rsp)
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
	GET_THREAD_INFO(%rcx)
@@ -339,19 +339,19 @@ system_call_fastpath:
	movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
ret_from_sys_call:
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	flagmask */
sysret_check:
	LOCKDEP_SYS_EXIT
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz sysret_careful
	CFI_REMEMBER_STATE
	/*
	 * sysretq will re-enable interrupts:
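[ Editor's note: a hedged C model of the decision sysret_check makes
  above; the flag names are illustrative stand-ins for the TIF_* bits,
  not kernel identifiers. ]

enum { WORK_RESCHED = 1 << 0, WORK_OTHER = 1 << 1 };	/* stand-ins */

static int syscall_exit_work_sketch(unsigned int ti_flags, unsigned int mask)
{
	while (ti_flags & mask) {		/* andl %edi,%edx; jnz ... */
		if (ti_flags & WORK_RESCHED)
			ti_flags &= ~WORK_RESCHED; /* schedule(), then re-check */
		else
			return 1;	/* signals etc.: leave via the IRET path */
	}
	return 0;			/* nothing pending: SYSRET is safe */
}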
@@ -366,7 +366,7 @@ sysret_check:

	CFI_RESTORE_STATE
	/* Handle reschedules */
	/* edx:	work, edi: workmask */
sysret_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc sysret_signal
@@ -379,7 +379,7 @@ sysret_careful:
	CFI_ADJUST_CFA_OFFSET -8
	jmp sysret_check

	/* Handle a signal */
sysret_signal:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
@@ -398,7 +398,7 @@ sysret_signal:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check

badsys:
	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
	jmp ret_from_sys_call
@@ -437,7 +437,7 @@ sysret_audit:
#endif	/* CONFIG_AUDITSYSCALL */

	/* Do syscall tracing */
tracesys:
#ifdef CONFIG_AUDITSYSCALL
	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
	jz auditsys
@@ -460,8 +460,8 @@ tracesys:
	call *sys_call_table(,%rax,8)
	movq %rax,RAX-ARGOFFSET(%rsp)
	/* Use IRET because user could have changed frame */

/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
@@ -505,18 +505,18 @@ int_very_careful:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	/* Check for syscall exit trace */
	testl $_TIF_WORK_SYSCALL_EXIT,%edx
	jz int_signal
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	leaq 8(%rsp),%rdi	# &ptregs -> arg1
	call syscall_trace_leave
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
	jmp int_restore_rest

int_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz 1f
@@ -531,11 +531,11 @@ int_restore_rest:
	jmp int_with_check
	CFI_ENDPROC
END(system_call)

/*
 * Certain special system calls need to save a complete full stack frame.
 */

	.macro PTREGSCALL label,func,arg
	.globl \label
\label:
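[ Editor's note: a sketch of why these syscalls want the full frame.
  Handlers reached through PTREGSCALL receive the saved user registers
  and may rewrite them; the names below and the helper are hypothetical,
  and field names follow the pt_regs_sketch above. ]

static void restore_sigcontext_sketch(struct pt_regs_sketch *regs)
{
	/* hypothetical helper: refill *regs from the user signal frame */
}

static long sys_rt_sigreturn_sketch(struct pt_regs_sketch *regs)
{
	restore_sigcontext_sketch(regs);
	/* every register, including the callee-saved ones, must be put
	 * back, which is why the IRET path (not SYSRET) finishes the job */
	return regs->rax;
}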
@@ -572,7 +572,7 @@ ENTRY(ptregscall_common)
	ret
	CFI_ENDPROC
END(ptregscall_common)

ENTRY(stub_execve)
	CFI_STARTPROC
	popq %r11
@@ -588,11 +588,11 @@ ENTRY(stub_execve)
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_execve)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	CFI_STARTPROC
	addq $8, %rsp
@@ -731,12 +731,12 @@ exit_intr:
	GET_THREAD_INFO(%rcx)
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_kernel

	/* Interrupt came from user space */
	/*
	 * Has a correct top of stack, but a partial stack frame
	 * %rcx: thread info. Interrupts off.
	 */
retint_with_reschedule:
	movl $_TIF_WORK_MASK,%edi
retint_check:
@@ -809,20 +809,20 @@ retint_careful:
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET	8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET	-8
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp retint_check

retint_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz retint_swapgs
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	movq $-1,ORIG_RAX(%rsp)
	xorl %esi,%esi		# oldset
	movq %rsp,%rdi		# &pt_regs
	call do_notify_resume
@@ -844,14 +844,14 @@ ENTRY(retint_kernel)
	jnc retint_restore_args
	call preempt_schedule_irq
	jmp exit_intr
#endif

	CFI_ENDPROC
END(common_interrupt)

/*
 * APIC interrupts.
 */
	.macro apicinterrupt num,func
	INTR_FRAME
	pushq $~(\num)
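[ Editor's note: the pushq $~(\num) above stores the vector
  one's-complemented, so the orig_rax slot is negative for hardware
  interrupts and cannot collide with a non-negative syscall number; the
  common code recovers it with another complement. A runnable check: ]

#include <stdio.h>

int main(void)
{
	unsigned int num = 239;			/* some vector number */
	long orig_rax = ~(long)num;		/* what gets pushed: -240 */
	unsigned int vector = ~orig_rax;	/* recovered by the handler */
	printf("%ld %u\n", orig_rax, vector);	/* prints: -240 239 */
	return 0;
}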
@@ -869,14 +869,14 @@ ENTRY(threshold_interrupt)
	apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)

#ifdef CONFIG_SMP
ENTRY(reschedule_interrupt)
	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)

	.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
END(invalidate_interrupt\num)
	.endm

@@ -915,22 +915,22 @@ END(error_interrupt)
ENTRY(spurious_interrupt)
	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)

/*
 * Exception entry points.
 */
	.macro zeroentry sym
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0	/* push error code/oldrax */
	CFI_ADJUST_CFA_OFFSET 8
	pushq %rax	/* push real oldrax to the rdi slot */
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rax,0
	leaq \sym(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
	.endm

	.macro errorentry sym
	XCPT_FRAME
@@ -1044,13 +1044,13 @@ paranoid_schedule\trace:

/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.
 */
KPROBE_ENTRY(error_entry)
	_frame RDI
	CFI_REL_OFFSET rax,0
	/* rdi slot contains rax, oldrax contains error code */
	cld
	subq $14*8,%rsp
	CFI_ADJUST_CFA_OFFSET	(14*8)
	movq %rsi,13*8(%rsp)
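[ Editor's note: the subq $14*8 and the movq sequence in this and the
  next hunks hand-build the lower part of the frame so that n*8(%rsp)
  lines up with the pt_regs_sketch layout; a table of the slot
  assignments, reconstructed from the stores shown: ]

static const char *error_entry_slots[14] = {
	/* index n means "stored at n*8(%rsp)" */
	"r15", "r14", "r13", "r12", "rbp", "rbx", "r11",
	"r10", "r9",  "r8",  "rax", "rcx", "rdx", "rsi",
};	/* rdi was already parked in its slot by the zeroentry macro */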
@@ -1061,7 +1061,7 @@ KPROBE_ENTRY(error_entry)
	CFI_REL_OFFSET	rdx,RDX
	movq %rcx,11*8(%rsp)
	CFI_REL_OFFSET	rcx,RCX
	movq %rsi,10*8(%rsp)	/* store rax */
	CFI_REL_OFFSET	rax,RAX
	movq %r8, 9*8(%rsp)
	CFI_REL_OFFSET	r8,R8
@@ -1071,29 +1071,29 @@ KPROBE_ENTRY(error_entry)
	CFI_REL_OFFSET	r10,R10
	movq %r11,6*8(%rsp)
	CFI_REL_OFFSET	r11,R11
	movq %rbx,5*8(%rsp)
	CFI_REL_OFFSET	rbx,RBX
	movq %rbp,4*8(%rsp)
	CFI_REL_OFFSET	rbp,RBP
	movq %r12,3*8(%rsp)
	CFI_REL_OFFSET	r12,R12
	movq %r13,2*8(%rsp)
	CFI_REL_OFFSET	r13,R13
	movq %r14,1*8(%rsp)
	CFI_REL_OFFSET	r14,R14
	movq %r15,(%rsp)
	CFI_REL_OFFSET	r15,R15
	xorl %ebx,%ebx
	testl $3,CS(%rsp)
	je error_kernelspace
error_swapgs:
	SWAPGS
error_sti:
	TRACE_IRQS_OFF
	movq %rdi,RDI(%rsp)
	CFI_REL_OFFSET	rdi,RDI
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)
	call *%rax
	/* ebx:	no swapgs flag (1: don't need swapgs, 0: need it) */
@@ -1102,7 +1102,7 @@ error_exit:
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	testl %eax,%eax
	jne retint_kernel
	LOCKDEP_SYS_EXIT_IRQ
@@ -1118,7 +1118,7 @@ error_kernelspace:
	/* There are two places in the kernel that can potentially fault with
	   usergs. Handle them here. The exception handlers after
	   iret run with kernel gs again, so don't set the user space flag.
	   B stepping K8s sometimes report a truncated RIP for IRET
	   exceptions returning to compat mode. Check for these here too. */
	leaq irq_return(%rip),%rcx
	cmpq %rcx,RIP(%rsp)
@@ -1130,17 +1130,17 @@ error_kernelspace:
	je error_swapgs
	jmp error_sti
KPROBE_END(error_entry)

	/* Reload gs selector with exception handling */
	/* edi:	new selector */
ENTRY(native_load_gs_index)
	CFI_STARTPROC
	pushf
	CFI_ADJUST_CFA_OFFSET 8
	DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
	SWAPGS
gs_change:
	movl %edi,%gs
2:	mfence		/* workaround */
	SWAPGS
	popf
@@ -1148,20 +1148,20 @@ gs_change:
	ret
	CFI_ENDPROC
ENDPROC(native_load_gs_index)

	.section __ex_table,"a"
	.align 8
	.quad gs_change,bad_gs
	.previous
	.section .fixup,"ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS			/* switch back to user gs */
	xorl %eax,%eax
	movl %eax,%gs
	jmp 2b
	.previous

/*
 * Create a kernel thread.
 *
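[ Editor's note: before the kernel_thread hunks, a word on the .quad
  gs_change,bad_gs entry above — it is the kernel's exception-table
  pattern: if the movl %edi,%gs at gs_change faults, the fault handler
  finds the entry for that address and resumes at bad_gs instead of
  oopsing. A minimal C model (illustrative names; the real lookup is a
  binary search over a sorted table): ]

struct extable_entry_sketch { unsigned long insn, fixup; };

static unsigned long
search_extable_sketch(const struct extable_entry_sketch *tab, int n,
		      unsigned long fault_ip)
{
	int i;

	for (i = 0; i < n; i++)
		if (tab[i].insn == fault_ip)
			return tab[i].fixup;	/* e.g. bad_gs */
	return 0;				/* no fixup: genuine fault */
}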
@@ -1184,7 +1184,7 @@ ENTRY(kernel_thread)

	xorl %r8d,%r8d
	xorl %r9d,%r9d

	# clone now
	call do_fork
	movq %rax,RAX(%rsp)
@@ -1195,14 +1195,14 @@ ENTRY(kernel_thread)
 * so internally to the x86_64 port you can rely on kernel_thread()
 * not to reschedule the child before returning; this avoids the need
 * for hacks, for example to fork off the per-CPU idle tasks.
 * [Hopefully no generic code relies on the reschedule -AK]
 */
	RESTORE_ALL
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_thread)

child_rip:
	pushq $0		# fake return address
	CFI_STARTPROC
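[ Editor's note: an illustrative in-kernel use of the primitive above,
  with the contemporary signature int kernel_thread(int (*fn)(void *),
  void *arg, unsigned long flags); the function name and flags choice
  are an example only, and this fragment compiles only inside the
  kernel tree. ]

static int worker_fn(void *unused)
{
	/* runs in the new thread via child_rip; return value feeds do_exit */
	return 0;
}

static void spawn_worker_sketch(void)
{
	int pid = kernel_thread(worker_fn, NULL, CLONE_FS | CLONE_FILES);

	if (pid < 0)
		printk("kernel_thread failed: %d\n", pid);
}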
@@ -1237,10 +1237,10 @@ ENDPROC(child_rip)
ENTRY(kernel_execve)
	CFI_STARTPROC
	FAKE_STACK_FRAME $0
	SAVE_ALL
	movq %rsp,%rcx
	call sys_execve
	movq %rax, RAX(%rsp)
	RESTORE_REST
	testq %rax,%rax
	je int_ret_from_sys_call
@@ -1259,7 +1259,7 @@ ENTRY(coprocessor_error)
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
@@ -1271,12 +1271,12 @@ KPROBE_ENTRY(debug)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_debug, DEBUG_STACK
	paranoidexit
KPROBE_END(debug)

	/* runs on exception stack */
KPROBE_ENTRY(nmi)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
@@ -1310,7 +1310,7 @@ ENTRY(bounds)
END(bounds)

ENTRY(invalid_op)
	zeroentry do_invalid_op
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
@@ -1365,7 +1365,7 @@ ENTRY(machine_check)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_machine_check
	jmp paranoid_exit1
	CFI_ENDPROC