diff options
author | Alexander van Heukelum <heukelum@fastmail.fm> | 2008-11-16 09:29:00 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-11-17 04:46:55 -0500 |
commit | 0bd7b79851d0f74b24a9ce87d088f2e7c718f668 (patch) | |
tree | cd003490be0a84b1939a367e62de78b863f19596 /arch/x86/kernel/entry_64.S | |
parent | 9dacc71ff31a008d1e689fc824d31f6696454f68 (diff) |
x86: entry_64.S: remove whitespace at end of lines
Impact: cleanup
All blame goes to: color white,red "[^[:graph:]]+$"
in .nanorc ;).
Signed-off-by: Alexander van Heukelum <heukelum@fastmail.fm>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/entry_64.S')
-rw-r--r-- | arch/x86/kernel/entry_64.S | 190 |
1 file changed, 95 insertions, 95 deletions
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index b86f332c96a6..54927784bab9 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -11,15 +11,15 @@ | |||
11 | * | 11 | * |
12 | * NOTE: This code handles signal-recognition, which happens every time | 12 | * NOTE: This code handles signal-recognition, which happens every time |
13 | * after an interrupt and after each system call. | 13 | * after an interrupt and after each system call. |
14 | * | 14 | * |
15 | * Normal syscalls and interrupts don't save a full stack frame, this is | 15 | * Normal syscalls and interrupts don't save a full stack frame, this is |
16 | * only done for syscall tracing, signals or fork/exec et.al. | 16 | * only done for syscall tracing, signals or fork/exec et.al. |
17 | * | 17 | * |
18 | * A note on terminology: | 18 | * A note on terminology: |
19 | * - top of stack: Architecture defined interrupt frame from SS to RIP | 19 | * - top of stack: Architecture defined interrupt frame from SS to RIP |
20 | * at the top of the kernel process stack. | 20 | * at the top of the kernel process stack. |
21 | * - partial stack frame: partially saved registers upto R11. | 21 | * - partial stack frame: partially saved registers upto R11. |
22 | * - full stack frame: Like partial stack frame, but all register saved. | 22 | * - full stack frame: Like partial stack frame, but all register saved. |
23 | * | 23 | * |
24 | * Some macro usage: | 24 | * Some macro usage: |
25 | * - CFI macros are used to generate dwarf2 unwind information for better | 25 | * - CFI macros are used to generate dwarf2 unwind information for better |
@@ -142,7 +142,7 @@ END(mcount) | |||
142 | 142 | ||
143 | #ifndef CONFIG_PREEMPT | 143 | #ifndef CONFIG_PREEMPT |
144 | #define retint_kernel retint_restore_args | 144 | #define retint_kernel retint_restore_args |
145 | #endif | 145 | #endif |
146 | 146 | ||
147 | #ifdef CONFIG_PARAVIRT | 147 | #ifdef CONFIG_PARAVIRT |
148 | ENTRY(native_usergs_sysret64) | 148 | ENTRY(native_usergs_sysret64) |
@@ -161,14 +161,14 @@ ENTRY(native_usergs_sysret64) | |||
161 | .endm | 161 | .endm |
162 | 162 | ||
163 | /* | 163 | /* |
164 | * C code is not supposed to know about undefined top of stack. Every time | 164 | * C code is not supposed to know about undefined top of stack. Every time |
165 | * a C function with an pt_regs argument is called from the SYSCALL based | 165 | * a C function with an pt_regs argument is called from the SYSCALL based |
166 | * fast path FIXUP_TOP_OF_STACK is needed. | 166 | * fast path FIXUP_TOP_OF_STACK is needed. |
167 | * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs | 167 | * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs |
168 | * manipulation. | 168 | * manipulation. |
169 | */ | 169 | */ |
170 | 170 | ||
171 | /* %rsp:at FRAMEEND */ | 171 | /* %rsp:at FRAMEEND */ |
172 | .macro FIXUP_TOP_OF_STACK tmp | 172 | .macro FIXUP_TOP_OF_STACK tmp |
173 | movq %gs:pda_oldrsp,\tmp | 173 | movq %gs:pda_oldrsp,\tmp |
174 | movq \tmp,RSP(%rsp) | 174 | movq \tmp,RSP(%rsp) |
@@ -244,8 +244,8 @@ ENTRY(native_usergs_sysret64) | |||
244 | .endm | 244 | .endm |
245 | /* | 245 | /* |
246 | * A newly forked process directly context switches into this. | 246 | * A newly forked process directly context switches into this. |
247 | */ | 247 | */ |
248 | /* rdi: prev */ | 248 | /* rdi: prev */ |
249 | ENTRY(ret_from_fork) | 249 | ENTRY(ret_from_fork) |
250 | CFI_DEFAULT_STACK | 250 | CFI_DEFAULT_STACK |
251 | push kernel_eflags(%rip) | 251 | push kernel_eflags(%rip) |
@@ -256,7 +256,7 @@ ENTRY(ret_from_fork) | |||
256 | GET_THREAD_INFO(%rcx) | 256 | GET_THREAD_INFO(%rcx) |
257 | testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx) | 257 | testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx) |
258 | jnz rff_trace | 258 | jnz rff_trace |
259 | rff_action: | 259 | rff_action: |
260 | RESTORE_REST | 260 | RESTORE_REST |
261 | testl $3,CS-ARGOFFSET(%rsp) # from kernel_thread? | 261 | testl $3,CS-ARGOFFSET(%rsp) # from kernel_thread? |
262 | je int_ret_from_sys_call | 262 | je int_ret_from_sys_call |
@@ -267,7 +267,7 @@ rff_action: | |||
267 | rff_trace: | 267 | rff_trace: |
268 | movq %rsp,%rdi | 268 | movq %rsp,%rdi |
269 | call syscall_trace_leave | 269 | call syscall_trace_leave |
270 | GET_THREAD_INFO(%rcx) | 270 | GET_THREAD_INFO(%rcx) |
271 | jmp rff_action | 271 | jmp rff_action |
272 | CFI_ENDPROC | 272 | CFI_ENDPROC |
273 | END(ret_from_fork) | 273 | END(ret_from_fork) |
@@ -278,20 +278,20 @@ END(ret_from_fork) | |||
278 | * SYSCALL does not save anything on the stack and does not change the | 278 | * SYSCALL does not save anything on the stack and does not change the |
279 | * stack pointer. | 279 | * stack pointer. |
280 | */ | 280 | */ |
281 | 281 | ||
282 | /* | 282 | /* |
283 | * Register setup: | 283 | * Register setup: |
284 | * rax system call number | 284 | * rax system call number |
285 | * rdi arg0 | 285 | * rdi arg0 |
286 | * rcx return address for syscall/sysret, C arg3 | 286 | * rcx return address for syscall/sysret, C arg3 |
287 | * rsi arg1 | 287 | * rsi arg1 |
288 | * rdx arg2 | 288 | * rdx arg2 |
289 | * r10 arg3 (--> moved to rcx for C) | 289 | * r10 arg3 (--> moved to rcx for C) |
290 | * r8 arg4 | 290 | * r8 arg4 |
291 | * r9 arg5 | 291 | * r9 arg5 |
292 | * r11 eflags for syscall/sysret, temporary for C | 292 | * r11 eflags for syscall/sysret, temporary for C |
293 | * r12-r15,rbp,rbx saved by C code, not touched. | 293 | * r12-r15,rbp,rbx saved by C code, not touched. |
294 | * | 294 | * |
295 | * Interrupts are off on entry. | 295 | * Interrupts are off on entry. |
296 | * Only called from user space. | 296 | * Only called from user space. |
297 | * | 297 | * |
@@ -301,7 +301,7 @@ END(ret_from_fork) | |||
301 | * When user can change the frames always force IRET. That is because | 301 | * When user can change the frames always force IRET. That is because |
302 | * it deals with uncanonical addresses better. SYSRET has trouble | 302 | * it deals with uncanonical addresses better. SYSRET has trouble |
303 | * with them due to bugs in both AMD and Intel CPUs. | 303 | * with them due to bugs in both AMD and Intel CPUs. |
304 | */ | 304 | */ |
305 | 305 | ||
306 | ENTRY(system_call) | 306 | ENTRY(system_call) |
307 | CFI_STARTPROC simple | 307 | CFI_STARTPROC simple |
@@ -317,7 +317,7 @@ ENTRY(system_call) | |||
317 | */ | 317 | */ |
318 | ENTRY(system_call_after_swapgs) | 318 | ENTRY(system_call_after_swapgs) |
319 | 319 | ||
320 | movq %rsp,%gs:pda_oldrsp | 320 | movq %rsp,%gs:pda_oldrsp |
321 | movq %gs:pda_kernelstack,%rsp | 321 | movq %gs:pda_kernelstack,%rsp |
322 | /* | 322 | /* |
323 | * No need to follow this irqs off/on section - it's straight | 323 | * No need to follow this irqs off/on section - it's straight |
@@ -325,7 +325,7 @@ ENTRY(system_call_after_swapgs) | |||
325 | */ | 325 | */ |
326 | ENABLE_INTERRUPTS(CLBR_NONE) | 326 | ENABLE_INTERRUPTS(CLBR_NONE) |
327 | SAVE_ARGS 8,1 | 327 | SAVE_ARGS 8,1 |
328 | movq %rax,ORIG_RAX-ARGOFFSET(%rsp) | 328 | movq %rax,ORIG_RAX-ARGOFFSET(%rsp) |
329 | movq %rcx,RIP-ARGOFFSET(%rsp) | 329 | movq %rcx,RIP-ARGOFFSET(%rsp) |
330 | CFI_REL_OFFSET rip,RIP-ARGOFFSET | 330 | CFI_REL_OFFSET rip,RIP-ARGOFFSET |
331 | GET_THREAD_INFO(%rcx) | 331 | GET_THREAD_INFO(%rcx) |
@@ -339,19 +339,19 @@ system_call_fastpath: | |||
339 | movq %rax,RAX-ARGOFFSET(%rsp) | 339 | movq %rax,RAX-ARGOFFSET(%rsp) |
340 | /* | 340 | /* |
341 | * Syscall return path ending with SYSRET (fast path) | 341 | * Syscall return path ending with SYSRET (fast path) |
342 | * Has incomplete stack frame and undefined top of stack. | 342 | * Has incomplete stack frame and undefined top of stack. |
343 | */ | 343 | */ |
344 | ret_from_sys_call: | 344 | ret_from_sys_call: |
345 | movl $_TIF_ALLWORK_MASK,%edi | 345 | movl $_TIF_ALLWORK_MASK,%edi |
346 | /* edi: flagmask */ | 346 | /* edi: flagmask */ |
347 | sysret_check: | 347 | sysret_check: |
348 | LOCKDEP_SYS_EXIT | 348 | LOCKDEP_SYS_EXIT |
349 | GET_THREAD_INFO(%rcx) | 349 | GET_THREAD_INFO(%rcx) |
350 | DISABLE_INTERRUPTS(CLBR_NONE) | 350 | DISABLE_INTERRUPTS(CLBR_NONE) |
351 | TRACE_IRQS_OFF | 351 | TRACE_IRQS_OFF |
352 | movl TI_flags(%rcx),%edx | 352 | movl TI_flags(%rcx),%edx |
353 | andl %edi,%edx | 353 | andl %edi,%edx |
354 | jnz sysret_careful | 354 | jnz sysret_careful |
355 | CFI_REMEMBER_STATE | 355 | CFI_REMEMBER_STATE |
356 | /* | 356 | /* |
357 | * sysretq will re-enable interrupts: | 357 | * sysretq will re-enable interrupts: |
@@ -366,7 +366,7 @@ sysret_check: | |||
366 | 366 | ||
367 | CFI_RESTORE_STATE | 367 | CFI_RESTORE_STATE |
368 | /* Handle reschedules */ | 368 | /* Handle reschedules */ |
369 | /* edx: work, edi: workmask */ | 369 | /* edx: work, edi: workmask */ |
370 | sysret_careful: | 370 | sysret_careful: |
371 | bt $TIF_NEED_RESCHED,%edx | 371 | bt $TIF_NEED_RESCHED,%edx |
372 | jnc sysret_signal | 372 | jnc sysret_signal |
@@ -379,7 +379,7 @@ sysret_careful: | |||
379 | CFI_ADJUST_CFA_OFFSET -8 | 379 | CFI_ADJUST_CFA_OFFSET -8 |
380 | jmp sysret_check | 380 | jmp sysret_check |
381 | 381 | ||
382 | /* Handle a signal */ | 382 | /* Handle a signal */ |
383 | sysret_signal: | 383 | sysret_signal: |
384 | TRACE_IRQS_ON | 384 | TRACE_IRQS_ON |
385 | ENABLE_INTERRUPTS(CLBR_NONE) | 385 | ENABLE_INTERRUPTS(CLBR_NONE) |
@@ -398,7 +398,7 @@ sysret_signal: | |||
398 | DISABLE_INTERRUPTS(CLBR_NONE) | 398 | DISABLE_INTERRUPTS(CLBR_NONE) |
399 | TRACE_IRQS_OFF | 399 | TRACE_IRQS_OFF |
400 | jmp int_with_check | 400 | jmp int_with_check |
401 | 401 | ||
402 | badsys: | 402 | badsys: |
403 | movq $-ENOSYS,RAX-ARGOFFSET(%rsp) | 403 | movq $-ENOSYS,RAX-ARGOFFSET(%rsp) |
404 | jmp ret_from_sys_call | 404 | jmp ret_from_sys_call |
@@ -437,7 +437,7 @@ sysret_audit: | |||
437 | #endif /* CONFIG_AUDITSYSCALL */ | 437 | #endif /* CONFIG_AUDITSYSCALL */ |
438 | 438 | ||
439 | /* Do syscall tracing */ | 439 | /* Do syscall tracing */ |
440 | tracesys: | 440 | tracesys: |
441 | #ifdef CONFIG_AUDITSYSCALL | 441 | #ifdef CONFIG_AUDITSYSCALL |
442 | testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx) | 442 | testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx) |
443 | jz auditsys | 443 | jz auditsys |
@@ -460,8 +460,8 @@ tracesys: | |||
460 | call *sys_call_table(,%rax,8) | 460 | call *sys_call_table(,%rax,8) |
461 | movq %rax,RAX-ARGOFFSET(%rsp) | 461 | movq %rax,RAX-ARGOFFSET(%rsp) |
462 | /* Use IRET because user could have changed frame */ | 462 | /* Use IRET because user could have changed frame */ |
463 | 463 | ||
464 | /* | 464 | /* |
465 | * Syscall return path ending with IRET. | 465 | * Syscall return path ending with IRET. |
466 | * Has correct top of stack, but partial stack frame. | 466 | * Has correct top of stack, but partial stack frame. |
467 | */ | 467 | */ |
@@ -505,18 +505,18 @@ int_very_careful: | |||
505 | TRACE_IRQS_ON | 505 | TRACE_IRQS_ON |
506 | ENABLE_INTERRUPTS(CLBR_NONE) | 506 | ENABLE_INTERRUPTS(CLBR_NONE) |
507 | SAVE_REST | 507 | SAVE_REST |
508 | /* Check for syscall exit trace */ | 508 | /* Check for syscall exit trace */ |
509 | testl $_TIF_WORK_SYSCALL_EXIT,%edx | 509 | testl $_TIF_WORK_SYSCALL_EXIT,%edx |
510 | jz int_signal | 510 | jz int_signal |
511 | pushq %rdi | 511 | pushq %rdi |
512 | CFI_ADJUST_CFA_OFFSET 8 | 512 | CFI_ADJUST_CFA_OFFSET 8 |
513 | leaq 8(%rsp),%rdi # &ptregs -> arg1 | 513 | leaq 8(%rsp),%rdi # &ptregs -> arg1 |
514 | call syscall_trace_leave | 514 | call syscall_trace_leave |
515 | popq %rdi | 515 | popq %rdi |
516 | CFI_ADJUST_CFA_OFFSET -8 | 516 | CFI_ADJUST_CFA_OFFSET -8 |
517 | andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi | 517 | andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi |
518 | jmp int_restore_rest | 518 | jmp int_restore_rest |
519 | 519 | ||
520 | int_signal: | 520 | int_signal: |
521 | testl $_TIF_DO_NOTIFY_MASK,%edx | 521 | testl $_TIF_DO_NOTIFY_MASK,%edx |
522 | jz 1f | 522 | jz 1f |
@@ -531,11 +531,11 @@ int_restore_rest: | |||
531 | jmp int_with_check | 531 | jmp int_with_check |
532 | CFI_ENDPROC | 532 | CFI_ENDPROC |
533 | END(system_call) | 533 | END(system_call) |
534 | 534 | ||
535 | /* | 535 | /* |
536 | * Certain special system calls that need to save a complete full stack frame. | 536 | * Certain special system calls that need to save a complete full stack frame. |
537 | */ | 537 | */ |
538 | 538 | ||
539 | .macro PTREGSCALL label,func,arg | 539 | .macro PTREGSCALL label,func,arg |
540 | .globl \label | 540 | .globl \label |
541 | \label: | 541 | \label: |
@@ -572,7 +572,7 @@ ENTRY(ptregscall_common) | |||
572 | ret | 572 | ret |
573 | CFI_ENDPROC | 573 | CFI_ENDPROC |
574 | END(ptregscall_common) | 574 | END(ptregscall_common) |
575 | 575 | ||
576 | ENTRY(stub_execve) | 576 | ENTRY(stub_execve) |
577 | CFI_STARTPROC | 577 | CFI_STARTPROC |
578 | popq %r11 | 578 | popq %r11 |
@@ -588,11 +588,11 @@ ENTRY(stub_execve) | |||
588 | jmp int_ret_from_sys_call | 588 | jmp int_ret_from_sys_call |
589 | CFI_ENDPROC | 589 | CFI_ENDPROC |
590 | END(stub_execve) | 590 | END(stub_execve) |
591 | 591 | ||
592 | /* | 592 | /* |
593 | * sigreturn is special because it needs to restore all registers on return. | 593 | * sigreturn is special because it needs to restore all registers on return. |
594 | * This cannot be done with SYSRET, so use the IRET return path instead. | 594 | * This cannot be done with SYSRET, so use the IRET return path instead. |
595 | */ | 595 | */ |
596 | ENTRY(stub_rt_sigreturn) | 596 | ENTRY(stub_rt_sigreturn) |
597 | CFI_STARTPROC | 597 | CFI_STARTPROC |
598 | addq $8, %rsp | 598 | addq $8, %rsp |
@@ -685,12 +685,12 @@ exit_intr: | |||
685 | GET_THREAD_INFO(%rcx) | 685 | GET_THREAD_INFO(%rcx) |
686 | testl $3,CS-ARGOFFSET(%rsp) | 686 | testl $3,CS-ARGOFFSET(%rsp) |
687 | je retint_kernel | 687 | je retint_kernel |
688 | 688 | ||
689 | /* Interrupt came from user space */ | 689 | /* Interrupt came from user space */ |
690 | /* | 690 | /* |
691 | * Has a correct top of stack, but a partial stack frame | 691 | * Has a correct top of stack, but a partial stack frame |
692 | * %rcx: thread info. Interrupts off. | 692 | * %rcx: thread info. Interrupts off. |
693 | */ | 693 | */ |
694 | retint_with_reschedule: | 694 | retint_with_reschedule: |
695 | movl $_TIF_WORK_MASK,%edi | 695 | movl $_TIF_WORK_MASK,%edi |
696 | retint_check: | 696 | retint_check: |
@@ -763,20 +763,20 @@ retint_careful: | |||
763 | pushq %rdi | 763 | pushq %rdi |
764 | CFI_ADJUST_CFA_OFFSET 8 | 764 | CFI_ADJUST_CFA_OFFSET 8 |
765 | call schedule | 765 | call schedule |
766 | popq %rdi | 766 | popq %rdi |
767 | CFI_ADJUST_CFA_OFFSET -8 | 767 | CFI_ADJUST_CFA_OFFSET -8 |
768 | GET_THREAD_INFO(%rcx) | 768 | GET_THREAD_INFO(%rcx) |
769 | DISABLE_INTERRUPTS(CLBR_NONE) | 769 | DISABLE_INTERRUPTS(CLBR_NONE) |
770 | TRACE_IRQS_OFF | 770 | TRACE_IRQS_OFF |
771 | jmp retint_check | 771 | jmp retint_check |
772 | 772 | ||
773 | retint_signal: | 773 | retint_signal: |
774 | testl $_TIF_DO_NOTIFY_MASK,%edx | 774 | testl $_TIF_DO_NOTIFY_MASK,%edx |
775 | jz retint_swapgs | 775 | jz retint_swapgs |
776 | TRACE_IRQS_ON | 776 | TRACE_IRQS_ON |
777 | ENABLE_INTERRUPTS(CLBR_NONE) | 777 | ENABLE_INTERRUPTS(CLBR_NONE) |
778 | SAVE_REST | 778 | SAVE_REST |
779 | movq $-1,ORIG_RAX(%rsp) | 779 | movq $-1,ORIG_RAX(%rsp) |
780 | xorl %esi,%esi # oldset | 780 | xorl %esi,%esi # oldset |
781 | movq %rsp,%rdi # &pt_regs | 781 | movq %rsp,%rdi # &pt_regs |
782 | call do_notify_resume | 782 | call do_notify_resume |
@@ -798,14 +798,14 @@ ENTRY(retint_kernel) | |||
798 | jnc retint_restore_args | 798 | jnc retint_restore_args |
799 | call preempt_schedule_irq | 799 | call preempt_schedule_irq |
800 | jmp exit_intr | 800 | jmp exit_intr |
801 | #endif | 801 | #endif |
802 | 802 | ||
803 | CFI_ENDPROC | 803 | CFI_ENDPROC |
804 | END(common_interrupt) | 804 | END(common_interrupt) |
805 | 805 | ||
806 | /* | 806 | /* |
807 | * APIC interrupts. | 807 | * APIC interrupts. |
808 | */ | 808 | */ |
809 | .macro apicinterrupt num,func | 809 | .macro apicinterrupt num,func |
810 | INTR_FRAME | 810 | INTR_FRAME |
811 | pushq $~(\num) | 811 | pushq $~(\num) |
@@ -823,14 +823,14 @@ ENTRY(threshold_interrupt) | |||
823 | apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt | 823 | apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt |
824 | END(threshold_interrupt) | 824 | END(threshold_interrupt) |
825 | 825 | ||
826 | #ifdef CONFIG_SMP | 826 | #ifdef CONFIG_SMP |
827 | ENTRY(reschedule_interrupt) | 827 | ENTRY(reschedule_interrupt) |
828 | apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt | 828 | apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt |
829 | END(reschedule_interrupt) | 829 | END(reschedule_interrupt) |
830 | 830 | ||
831 | .macro INVALIDATE_ENTRY num | 831 | .macro INVALIDATE_ENTRY num |
832 | ENTRY(invalidate_interrupt\num) | 832 | ENTRY(invalidate_interrupt\num) |
833 | apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt | 833 | apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt |
834 | END(invalidate_interrupt\num) | 834 | END(invalidate_interrupt\num) |
835 | .endm | 835 | .endm |
836 | 836 | ||
@@ -869,22 +869,22 @@ END(error_interrupt) | |||
869 | ENTRY(spurious_interrupt) | 869 | ENTRY(spurious_interrupt) |
870 | apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt | 870 | apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt |
871 | END(spurious_interrupt) | 871 | END(spurious_interrupt) |
872 | 872 | ||
873 | /* | 873 | /* |
874 | * Exception entry points. | 874 | * Exception entry points. |
875 | */ | 875 | */ |
876 | .macro zeroentry sym | 876 | .macro zeroentry sym |
877 | INTR_FRAME | 877 | INTR_FRAME |
878 | PARAVIRT_ADJUST_EXCEPTION_FRAME | 878 | PARAVIRT_ADJUST_EXCEPTION_FRAME |
879 | pushq $0 /* push error code/oldrax */ | 879 | pushq $0 /* push error code/oldrax */ |
880 | CFI_ADJUST_CFA_OFFSET 8 | 880 | CFI_ADJUST_CFA_OFFSET 8 |
881 | pushq %rax /* push real oldrax to the rdi slot */ | 881 | pushq %rax /* push real oldrax to the rdi slot */ |
882 | CFI_ADJUST_CFA_OFFSET 8 | 882 | CFI_ADJUST_CFA_OFFSET 8 |
883 | CFI_REL_OFFSET rax,0 | 883 | CFI_REL_OFFSET rax,0 |
884 | leaq \sym(%rip),%rax | 884 | leaq \sym(%rip),%rax |
885 | jmp error_entry | 885 | jmp error_entry |
886 | CFI_ENDPROC | 886 | CFI_ENDPROC |
887 | .endm | 887 | .endm |
888 | 888 | ||
889 | .macro errorentry sym | 889 | .macro errorentry sym |
890 | XCPT_FRAME | 890 | XCPT_FRAME |
@@ -998,13 +998,13 @@ paranoid_schedule\trace: | |||
998 | 998 | ||
999 | /* | 999 | /* |
1000 | * Exception entry point. This expects an error code/orig_rax on the stack | 1000 | * Exception entry point. This expects an error code/orig_rax on the stack |
1001 | * and the exception handler in %rax. | 1001 | * and the exception handler in %rax. |
1002 | */ | 1002 | */ |
1003 | KPROBE_ENTRY(error_entry) | 1003 | KPROBE_ENTRY(error_entry) |
1004 | _frame RDI | 1004 | _frame RDI |
1005 | CFI_REL_OFFSET rax,0 | 1005 | CFI_REL_OFFSET rax,0 |
1006 | /* rdi slot contains rax, oldrax contains error code */ | 1006 | /* rdi slot contains rax, oldrax contains error code */ |
1007 | cld | 1007 | cld |
1008 | subq $14*8,%rsp | 1008 | subq $14*8,%rsp |
1009 | CFI_ADJUST_CFA_OFFSET (14*8) | 1009 | CFI_ADJUST_CFA_OFFSET (14*8) |
1010 | movq %rsi,13*8(%rsp) | 1010 | movq %rsi,13*8(%rsp) |
@@ -1015,7 +1015,7 @@ KPROBE_ENTRY(error_entry) | |||
1015 | CFI_REL_OFFSET rdx,RDX | 1015 | CFI_REL_OFFSET rdx,RDX |
1016 | movq %rcx,11*8(%rsp) | 1016 | movq %rcx,11*8(%rsp) |
1017 | CFI_REL_OFFSET rcx,RCX | 1017 | CFI_REL_OFFSET rcx,RCX |
1018 | movq %rsi,10*8(%rsp) /* store rax */ | 1018 | movq %rsi,10*8(%rsp) /* store rax */ |
1019 | CFI_REL_OFFSET rax,RAX | 1019 | CFI_REL_OFFSET rax,RAX |
1020 | movq %r8, 9*8(%rsp) | 1020 | movq %r8, 9*8(%rsp) |
1021 | CFI_REL_OFFSET r8,R8 | 1021 | CFI_REL_OFFSET r8,R8 |
@@ -1025,29 +1025,29 @@ KPROBE_ENTRY(error_entry) | |||
1025 | CFI_REL_OFFSET r10,R10 | 1025 | CFI_REL_OFFSET r10,R10 |
1026 | movq %r11,6*8(%rsp) | 1026 | movq %r11,6*8(%rsp) |
1027 | CFI_REL_OFFSET r11,R11 | 1027 | CFI_REL_OFFSET r11,R11 |
1028 | movq %rbx,5*8(%rsp) | 1028 | movq %rbx,5*8(%rsp) |
1029 | CFI_REL_OFFSET rbx,RBX | 1029 | CFI_REL_OFFSET rbx,RBX |
1030 | movq %rbp,4*8(%rsp) | 1030 | movq %rbp,4*8(%rsp) |
1031 | CFI_REL_OFFSET rbp,RBP | 1031 | CFI_REL_OFFSET rbp,RBP |
1032 | movq %r12,3*8(%rsp) | 1032 | movq %r12,3*8(%rsp) |
1033 | CFI_REL_OFFSET r12,R12 | 1033 | CFI_REL_OFFSET r12,R12 |
1034 | movq %r13,2*8(%rsp) | 1034 | movq %r13,2*8(%rsp) |
1035 | CFI_REL_OFFSET r13,R13 | 1035 | CFI_REL_OFFSET r13,R13 |
1036 | movq %r14,1*8(%rsp) | 1036 | movq %r14,1*8(%rsp) |
1037 | CFI_REL_OFFSET r14,R14 | 1037 | CFI_REL_OFFSET r14,R14 |
1038 | movq %r15,(%rsp) | 1038 | movq %r15,(%rsp) |
1039 | CFI_REL_OFFSET r15,R15 | 1039 | CFI_REL_OFFSET r15,R15 |
1040 | xorl %ebx,%ebx | 1040 | xorl %ebx,%ebx |
1041 | testl $3,CS(%rsp) | 1041 | testl $3,CS(%rsp) |
1042 | je error_kernelspace | 1042 | je error_kernelspace |
1043 | error_swapgs: | 1043 | error_swapgs: |
1044 | SWAPGS | 1044 | SWAPGS |
1045 | error_sti: | 1045 | error_sti: |
1046 | TRACE_IRQS_OFF | 1046 | TRACE_IRQS_OFF |
1047 | movq %rdi,RDI(%rsp) | 1047 | movq %rdi,RDI(%rsp) |
1048 | CFI_REL_OFFSET rdi,RDI | 1048 | CFI_REL_OFFSET rdi,RDI |
1049 | movq %rsp,%rdi | 1049 | movq %rsp,%rdi |
1050 | movq ORIG_RAX(%rsp),%rsi /* get error code */ | 1050 | movq ORIG_RAX(%rsp),%rsi /* get error code */ |
1051 | movq $-1,ORIG_RAX(%rsp) | 1051 | movq $-1,ORIG_RAX(%rsp) |
1052 | call *%rax | 1052 | call *%rax |
1053 | /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */ | 1053 | /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */ |
@@ -1056,7 +1056,7 @@ error_exit: | |||
1056 | RESTORE_REST | 1056 | RESTORE_REST |
1057 | DISABLE_INTERRUPTS(CLBR_NONE) | 1057 | DISABLE_INTERRUPTS(CLBR_NONE) |
1058 | TRACE_IRQS_OFF | 1058 | TRACE_IRQS_OFF |
1059 | GET_THREAD_INFO(%rcx) | 1059 | GET_THREAD_INFO(%rcx) |
1060 | testl %eax,%eax | 1060 | testl %eax,%eax |
1061 | jne retint_kernel | 1061 | jne retint_kernel |
1062 | LOCKDEP_SYS_EXIT_IRQ | 1062 | LOCKDEP_SYS_EXIT_IRQ |
@@ -1072,7 +1072,7 @@ error_kernelspace: | |||
1072 | /* There are two places in the kernel that can potentially fault with | 1072 | /* There are two places in the kernel that can potentially fault with |
1073 | usergs. Handle them here. The exception handlers after | 1073 | usergs. Handle them here. The exception handlers after |
1074 | iret run with kernel gs again, so don't set the user space flag. | 1074 | iret run with kernel gs again, so don't set the user space flag. |
1075 | B stepping K8s sometimes report an truncated RIP for IRET | 1075 | B stepping K8s sometimes report an truncated RIP for IRET |
1076 | exceptions returning to compat mode. Check for these here too. */ | 1076 | exceptions returning to compat mode. Check for these here too. */ |
1077 | leaq irq_return(%rip),%rcx | 1077 | leaq irq_return(%rip),%rcx |
1078 | cmpq %rcx,RIP(%rsp) | 1078 | cmpq %rcx,RIP(%rsp) |
@@ -1084,17 +1084,17 @@ error_kernelspace: | |||
1084 | je error_swapgs | 1084 | je error_swapgs |
1085 | jmp error_sti | 1085 | jmp error_sti |
1086 | KPROBE_END(error_entry) | 1086 | KPROBE_END(error_entry) |
1087 | 1087 | ||
1088 | /* Reload gs selector with exception handling */ | 1088 | /* Reload gs selector with exception handling */ |
1089 | /* edi: new selector */ | 1089 | /* edi: new selector */ |
1090 | ENTRY(native_load_gs_index) | 1090 | ENTRY(native_load_gs_index) |
1091 | CFI_STARTPROC | 1091 | CFI_STARTPROC |
1092 | pushf | 1092 | pushf |
1093 | CFI_ADJUST_CFA_OFFSET 8 | 1093 | CFI_ADJUST_CFA_OFFSET 8 |
1094 | DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI)) | 1094 | DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI)) |
1095 | SWAPGS | 1095 | SWAPGS |
1096 | gs_change: | 1096 | gs_change: |
1097 | movl %edi,%gs | 1097 | movl %edi,%gs |
1098 | 2: mfence /* workaround */ | 1098 | 2: mfence /* workaround */ |
1099 | SWAPGS | 1099 | SWAPGS |
1100 | popf | 1100 | popf |
@@ -1102,20 +1102,20 @@ gs_change: | |||
1102 | ret | 1102 | ret |
1103 | CFI_ENDPROC | 1103 | CFI_ENDPROC |
1104 | ENDPROC(native_load_gs_index) | 1104 | ENDPROC(native_load_gs_index) |
1105 | 1105 | ||
1106 | .section __ex_table,"a" | 1106 | .section __ex_table,"a" |
1107 | .align 8 | 1107 | .align 8 |
1108 | .quad gs_change,bad_gs | 1108 | .quad gs_change,bad_gs |
1109 | .previous | 1109 | .previous |
1110 | .section .fixup,"ax" | 1110 | .section .fixup,"ax" |
1111 | /* running with kernelgs */ | 1111 | /* running with kernelgs */ |
1112 | bad_gs: | 1112 | bad_gs: |
1113 | SWAPGS /* switch back to user gs */ | 1113 | SWAPGS /* switch back to user gs */ |
1114 | xorl %eax,%eax | 1114 | xorl %eax,%eax |
1115 | movl %eax,%gs | 1115 | movl %eax,%gs |
1116 | jmp 2b | 1116 | jmp 2b |
1117 | .previous | 1117 | .previous |
1118 | 1118 | ||
1119 | /* | 1119 | /* |
1120 | * Create a kernel thread. | 1120 | * Create a kernel thread. |
1121 | * | 1121 | * |
@@ -1138,7 +1138,7 @@ ENTRY(kernel_thread) | |||
1138 | 1138 | ||
1139 | xorl %r8d,%r8d | 1139 | xorl %r8d,%r8d |
1140 | xorl %r9d,%r9d | 1140 | xorl %r9d,%r9d |
1141 | 1141 | ||
1142 | # clone now | 1142 | # clone now |
1143 | call do_fork | 1143 | call do_fork |
1144 | movq %rax,RAX(%rsp) | 1144 | movq %rax,RAX(%rsp) |
@@ -1149,14 +1149,14 @@ ENTRY(kernel_thread) | |||
1149 | * so internally to the x86_64 port you can rely on kernel_thread() | 1149 | * so internally to the x86_64 port you can rely on kernel_thread() |
1150 | * not to reschedule the child before returning, this avoids the need | 1150 | * not to reschedule the child before returning, this avoids the need |
1151 | * of hacks for example to fork off the per-CPU idle tasks. | 1151 | * of hacks for example to fork off the per-CPU idle tasks. |
1152 | * [Hopefully no generic code relies on the reschedule -AK] | 1152 | * [Hopefully no generic code relies on the reschedule -AK] |
1153 | */ | 1153 | */ |
1154 | RESTORE_ALL | 1154 | RESTORE_ALL |
1155 | UNFAKE_STACK_FRAME | 1155 | UNFAKE_STACK_FRAME |
1156 | ret | 1156 | ret |
1157 | CFI_ENDPROC | 1157 | CFI_ENDPROC |
1158 | ENDPROC(kernel_thread) | 1158 | ENDPROC(kernel_thread) |
1159 | 1159 | ||
1160 | child_rip: | 1160 | child_rip: |
1161 | pushq $0 # fake return address | 1161 | pushq $0 # fake return address |
1162 | CFI_STARTPROC | 1162 | CFI_STARTPROC |
@@ -1191,10 +1191,10 @@ ENDPROC(child_rip) | |||
1191 | ENTRY(kernel_execve) | 1191 | ENTRY(kernel_execve) |
1192 | CFI_STARTPROC | 1192 | CFI_STARTPROC |
1193 | FAKE_STACK_FRAME $0 | 1193 | FAKE_STACK_FRAME $0 |
1194 | SAVE_ALL | 1194 | SAVE_ALL |
1195 | movq %rsp,%rcx | 1195 | movq %rsp,%rcx |
1196 | call sys_execve | 1196 | call sys_execve |
1197 | movq %rax, RAX(%rsp) | 1197 | movq %rax, RAX(%rsp) |
1198 | RESTORE_REST | 1198 | RESTORE_REST |
1199 | testq %rax,%rax | 1199 | testq %rax,%rax |
1200 | je int_ret_from_sys_call | 1200 | je int_ret_from_sys_call |
@@ -1213,7 +1213,7 @@ ENTRY(coprocessor_error) | |||
1213 | END(coprocessor_error) | 1213 | END(coprocessor_error) |
1214 | 1214 | ||
1215 | ENTRY(simd_coprocessor_error) | 1215 | ENTRY(simd_coprocessor_error) |
1216 | zeroentry do_simd_coprocessor_error | 1216 | zeroentry do_simd_coprocessor_error |
1217 | END(simd_coprocessor_error) | 1217 | END(simd_coprocessor_error) |
1218 | 1218 | ||
1219 | ENTRY(device_not_available) | 1219 | ENTRY(device_not_available) |
@@ -1225,12 +1225,12 @@ KPROBE_ENTRY(debug) | |||
1225 | INTR_FRAME | 1225 | INTR_FRAME |
1226 | PARAVIRT_ADJUST_EXCEPTION_FRAME | 1226 | PARAVIRT_ADJUST_EXCEPTION_FRAME |
1227 | pushq $0 | 1227 | pushq $0 |
1228 | CFI_ADJUST_CFA_OFFSET 8 | 1228 | CFI_ADJUST_CFA_OFFSET 8 |
1229 | paranoidentry do_debug, DEBUG_STACK | 1229 | paranoidentry do_debug, DEBUG_STACK |
1230 | paranoidexit | 1230 | paranoidexit |
1231 | KPROBE_END(debug) | 1231 | KPROBE_END(debug) |
1232 | 1232 | ||
1233 | /* runs on exception stack */ | 1233 | /* runs on exception stack */ |
1234 | KPROBE_ENTRY(nmi) | 1234 | KPROBE_ENTRY(nmi) |
1235 | INTR_FRAME | 1235 | INTR_FRAME |
1236 | PARAVIRT_ADJUST_EXCEPTION_FRAME | 1236 | PARAVIRT_ADJUST_EXCEPTION_FRAME |
@@ -1264,7 +1264,7 @@ ENTRY(bounds) | |||
1264 | END(bounds) | 1264 | END(bounds) |
1265 | 1265 | ||
1266 | ENTRY(invalid_op) | 1266 | ENTRY(invalid_op) |
1267 | zeroentry do_invalid_op | 1267 | zeroentry do_invalid_op |
1268 | END(invalid_op) | 1268 | END(invalid_op) |
1269 | 1269 | ||
1270 | ENTRY(coprocessor_segment_overrun) | 1270 | ENTRY(coprocessor_segment_overrun) |
@@ -1319,7 +1319,7 @@ ENTRY(machine_check) | |||
1319 | INTR_FRAME | 1319 | INTR_FRAME |
1320 | PARAVIRT_ADJUST_EXCEPTION_FRAME | 1320 | PARAVIRT_ADJUST_EXCEPTION_FRAME |
1321 | pushq $0 | 1321 | pushq $0 |
1322 | CFI_ADJUST_CFA_OFFSET 8 | 1322 | CFI_ADJUST_CFA_OFFSET 8 |
1323 | paranoidentry do_machine_check | 1323 | paranoidentry do_machine_check |
1324 | jmp paranoid_exit1 | 1324 | jmp paranoid_exit1 |
1325 | CFI_ENDPROC | 1325 | CFI_ENDPROC |