diff options
Diffstat (limited to 'arch/x86/kernel/entry_64.S')
-rw-r--r-- | arch/x86/kernel/entry_64.S | 170 |
1 file changed, 140 insertions, 30 deletions
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 556a8df522a7..ae63e584c340 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -51,16 +51,121 @@ | |||
51 | #include <asm/page.h> | 51 | #include <asm/page.h> |
52 | #include <asm/irqflags.h> | 52 | #include <asm/irqflags.h> |
53 | #include <asm/paravirt.h> | 53 | #include <asm/paravirt.h> |
54 | #include <asm/ftrace.h> | ||
54 | 55 | ||
55 | .code64 | 56 | .code64 |
56 | 57 | ||
58 | #ifdef CONFIG_FTRACE | ||
59 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
60 | ENTRY(mcount) | ||
61 | |||
62 | subq $0x38, %rsp | ||
63 | movq %rax, (%rsp) | ||
64 | movq %rcx, 8(%rsp) | ||
65 | movq %rdx, 16(%rsp) | ||
66 | movq %rsi, 24(%rsp) | ||
67 | movq %rdi, 32(%rsp) | ||
68 | movq %r8, 40(%rsp) | ||
69 | movq %r9, 48(%rsp) | ||
70 | |||
71 | movq 0x38(%rsp), %rdi | ||
72 | subq $MCOUNT_INSN_SIZE, %rdi | ||
73 | |||
74 | .globl mcount_call | ||
75 | mcount_call: | ||
76 | call ftrace_stub | ||
77 | |||
78 | movq 48(%rsp), %r9 | ||
79 | movq 40(%rsp), %r8 | ||
80 | movq 32(%rsp), %rdi | ||
81 | movq 24(%rsp), %rsi | ||
82 | movq 16(%rsp), %rdx | ||
83 | movq 8(%rsp), %rcx | ||
84 | movq (%rsp), %rax | ||
85 | addq $0x38, %rsp | ||
86 | |||
87 | retq | ||
88 | END(mcount) | ||
89 | |||
90 | ENTRY(ftrace_caller) | ||
91 | |||
92 | /* taken from glibc */ | ||
93 | subq $0x38, %rsp | ||
94 | movq %rax, (%rsp) | ||
95 | movq %rcx, 8(%rsp) | ||
96 | movq %rdx, 16(%rsp) | ||
97 | movq %rsi, 24(%rsp) | ||
98 | movq %rdi, 32(%rsp) | ||
99 | movq %r8, 40(%rsp) | ||
100 | movq %r9, 48(%rsp) | ||
101 | |||
102 | movq 0x38(%rsp), %rdi | ||
103 | movq 8(%rbp), %rsi | ||
104 | subq $MCOUNT_INSN_SIZE, %rdi | ||
105 | |||
106 | .globl ftrace_call | ||
107 | ftrace_call: | ||
108 | call ftrace_stub | ||
109 | |||
110 | movq 48(%rsp), %r9 | ||
111 | movq 40(%rsp), %r8 | ||
112 | movq 32(%rsp), %rdi | ||
113 | movq 24(%rsp), %rsi | ||
114 | movq 16(%rsp), %rdx | ||
115 | movq 8(%rsp), %rcx | ||
116 | movq (%rsp), %rax | ||
117 | addq $0x38, %rsp | ||
118 | |||
119 | .globl ftrace_stub | ||
120 | ftrace_stub: | ||
121 | retq | ||
122 | END(ftrace_caller) | ||
123 | |||
124 | #else /* ! CONFIG_DYNAMIC_FTRACE */ | ||
125 | ENTRY(mcount) | ||
126 | cmpq $ftrace_stub, ftrace_trace_function | ||
127 | jnz trace | ||
128 | .globl ftrace_stub | ||
129 | ftrace_stub: | ||
130 | retq | ||
131 | |||
132 | trace: | ||
133 | /* taken from glibc */ | ||
134 | subq $0x38, %rsp | ||
135 | movq %rax, (%rsp) | ||
136 | movq %rcx, 8(%rsp) | ||
137 | movq %rdx, 16(%rsp) | ||
138 | movq %rsi, 24(%rsp) | ||
139 | movq %rdi, 32(%rsp) | ||
140 | movq %r8, 40(%rsp) | ||
141 | movq %r9, 48(%rsp) | ||
142 | |||
143 | movq 0x38(%rsp), %rdi | ||
144 | movq 8(%rbp), %rsi | ||
145 | subq $MCOUNT_INSN_SIZE, %rdi | ||
146 | |||
147 | call *ftrace_trace_function | ||
148 | |||
149 | movq 48(%rsp), %r9 | ||
150 | movq 40(%rsp), %r8 | ||
151 | movq 32(%rsp), %rdi | ||
152 | movq 24(%rsp), %rsi | ||
153 | movq 16(%rsp), %rdx | ||
154 | movq 8(%rsp), %rcx | ||
155 | movq (%rsp), %rax | ||
156 | addq $0x38, %rsp | ||
157 | |||
158 | jmp ftrace_stub | ||
159 | END(mcount) | ||
160 | #endif /* CONFIG_DYNAMIC_FTRACE */ | ||
161 | #endif /* CONFIG_FTRACE */ | ||
162 | |||
57 | #ifndef CONFIG_PREEMPT | 163 | #ifndef CONFIG_PREEMPT |
58 | #define retint_kernel retint_restore_args | 164 | #define retint_kernel retint_restore_args |
59 | #endif | 165 | #endif |
60 | 166 | ||
61 | #ifdef CONFIG_PARAVIRT | 167 | #ifdef CONFIG_PARAVIRT |
62 | ENTRY(native_irq_enable_syscall_ret) | 168 | ENTRY(native_usergs_sysret64) |
63 | movq %gs:pda_oldrsp,%rsp | ||
64 | swapgs | 169 | swapgs |
65 | sysretq | 170 | sysretq |
66 | #endif /* CONFIG_PARAVIRT */ | 171 | #endif /* CONFIG_PARAVIRT */ |
@@ -104,7 +209,7 @@ ENTRY(native_irq_enable_syscall_ret) | |||
104 | .macro FAKE_STACK_FRAME child_rip | 209 | .macro FAKE_STACK_FRAME child_rip |
105 | /* push in order ss, rsp, eflags, cs, rip */ | 210 | /* push in order ss, rsp, eflags, cs, rip */ |
106 | xorl %eax, %eax | 211 | xorl %eax, %eax |
107 | pushq %rax /* ss */ | 212 | pushq $__KERNEL_DS /* ss */ |
108 | CFI_ADJUST_CFA_OFFSET 8 | 213 | CFI_ADJUST_CFA_OFFSET 8 |
109 | /*CFI_REL_OFFSET ss,0*/ | 214 | /*CFI_REL_OFFSET ss,0*/ |
110 | pushq %rax /* rsp */ | 215 | pushq %rax /* rsp */ |
@@ -169,13 +274,13 @@ ENTRY(ret_from_fork) | |||
169 | CFI_ADJUST_CFA_OFFSET -4 | 274 | CFI_ADJUST_CFA_OFFSET -4 |
170 | call schedule_tail | 275 | call schedule_tail |
171 | GET_THREAD_INFO(%rcx) | 276 | GET_THREAD_INFO(%rcx) |
172 | testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx) | 277 | testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx) |
173 | jnz rff_trace | 278 | jnz rff_trace |
174 | rff_action: | 279 | rff_action: |
175 | RESTORE_REST | 280 | RESTORE_REST |
176 | testl $3,CS-ARGOFFSET(%rsp) # from kernel_thread? | 281 | testl $3,CS-ARGOFFSET(%rsp) # from kernel_thread? |
177 | je int_ret_from_sys_call | 282 | je int_ret_from_sys_call |
178 | testl $_TIF_IA32,threadinfo_flags(%rcx) | 283 | testl $_TIF_IA32,TI_flags(%rcx) |
179 | jnz int_ret_from_sys_call | 284 | jnz int_ret_from_sys_call |
180 | RESTORE_TOP_OF_STACK %rdi,ARGOFFSET | 285 | RESTORE_TOP_OF_STACK %rdi,ARGOFFSET |
181 | jmp ret_from_sys_call | 286 | jmp ret_from_sys_call |
@@ -244,7 +349,8 @@ ENTRY(system_call_after_swapgs) | |||
244 | movq %rcx,RIP-ARGOFFSET(%rsp) | 349 | movq %rcx,RIP-ARGOFFSET(%rsp) |
245 | CFI_REL_OFFSET rip,RIP-ARGOFFSET | 350 | CFI_REL_OFFSET rip,RIP-ARGOFFSET |
246 | GET_THREAD_INFO(%rcx) | 351 | GET_THREAD_INFO(%rcx) |
247 | testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx) | 352 | testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP), \ |
353 | TI_flags(%rcx) | ||
248 | jnz tracesys | 354 | jnz tracesys |
249 | cmpq $__NR_syscall_max,%rax | 355 | cmpq $__NR_syscall_max,%rax |
250 | ja badsys | 356 | ja badsys |
@@ -263,7 +369,7 @@ sysret_check: | |||
263 | GET_THREAD_INFO(%rcx) | 369 | GET_THREAD_INFO(%rcx) |
264 | DISABLE_INTERRUPTS(CLBR_NONE) | 370 | DISABLE_INTERRUPTS(CLBR_NONE) |
265 | TRACE_IRQS_OFF | 371 | TRACE_IRQS_OFF |
266 | movl threadinfo_flags(%rcx),%edx | 372 | movl TI_flags(%rcx),%edx |
267 | andl %edi,%edx | 373 | andl %edi,%edx |
268 | jnz sysret_careful | 374 | jnz sysret_careful |
269 | CFI_REMEMBER_STATE | 375 | CFI_REMEMBER_STATE |
@@ -275,7 +381,8 @@ sysret_check: | |||
275 | CFI_REGISTER rip,rcx | 381 | CFI_REGISTER rip,rcx |
276 | RESTORE_ARGS 0,-ARG_SKIP,1 | 382 | RESTORE_ARGS 0,-ARG_SKIP,1 |
277 | /*CFI_REGISTER rflags,r11*/ | 383 | /*CFI_REGISTER rflags,r11*/ |
278 | ENABLE_INTERRUPTS_SYSCALL_RET | 384 | movq %gs:pda_oldrsp, %rsp |
385 | USERGS_SYSRET64 | ||
279 | 386 | ||
280 | CFI_RESTORE_STATE | 387 | CFI_RESTORE_STATE |
281 | /* Handle reschedules */ | 388 | /* Handle reschedules */ |
@@ -305,7 +412,7 @@ sysret_signal: | |||
305 | leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1 | 412 | leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1 |
306 | xorl %esi,%esi # oldset -> arg2 | 413 | xorl %esi,%esi # oldset -> arg2 |
307 | call ptregscall_common | 414 | call ptregscall_common |
308 | 1: movl $_TIF_NEED_RESCHED,%edi | 415 | 1: movl $_TIF_WORK_MASK,%edi |
309 | /* Use IRET because user could have changed frame. This | 416 | /* Use IRET because user could have changed frame. This |
310 | works because ptregscall_common has called FIXUP_TOP_OF_STACK. */ | 417 | works because ptregscall_common has called FIXUP_TOP_OF_STACK. */ |
311 | DISABLE_INTERRUPTS(CLBR_NONE) | 418 | DISABLE_INTERRUPTS(CLBR_NONE) |
@@ -347,10 +454,10 @@ int_ret_from_sys_call: | |||
347 | int_with_check: | 454 | int_with_check: |
348 | LOCKDEP_SYS_EXIT_IRQ | 455 | LOCKDEP_SYS_EXIT_IRQ |
349 | GET_THREAD_INFO(%rcx) | 456 | GET_THREAD_INFO(%rcx) |
350 | movl threadinfo_flags(%rcx),%edx | 457 | movl TI_flags(%rcx),%edx |
351 | andl %edi,%edx | 458 | andl %edi,%edx |
352 | jnz int_careful | 459 | jnz int_careful |
353 | andl $~TS_COMPAT,threadinfo_status(%rcx) | 460 | andl $~TS_COMPAT,TI_status(%rcx) |
354 | jmp retint_swapgs | 461 | jmp retint_swapgs |
355 | 462 | ||
356 | /* Either reschedule or signal or syscall exit tracking needed. */ | 463 | /* Either reschedule or signal or syscall exit tracking needed. */ |
@@ -393,7 +500,7 @@ int_signal: | |||
393 | movq %rsp,%rdi # &ptregs -> arg1 | 500 | movq %rsp,%rdi # &ptregs -> arg1 |
394 | xorl %esi,%esi # oldset -> arg2 | 501 | xorl %esi,%esi # oldset -> arg2 |
395 | call do_notify_resume | 502 | call do_notify_resume |
396 | 1: movl $_TIF_NEED_RESCHED,%edi | 503 | 1: movl $_TIF_WORK_MASK,%edi |
397 | int_restore_rest: | 504 | int_restore_rest: |
398 | RESTORE_REST | 505 | RESTORE_REST |
399 | DISABLE_INTERRUPTS(CLBR_NONE) | 506 | DISABLE_INTERRUPTS(CLBR_NONE) |
@@ -420,7 +527,6 @@ END(\label) | |||
420 | PTREGSCALL stub_clone, sys_clone, %r8 | 527 | PTREGSCALL stub_clone, sys_clone, %r8 |
421 | PTREGSCALL stub_fork, sys_fork, %rdi | 528 | PTREGSCALL stub_fork, sys_fork, %rdi |
422 | PTREGSCALL stub_vfork, sys_vfork, %rdi | 529 | PTREGSCALL stub_vfork, sys_vfork, %rdi |
423 | PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx | ||
424 | PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx | 530 | PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx |
425 | PTREGSCALL stub_iopl, sys_iopl, %rsi | 531 | PTREGSCALL stub_iopl, sys_iopl, %rsi |
426 | 532 | ||
@@ -559,7 +665,7 @@ retint_with_reschedule: | |||
559 | movl $_TIF_WORK_MASK,%edi | 665 | movl $_TIF_WORK_MASK,%edi |
560 | retint_check: | 666 | retint_check: |
561 | LOCKDEP_SYS_EXIT_IRQ | 667 | LOCKDEP_SYS_EXIT_IRQ |
562 | movl threadinfo_flags(%rcx),%edx | 668 | movl TI_flags(%rcx),%edx |
563 | andl %edi,%edx | 669 | andl %edi,%edx |
564 | CFI_REMEMBER_STATE | 670 | CFI_REMEMBER_STATE |
565 | jnz retint_careful | 671 | jnz retint_careful |
@@ -647,17 +753,16 @@ retint_signal: | |||
647 | RESTORE_REST | 753 | RESTORE_REST |
648 | DISABLE_INTERRUPTS(CLBR_NONE) | 754 | DISABLE_INTERRUPTS(CLBR_NONE) |
649 | TRACE_IRQS_OFF | 755 | TRACE_IRQS_OFF |
650 | movl $_TIF_NEED_RESCHED,%edi | ||
651 | GET_THREAD_INFO(%rcx) | 756 | GET_THREAD_INFO(%rcx) |
652 | jmp retint_check | 757 | jmp retint_with_reschedule |
653 | 758 | ||
654 | #ifdef CONFIG_PREEMPT | 759 | #ifdef CONFIG_PREEMPT |
655 | /* Returning to kernel space. Check if we need preemption */ | 760 | /* Returning to kernel space. Check if we need preemption */ |
656 | /* rcx: threadinfo. interrupts off. */ | 761 | /* rcx: threadinfo. interrupts off. */ |
657 | ENTRY(retint_kernel) | 762 | ENTRY(retint_kernel) |
658 | cmpl $0,threadinfo_preempt_count(%rcx) | 763 | cmpl $0,TI_preempt_count(%rcx) |
659 | jnz retint_restore_args | 764 | jnz retint_restore_args |
660 | bt $TIF_NEED_RESCHED,threadinfo_flags(%rcx) | 765 | bt $TIF_NEED_RESCHED,TI_flags(%rcx) |
661 | jnc retint_restore_args | 766 | jnc retint_restore_args |
662 | bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */ | 767 | bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */ |
663 | jnc retint_restore_args | 768 | jnc retint_restore_args |
@@ -711,6 +816,9 @@ END(invalidate_interrupt\num) | |||
711 | ENTRY(call_function_interrupt) | 816 | ENTRY(call_function_interrupt) |
712 | apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt | 817 | apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt |
713 | END(call_function_interrupt) | 818 | END(call_function_interrupt) |
819 | ENTRY(call_function_single_interrupt) | ||
820 | apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt | ||
821 | END(call_function_single_interrupt) | ||
714 | ENTRY(irq_move_cleanup_interrupt) | 822 | ENTRY(irq_move_cleanup_interrupt) |
715 | apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt | 823 | apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt |
716 | END(irq_move_cleanup_interrupt) | 824 | END(irq_move_cleanup_interrupt) |
@@ -720,6 +828,10 @@ ENTRY(apic_timer_interrupt) | |||
720 | apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt | 828 | apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt |
721 | END(apic_timer_interrupt) | 829 | END(apic_timer_interrupt) |
722 | 830 | ||
831 | ENTRY(uv_bau_message_intr1) | ||
832 | apicinterrupt 220,uv_bau_message_interrupt | ||
833 | END(uv_bau_message_intr1) | ||
834 | |||
723 | ENTRY(error_interrupt) | 835 | ENTRY(error_interrupt) |
724 | apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt | 836 | apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt |
725 | END(error_interrupt) | 837 | END(error_interrupt) |
@@ -733,6 +845,7 @@ END(spurious_interrupt) | |||
733 | */ | 845 | */ |
734 | .macro zeroentry sym | 846 | .macro zeroentry sym |
735 | INTR_FRAME | 847 | INTR_FRAME |
848 | PARAVIRT_ADJUST_EXCEPTION_FRAME | ||
736 | pushq $0 /* push error code/oldrax */ | 849 | pushq $0 /* push error code/oldrax */ |
737 | CFI_ADJUST_CFA_OFFSET 8 | 850 | CFI_ADJUST_CFA_OFFSET 8 |
738 | pushq %rax /* push real oldrax to the rdi slot */ | 851 | pushq %rax /* push real oldrax to the rdi slot */ |
@@ -745,6 +858,7 @@ END(spurious_interrupt) | |||
745 | 858 | ||
746 | .macro errorentry sym | 859 | .macro errorentry sym |
747 | XCPT_FRAME | 860 | XCPT_FRAME |
861 | PARAVIRT_ADJUST_EXCEPTION_FRAME | ||
748 | pushq %rax | 862 | pushq %rax |
749 | CFI_ADJUST_CFA_OFFSET 8 | 863 | CFI_ADJUST_CFA_OFFSET 8 |
750 | CFI_REL_OFFSET rax,0 | 864 | CFI_REL_OFFSET rax,0 |
@@ -814,7 +928,7 @@ paranoid_restore\trace: | |||
814 | jmp irq_return | 928 | jmp irq_return |
815 | paranoid_userspace\trace: | 929 | paranoid_userspace\trace: |
816 | GET_THREAD_INFO(%rcx) | 930 | GET_THREAD_INFO(%rcx) |
817 | movl threadinfo_flags(%rcx),%ebx | 931 | movl TI_flags(%rcx),%ebx |
818 | andl $_TIF_WORK_MASK,%ebx | 932 | andl $_TIF_WORK_MASK,%ebx |
819 | jz paranoid_swapgs\trace | 933 | jz paranoid_swapgs\trace |
820 | movq %rsp,%rdi /* &pt_regs */ | 934 | movq %rsp,%rdi /* &pt_regs */ |
@@ -912,7 +1026,7 @@ error_exit: | |||
912 | testl %eax,%eax | 1026 | testl %eax,%eax |
913 | jne retint_kernel | 1027 | jne retint_kernel |
914 | LOCKDEP_SYS_EXIT_IRQ | 1028 | LOCKDEP_SYS_EXIT_IRQ |
915 | movl threadinfo_flags(%rcx),%edx | 1029 | movl TI_flags(%rcx),%edx |
916 | movl $_TIF_WORK_MASK,%edi | 1030 | movl $_TIF_WORK_MASK,%edi |
917 | andl %edi,%edx | 1031 | andl %edi,%edx |
918 | jnz retint_careful | 1032 | jnz retint_careful |
@@ -926,11 +1040,11 @@ error_kernelspace: | |||
926 | iret run with kernel gs again, so don't set the user space flag. | 1040 | iret run with kernel gs again, so don't set the user space flag. |
927 | B stepping K8s sometimes report a truncated RIP for IRET | 1041 | B stepping K8s sometimes report a truncated RIP for IRET |
928 | exceptions returning to compat mode. Check for these here too. */ | 1042 | exceptions returning to compat mode. Check for these here too. */ |
929 | leaq irq_return(%rip),%rbp | 1043 | leaq irq_return(%rip),%rcx |
930 | cmpq %rbp,RIP(%rsp) | 1044 | cmpq %rcx,RIP(%rsp) |
931 | je error_swapgs | 1045 | je error_swapgs |
932 | movl %ebp,%ebp /* zero extend */ | 1046 | movl %ecx,%ecx /* zero extend */ |
933 | cmpq %rbp,RIP(%rsp) | 1047 | cmpq %rcx,RIP(%rsp) |
934 | je error_swapgs | 1048 | je error_swapgs |
935 | cmpq $gs_change,RIP(%rsp) | 1049 | cmpq $gs_change,RIP(%rsp) |
936 | je error_swapgs | 1050 | je error_swapgs |
@@ -939,7 +1053,7 @@ KPROBE_END(error_entry) | |||
939 | 1053 | ||
940 | /* Reload gs selector with exception handling */ | 1054 | /* Reload gs selector with exception handling */ |
941 | /* edi: new selector */ | 1055 | /* edi: new selector */ |
942 | ENTRY(load_gs_index) | 1056 | ENTRY(native_load_gs_index) |
943 | CFI_STARTPROC | 1057 | CFI_STARTPROC |
944 | pushf | 1058 | pushf |
945 | CFI_ADJUST_CFA_OFFSET 8 | 1059 | CFI_ADJUST_CFA_OFFSET 8 |
@@ -953,7 +1067,7 @@ gs_change: | |||
953 | CFI_ADJUST_CFA_OFFSET -8 | 1067 | CFI_ADJUST_CFA_OFFSET -8 |
954 | ret | 1068 | ret |
955 | CFI_ENDPROC | 1069 | CFI_ENDPROC |
956 | ENDPROC(load_gs_index) | 1070 | ENDPROC(native_load_gs_index) |
957 | 1071 | ||
958 | .section __ex_table,"a" | 1072 | .section __ex_table,"a" |
959 | .align 8 | 1073 | .align 8 |
@@ -1120,10 +1234,6 @@ ENTRY(coprocessor_segment_overrun) | |||
1120 | zeroentry do_coprocessor_segment_overrun | 1234 | zeroentry do_coprocessor_segment_overrun |
1121 | END(coprocessor_segment_overrun) | 1235 | END(coprocessor_segment_overrun) |
1122 | 1236 | ||
1123 | ENTRY(reserved) | ||
1124 | zeroentry do_reserved | ||
1125 | END(reserved) | ||
1126 | |||
1127 | /* runs on exception stack */ | 1237 | /* runs on exception stack */ |
1128 | ENTRY(double_fault) | 1238 | ENTRY(double_fault) |
1129 | XCPT_FRAME | 1239 | XCPT_FRAME |