author     Thomas Gleixner <tglx@linutronix.de>   2007-10-11 05:17:01 -0400
committer  Thomas Gleixner <tglx@linutronix.de>   2007-10-11 05:17:01 -0400
commit     9a163ed8e0552fdcffe405d2ea7134819a81456e
tree       b322fd2afbb812ba7ddfd22f3734aaab007c2aa5 /arch/x86/kernel/entry_32.S
parent     f7627e2513987bb5d4e8cb13c4e0a478352141ac
i386: move kernel
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/entry_32.S')
 arch/x86/kernel/entry_32.S | 1112 ++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 1112 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
new file mode 100644
index 000000000000..290b7bc82da3
--- /dev/null
+++ b/arch/x86/kernel/entry_32.S
@@ -0,0 +1,1112 @@
/*
 *  linux/arch/i386/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 * on a 486.
 *
 * Stack layout in 'syscall_exit':
 *      ptrace needs to have all regs on the stack.
 *      if the order here is changed, it needs to be
 *      updated in fork.c:copy_process, signal.c:do_signal,
 *      ptrace.c and ptrace.h
 *
 *       0(%esp) - %ebx
 *       4(%esp) - %ecx
 *       8(%esp) - %edx
 *       C(%esp) - %esi
 *      10(%esp) - %edi
 *      14(%esp) - %ebp
 *      18(%esp) - %eax
 *      1C(%esp) - %ds
 *      20(%esp) - %es
 *      24(%esp) - %fs
 *      28(%esp) - orig_eax
 *      2C(%esp) - %eip
 *      30(%esp) - %cs
 *      34(%esp) - %eflags
 *      38(%esp) - %oldesp
 *      3C(%esp) - %oldss
 *
 * "current" is in register %ebx during any slow entries.
 */
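/*
 * For reference (a sketch of the i386 struct pt_regs of this era; the
 * header itself is not part of this patch), that layout corresponds to:
 *
 *      struct pt_regs {
 *              long ebx, ecx, edx, esi, edi, ebp, eax;
 *              int  xds, xes, xfs;
 *              long orig_eax, eip;
 *              int  xcs;
 *              long eflags, esp;
 *              int  xss;
 *      };
 *
 * so the PT_* offsets used throughout this file index into that structure.
 */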

#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/percpu.h>
#include <asm/dwarf2.h>
#include "irq_vectors.h"

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization.  The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka. "iret")
 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */

#define nr_syscalls ((syscall_table_size)/4)
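/*
 * syscall_table_size is defined at the very end of this file as the byte
 * size of sys_call_table; each entry is a 4-byte function pointer, so
 * dividing by 4 yields the number of system calls.
 */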

CF_MASK         = 0x00000001
TF_MASK         = 0x00000100
IF_MASK         = 0x00000200
DF_MASK         = 0x00000400
NT_MASK         = 0x00004000
VM_MASK         = 0x00020000
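/*
 * These are EFLAGS bits: CF is bit 0 (carry), TF bit 8 (trap), IF bit 9
 * (interrupt enable), DF bit 10 (direction), NT bit 14 (nested task) and
 * VM bit 17 (virtual-8086 mode).
 */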

#ifdef CONFIG_PREEMPT
#define preempt_stop(clobbers)  DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
#define preempt_stop(clobbers)
#define resume_kernel           restore_nocheck
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
        testl $IF_MASK,PT_EFLAGS(%esp)     # interrupts off?
        jz 1f
        TRACE_IRQS_ON
1:
#endif
.endm

#ifdef CONFIG_VM86
#define resume_userspace_sig    check_userspace
#else
#define resume_userspace_sig    resume_userspace
#endif

#define SAVE_ALL \
        cld; \
        pushl %fs; \
        CFI_ADJUST_CFA_OFFSET 4;\
        /*CFI_REL_OFFSET fs, 0;*/\
        pushl %es; \
        CFI_ADJUST_CFA_OFFSET 4;\
        /*CFI_REL_OFFSET es, 0;*/\
        pushl %ds; \
        CFI_ADJUST_CFA_OFFSET 4;\
        /*CFI_REL_OFFSET ds, 0;*/\
        pushl %eax; \
        CFI_ADJUST_CFA_OFFSET 4;\
        CFI_REL_OFFSET eax, 0;\
        pushl %ebp; \
        CFI_ADJUST_CFA_OFFSET 4;\
        CFI_REL_OFFSET ebp, 0;\
        pushl %edi; \
        CFI_ADJUST_CFA_OFFSET 4;\
        CFI_REL_OFFSET edi, 0;\
        pushl %esi; \
        CFI_ADJUST_CFA_OFFSET 4;\
        CFI_REL_OFFSET esi, 0;\
        pushl %edx; \
        CFI_ADJUST_CFA_OFFSET 4;\
        CFI_REL_OFFSET edx, 0;\
        pushl %ecx; \
        CFI_ADJUST_CFA_OFFSET 4;\
        CFI_REL_OFFSET ecx, 0;\
        pushl %ebx; \
        CFI_ADJUST_CFA_OFFSET 4;\
        CFI_REL_OFFSET ebx, 0;\
        movl $(__USER_DS), %edx; \
        movl %edx, %ds; \
        movl %edx, %es; \
        movl $(__KERNEL_PERCPU), %edx; \
        movl %edx, %fs
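/*
 * After SAVE_ALL the stack matches struct pt_regs (orig_eax and the
 * hardware iret frame were already there before the macro ran), and
 * %ds/%es hold __USER_DS with %fs the per-cpu segment, so handlers can
 * simply do
 *
 *      movl %esp,%eax          # pt_regs pointer
 *      call do_handler         # (placeholder name)
 *
 * as the entry points below all do.
 */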

#define RESTORE_INT_REGS \
        popl %ebx; \
        CFI_ADJUST_CFA_OFFSET -4;\
        CFI_RESTORE ebx;\
        popl %ecx; \
        CFI_ADJUST_CFA_OFFSET -4;\
        CFI_RESTORE ecx;\
        popl %edx; \
        CFI_ADJUST_CFA_OFFSET -4;\
        CFI_RESTORE edx;\
        popl %esi; \
        CFI_ADJUST_CFA_OFFSET -4;\
        CFI_RESTORE esi;\
        popl %edi; \
        CFI_ADJUST_CFA_OFFSET -4;\
        CFI_RESTORE edi;\
        popl %ebp; \
        CFI_ADJUST_CFA_OFFSET -4;\
        CFI_RESTORE ebp;\
        popl %eax; \
        CFI_ADJUST_CFA_OFFSET -4;\
        CFI_RESTORE eax

#define RESTORE_REGS \
        RESTORE_INT_REGS; \
1:      popl %ds; \
        CFI_ADJUST_CFA_OFFSET -4;\
        /*CFI_RESTORE ds;*/\
2:      popl %es; \
        CFI_ADJUST_CFA_OFFSET -4;\
        /*CFI_RESTORE es;*/\
3:      popl %fs; \
        CFI_ADJUST_CFA_OFFSET -4;\
        /*CFI_RESTORE fs;*/\
.pushsection .fixup,"ax"; \
4:      movl $0,(%esp); \
        jmp 1b; \
5:      movl $0,(%esp); \
        jmp 2b; \
6:      movl $0,(%esp); \
        jmp 3b; \
.section __ex_table,"a";\
        .align 4; \
        .long 1b,4b; \
        .long 2b,5b; \
        .long 3b,6b; \
.popsection
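/*
 * The popl of a segment register can fault if the saved selector is no
 * longer valid (e.g. the LDT changed underneath us).  Each ".long
 * fault,fixup" pair in __ex_table lets the fault handler resume at the
 * fixup code, which replaces the bad selector on the stack with 0 and
 * retries the pop.
 */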

#define RING0_INT_FRAME \
        CFI_STARTPROC simple;\
        CFI_SIGNAL_FRAME;\
        CFI_DEF_CFA esp, 3*4;\
        /*CFI_OFFSET cs, -2*4;*/\
        CFI_OFFSET eip, -3*4

#define RING0_EC_FRAME \
        CFI_STARTPROC simple;\
        CFI_SIGNAL_FRAME;\
        CFI_DEF_CFA esp, 4*4;\
        /*CFI_OFFSET cs, -2*4;*/\
        CFI_OFFSET eip, -3*4

#define RING0_PTREGS_FRAME \
        CFI_STARTPROC simple;\
        CFI_SIGNAL_FRAME;\
        CFI_DEF_CFA esp, PT_OLDESP-PT_EBX;\
        /*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/\
        CFI_OFFSET eip, PT_EIP-PT_OLDESP;\
        /*CFI_OFFSET es, PT_ES-PT_OLDESP;*/\
        /*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/\
        CFI_OFFSET eax, PT_EAX-PT_OLDESP;\
        CFI_OFFSET ebp, PT_EBP-PT_OLDESP;\
        CFI_OFFSET edi, PT_EDI-PT_OLDESP;\
        CFI_OFFSET esi, PT_ESI-PT_OLDESP;\
        CFI_OFFSET edx, PT_EDX-PT_OLDESP;\
        CFI_OFFSET ecx, PT_ECX-PT_OLDESP;\
        CFI_OFFSET ebx, PT_EBX-PT_OLDESP
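/*
 * These frame macros emit DWARF call-frame information (the CFI_* macros
 * come from asm/dwarf2.h and expand to nothing when unwind info is
 * disabled): RING0_INT_FRAME describes the 3-word eflags/cs/eip frame the
 * CPU pushes for a same-ring interrupt, RING0_EC_FRAME the 4-word variant
 * with an error code, and RING0_PTREGS_FRAME a fully saved pt_regs.
 */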

ENTRY(ret_from_fork)
        CFI_STARTPROC
        pushl %eax
        CFI_ADJUST_CFA_OFFSET 4
        call schedule_tail
        GET_THREAD_INFO(%ebp)
        popl %eax
        CFI_ADJUST_CFA_OFFSET -4
        pushl $0x0202                   # Reset kernel eflags
        CFI_ADJUST_CFA_OFFSET 4
        popfl
        CFI_ADJUST_CFA_OFFSET -4
        jmp syscall_exit
        CFI_ENDPROC
END(ret_from_fork)

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

        # userspace resumption stub bypassing syscall exit tracing
        ALIGN
        RING0_PTREGS_FRAME
ret_from_exception:
        preempt_stop(CLBR_ANY)
ret_from_intr:
        GET_THREAD_INFO(%ebp)
check_userspace:
        movl PT_EFLAGS(%esp), %eax      # mix EFLAGS and CS
        movb PT_CS(%esp), %al
        andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
        cmpl $USER_RPL, %eax
        jb resume_kernel                # not returning to v8086 or userspace

ENTRY(resume_userspace)
        DISABLE_INTERRUPTS(CLBR_ANY)    # make sure we don't miss an interrupt
                                        # setting need_resched or sigpending
                                        # between sampling and the iret
        movl TI_flags(%ebp), %ecx
        andl $_TIF_WORK_MASK, %ecx      # is there any work to be done on
                                        # int/exception return?
        jne work_pending
        jmp restore_all
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
        DISABLE_INTERRUPTS(CLBR_ANY)
        cmpl $0,TI_preempt_count(%ebp)  # non-zero preempt_count ?
        jnz restore_nocheck
need_resched:
        movl TI_flags(%ebp), %ecx       # need_resched set ?
        testb $_TIF_NEED_RESCHED, %cl
        jz restore_all
        testl $IF_MASK,PT_EFLAGS(%esp)  # interrupts off (exception path) ?
        jz restore_all
        call preempt_schedule_irq
        jmp need_resched
END(resume_kernel)
#endif
        CFI_ENDPROC

/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page.  See vsyscall-sysenter.S, which defines the symbol. */

        # sysenter call handler stub
ENTRY(sysenter_entry)
        CFI_STARTPROC simple
        CFI_SIGNAL_FRAME
        CFI_DEF_CFA esp, 0
        CFI_REGISTER esp, ebp
        movl TSS_sysenter_esp0(%esp),%esp
sysenter_past_esp:
        /*
         * No need to follow this irqs on/off section: the sysenter
         * instruction disabled irqs and here we enable them straight
         * after entry:
         */
        ENABLE_INTERRUPTS(CLBR_NONE)
        pushl $(__USER_DS)
        CFI_ADJUST_CFA_OFFSET 4
        /*CFI_REL_OFFSET ss, 0*/
        pushl %ebp
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET esp, 0
        pushfl
        CFI_ADJUST_CFA_OFFSET 4
        pushl $(__USER_CS)
        CFI_ADJUST_CFA_OFFSET 4
        /*CFI_REL_OFFSET cs, 0*/
        /*
         * Push current_thread_info()->sysenter_return to the stack.
         * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
         * pushed above; +8 corresponds to copy_thread's esp0 setting.
         */
        pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET eip, 0
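        /*
         * Working through that address: copy_thread sets esp0 to 8 bytes
         * below the top of the thread stack, and the 4 pushls above moved
         * %esp down another 16 bytes, so %esp + 8 + 4*4 is the top of the
         * stack.  Subtracting THREAD_SIZE then lands on the thread_info
         * at the stack's base, where TI_sysenter_return indexes the field.
         */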

        /*
         * Load the potential sixth argument from user stack.
         * Careful about security.
         */
        cmpl $__PAGE_OFFSET-3,%ebp
        jae syscall_fault
1:      movl (%ebp),%ebp
.section __ex_table,"a"
        .align 4
        .long 1b,syscall_fault
.previous
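        /*
         * The bound check above keeps %ebp (the user stack pointer) below
         * __PAGE_OFFSET - 3, so the 4-byte load cannot reach into kernel
         * space; if the user page is simply unmapped, the __ex_table
         * entry diverts the fault to syscall_fault instead of oopsing.
         */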

        pushl %eax
        CFI_ADJUST_CFA_OFFSET 4
        SAVE_ALL
        GET_THREAD_INFO(%ebp)

        /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
        testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
        jnz syscall_trace_entry
        cmpl $(nr_syscalls), %eax
        jae syscall_badsys
        call *sys_call_table(,%eax,4)
        movl %eax,PT_EAX(%esp)
        DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_OFF
        movl TI_flags(%ebp), %ecx
        testw $_TIF_ALLWORK_MASK, %cx
        jne syscall_exit_work
/* if something modifies registers it must also disable sysexit */
        movl PT_EIP(%esp), %edx
        movl PT_OLDESP(%esp), %ecx
        xorl %ebp,%ebp
        TRACE_IRQS_ON
1:      mov  PT_FS(%esp), %fs
        ENABLE_INTERRUPTS_SYSEXIT
        CFI_ENDPROC
.pushsection .fixup,"ax"
2:      movl $0,PT_FS(%esp)
        jmp 1b
.section __ex_table,"a"
        .align 4
        .long 1b,2b
.popsection
ENDPROC(sysenter_entry)
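/*
 * For context (a sketch, not part of this patch): userspace reaches
 * sysenter_entry via __kernel_vsyscall in the vsyscall page, which at
 * this point in history looked roughly like:
 *
 *      __kernel_vsyscall:
 *              push %ecx
 *              push %edx
 *              push %ebp
 *              movl %esp,%ebp          # saved so the kernel can find
 *              sysenter                #   the user stack again
 *      SYSENTER_RETURN:                # sysexit resumes here
 *              pop %ebp
 *              pop %edx
 *              pop %ecx
 *              ret
 *
 * which is why %ebp doubles as the user stack pointer on entry above.
 */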

        # system call handler stub
ENTRY(system_call)
        RING0_INT_FRAME                 # can't unwind into user space anyway
        pushl %eax                      # save orig_eax
        CFI_ADJUST_CFA_OFFSET 4
        SAVE_ALL
        GET_THREAD_INFO(%ebp)
                                        # system call tracing in operation / emulation
        /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
        testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
        jnz syscall_trace_entry
        cmpl $(nr_syscalls), %eax
        jae syscall_badsys
syscall_call:
        call *sys_call_table(,%eax,4)
        movl %eax,PT_EAX(%esp)          # store the return value
syscall_exit:
        DISABLE_INTERRUPTS(CLBR_ANY)    # make sure we don't miss an interrupt
                                        # setting need_resched or sigpending
                                        # between sampling and the iret
        TRACE_IRQS_OFF
        testl $TF_MASK,PT_EFLAGS(%esp)  # If tracing set singlestep flag on exit
        jz no_singlestep
        orl $_TIF_SINGLESTEP,TI_flags(%ebp)
no_singlestep:
        movl TI_flags(%ebp), %ecx
        testw $_TIF_ALLWORK_MASK, %cx   # current->work
        jne syscall_exit_work

restore_all:
        movl PT_EFLAGS(%esp), %eax      # mix EFLAGS, SS and CS
        # Warning: PT_OLDSS(%esp) contains the wrong/random values if we
        # are returning to the kernel.
        # See comments in process.c:copy_thread() for details.
        movb PT_OLDSS(%esp), %ah
        movb PT_CS(%esp), %al
        andl $(VM_MASK | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
        cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
        CFI_REMEMBER_STATE
        je ldt_ss                       # returning to user-space with LDT SS
restore_nocheck:
        TRACE_IRQS_IRET
restore_nocheck_notrace:
        RESTORE_REGS
        addl $4, %esp                   # skip orig_eax/error_code
        CFI_ADJUST_CFA_OFFSET -4
1:      INTERRUPT_RETURN
.section .fixup,"ax"
iret_exc:
        pushl $0                        # no error code
        pushl $do_iret_error
        jmp error_code
.previous
.section __ex_table,"a"
        .align 4
        .long 1b,iret_exc
.previous

        CFI_RESTORE_STATE
ldt_ss:
        larl PT_OLDSS(%esp), %eax
        jnz restore_nocheck
        testl $0x00400000, %eax         # returning to 32bit stack?
        jnz restore_nocheck             # all right, normal return

#ifdef CONFIG_PARAVIRT
        /*
         * The kernel can't run on a non-flat stack if paravirt mode
         * is active.  Rather than try to fixup the high bits of
         * ESP, bypass this code entirely.  This may break DOSemu
         * and/or Wine support in a paravirt VM, although the option
         * is still available to implement the setting of the high
         * 16-bits in the INTERRUPT_RETURN paravirt-op.
         */
        cmpl $0, paravirt_ops+PARAVIRT_enabled
        jne restore_nocheck
#endif

        /* If returning to userspace with 16bit stack,
         * try to fix the higher word of ESP, as the CPU
         * won't restore it.
         * This is an "official" bug of all the x86-compatible
         * CPUs, which we can try to work around to make
         * dosemu and wine happy. */
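        /*
         * Mechanism sketch: patch_espfix_desc rewrites the per-cpu
         * GDT_ENTRY_ESPFIX_SS descriptor and returns (in %eax) an
         * equivalent stack pointer relative to that segment's base, so
         * after the lss below the iret runs on a stack whose unrestored
         * high ESP word is harmless to the 16-bit-SS process.
         */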
        movl PT_OLDESP(%esp), %eax
        movl %esp, %edx
        call patch_espfix_desc
        pushl $__ESPFIX_SS
        CFI_ADJUST_CFA_OFFSET 4
        pushl %eax
        CFI_ADJUST_CFA_OFFSET 4
        DISABLE_INTERRUPTS(CLBR_EAX)
        TRACE_IRQS_OFF
        lss (%esp), %esp
        CFI_ADJUST_CFA_OFFSET -8
        jmp restore_nocheck
        CFI_ENDPROC
ENDPROC(system_call)

        # perform work that needs to be done immediately before resumption
        ALIGN
        RING0_PTREGS_FRAME              # can't unwind into user space anyway
work_pending:
        testb $_TIF_NEED_RESCHED, %cl
        jz work_notifysig
work_resched:
        call schedule
        DISABLE_INTERRUPTS(CLBR_ANY)    # make sure we don't miss an interrupt
                                        # setting need_resched or sigpending
                                        # between sampling and the iret
        TRACE_IRQS_OFF
        movl TI_flags(%ebp), %ecx
        andl $_TIF_WORK_MASK, %ecx      # is there any work to be done other
                                        # than syscall tracing?
        jz restore_all
        testb $_TIF_NEED_RESCHED, %cl
        jnz work_resched

work_notifysig:                         # deal with pending signals and
                                        # notify-resume requests
#ifdef CONFIG_VM86
        testl $VM_MASK, PT_EFLAGS(%esp)
        movl %esp, %eax
        jne work_notifysig_v86          # returning to kernel-space or
                                        # vm86-space
        xorl %edx, %edx
        call do_notify_resume
        jmp resume_userspace_sig

        ALIGN
work_notifysig_v86:
        pushl %ecx                      # save ti_flags for do_notify_resume
        CFI_ADJUST_CFA_OFFSET 4
        call save_v86_state             # %eax contains pt_regs pointer
        popl %ecx
        CFI_ADJUST_CFA_OFFSET -4
        movl %eax, %esp
#else
        movl %esp, %eax
#endif
        xorl %edx, %edx
        call do_notify_resume
        jmp resume_userspace_sig
END(work_pending)

        # perform syscall exit tracing
        ALIGN
syscall_trace_entry:
        movl $-ENOSYS,PT_EAX(%esp)
        movl %esp, %eax
        xorl %edx,%edx
        call do_syscall_trace
        cmpl $0, %eax
        jne resume_userspace            # ret != 0 -> running under PTRACE_SYSEMU,
                                        # so must skip actual syscall
        movl PT_ORIG_EAX(%esp), %eax
        cmpl $(nr_syscalls), %eax
        jnae syscall_call
        jmp syscall_exit
END(syscall_trace_entry)

        # perform syscall exit tracing
        ALIGN
syscall_exit_work:
        testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
        jz work_pending
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_ANY)     # could let do_syscall_trace() call
                                        # schedule() instead
        movl %esp, %eax
        movl $1, %edx
        call do_syscall_trace
        jmp resume_userspace
END(syscall_exit_work)
        CFI_ENDPROC

        RING0_INT_FRAME                 # can't unwind into user space anyway
syscall_fault:
        pushl %eax                      # save orig_eax
        CFI_ADJUST_CFA_OFFSET 4
        SAVE_ALL
        GET_THREAD_INFO(%ebp)
        movl $-EFAULT,PT_EAX(%esp)
        jmp resume_userspace
END(syscall_fault)

syscall_badsys:
        movl $-ENOSYS,PT_EAX(%esp)
        jmp resume_userspace
END(syscall_badsys)
        CFI_ENDPROC

#define FIXUP_ESPFIX_STACK \
        /* since we are on the wrong stack, we can't do this in C :( */ \
        PER_CPU(gdt_page, %ebx); \
        GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah); \
        addl %esp, %eax; \
        pushl $__KERNEL_DS; \
        CFI_ADJUST_CFA_OFFSET 4; \
        pushl %eax; \
        CFI_ADJUST_CFA_OFFSET 4; \
        lss (%esp), %esp; \
        CFI_ADJUST_CFA_OFFSET -8;
#define UNWIND_ESPFIX_STACK \
        movl %ss, %eax; \
        /* see if on espfix stack */ \
        cmpw $__ESPFIX_SS, %ax; \
        jne 27f; \
        movl $__KERNEL_DS, %eax; \
        movl %eax, %ds; \
        movl %eax, %es; \
        /* switch to normal stack */ \
        FIXUP_ESPFIX_STACK; \
27:;

/*
 * Build the entry stubs and pointer table with
 * some assembler magic.
 */
.data
ENTRY(interrupt)
.text

ENTRY(irq_entries_start)
        RING0_INT_FRAME
vector=0
.rept NR_IRQS
        ALIGN
 .if vector
        CFI_ADJUST_CFA_OFFSET -4
 .endif
1:      pushl $~(vector)
        CFI_ADJUST_CFA_OFFSET 4
        jmp common_interrupt
 .previous
        .long 1b
 .text
vector=vector+1
.endr
END(irq_entries_start)

.previous
END(interrupt)
.previous
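/*
 * Each stub pushes the ones' complement of its vector number (so the
 * orig_eax slot can be told apart from a positive syscall number; do_IRQ
 * undoes the ~) and jumps to the shared body below.  The interleaved
 * ".long 1b" directives collect the stub addresses into the interrupt[]
 * array in .data, which is used when setting up the IDT.
 */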

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
        ALIGN
common_interrupt:
        SAVE_ALL
        TRACE_IRQS_OFF
        movl %esp,%eax
        call do_IRQ
        jmp ret_from_intr
ENDPROC(common_interrupt)
        CFI_ENDPROC

#define BUILD_INTERRUPT(name, nr)       \
ENTRY(name)                             \
        RING0_INT_FRAME;                \
        pushl $~(nr);                   \
        CFI_ADJUST_CFA_OFFSET 4;        \
        SAVE_ALL;                       \
        TRACE_IRQS_OFF                  \
        movl %esp,%eax;                 \
        call smp_##name;                \
        jmp ret_from_intr;              \
        CFI_ENDPROC;                    \
ENDPROC(name)

/* The include is where all of the SMP etc. interrupts come from */
#include "entry_arch.h"
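/*
 * As an example of what that include expands to (assuming the
 * entry_arch.h of this period), a line such as
 *
 *      BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
 *
 * generates an ENTRY(reschedule_interrupt) stub that saves registers and
 * calls smp_reschedule_interrupt with the pt_regs pointer in %eax.
 */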

KPROBE_ENTRY(page_fault)
        RING0_EC_FRAME
        pushl $do_page_fault
        CFI_ADJUST_CFA_OFFSET 4
        ALIGN
error_code:
        /* the function address is in %fs's slot on the stack */
        pushl %es
        CFI_ADJUST_CFA_OFFSET 4
        /*CFI_REL_OFFSET es, 0*/
        pushl %ds
        CFI_ADJUST_CFA_OFFSET 4
        /*CFI_REL_OFFSET ds, 0*/
        pushl %eax
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET eax, 0
        pushl %ebp
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET ebp, 0
        pushl %edi
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET edi, 0
        pushl %esi
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET esi, 0
        pushl %edx
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET edx, 0
        pushl %ecx
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET ecx, 0
        pushl %ebx
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET ebx, 0
        cld
        pushl %fs
        CFI_ADJUST_CFA_OFFSET 4
        /*CFI_REL_OFFSET fs, 0*/
        movl $(__KERNEL_PERCPU), %ecx
        movl %ecx, %fs
        UNWIND_ESPFIX_STACK
        popl %ecx
        CFI_ADJUST_CFA_OFFSET -4
        /*CFI_REGISTER es, ecx*/
        movl PT_FS(%esp), %edi          # get the function address
        movl PT_ORIG_EAX(%esp), %edx    # get the error code
        movl $-1, PT_ORIG_EAX(%esp)     # no syscall to restart
        mov  %ecx, PT_FS(%esp)
        /*CFI_REL_OFFSET fs, ES*/
        movl $(__USER_DS), %ecx
        movl %ecx, %ds
        movl %ecx, %es
        movl %esp,%eax                  # pt_regs pointer
        call *%edi
        jmp ret_from_exception
        CFI_ENDPROC
KPROBE_END(page_fault)
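/*
 * Everything that jumps to error_code ends up in a C handler of the form
 * (a sketch; these were fastcall functions at the time):
 *
 *      fastcall void do_page_fault(struct pt_regs *regs,
 *                                  unsigned long error_code);
 *
 * with the pt_regs pointer in %eax and the error code in %edx -- either
 * the one pushed by the CPU or the $0 dummy pushed by the stubs below.
 */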

ENTRY(coprocessor_error)
        RING0_INT_FRAME
        pushl $0
        CFI_ADJUST_CFA_OFFSET 4
        pushl $do_coprocessor_error
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
        RING0_INT_FRAME
        pushl $0
        CFI_ADJUST_CFA_OFFSET 4
        pushl $do_simd_coprocessor_error
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC
END(simd_coprocessor_error)

ENTRY(device_not_available)
        RING0_INT_FRAME
        pushl $-1                       # mark this as an int
        CFI_ADJUST_CFA_OFFSET 4
        SAVE_ALL
        GET_CR0_INTO_EAX
        testl $0x4, %eax                # EM (math emulation bit)
        jne device_not_available_emulate
        preempt_stop(CLBR_ANY)
        call math_state_restore
        jmp ret_from_exception
device_not_available_emulate:
        pushl $0                        # temporary storage for ORIG_EIP
        CFI_ADJUST_CFA_OFFSET 4
        call math_emulate
        addl $4, %esp
        CFI_ADJUST_CFA_OFFSET -4
        jmp ret_from_exception
        CFI_ENDPROC
END(device_not_available)

/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack.  Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
#define FIX_STACK(offset, ok, label)            \
        cmpw $__KERNEL_CS,4(%esp);              \
        jne ok;                                 \
label:                                          \
        movl TSS_sysenter_esp0+offset(%esp),%esp;       \
        CFI_DEF_CFA esp, 0;                     \
        CFI_UNDEFINED eip;                      \
        pushfl;                                 \
        CFI_ADJUST_CFA_OFFSET 4;                \
        pushl $__KERNEL_CS;                     \
        CFI_ADJUST_CFA_OFFSET 4;                \
        pushl $sysenter_past_esp;               \
        CFI_ADJUST_CFA_OFFSET 4;                \
        CFI_REL_OFFSET eip, 0

KPROBE_ENTRY(debug)
        RING0_INT_FRAME
        cmpl $sysenter_entry,(%esp)
        jne debug_stack_correct
        FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
debug_stack_correct:
        pushl $-1                       # mark this as an int
        CFI_ADJUST_CFA_OFFSET 4
        SAVE_ALL
        xorl %edx,%edx                  # error code 0
        movl %esp,%eax                  # pt_regs pointer
        call do_debug
        jmp ret_from_exception
        CFI_ENDPROC
KPROBE_END(debug)

/*
 * NMI is doubly nasty.  It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack.  So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
KPROBE_ENTRY(nmi)
        RING0_INT_FRAME
        pushl %eax
        CFI_ADJUST_CFA_OFFSET 4
        movl %ss, %eax
        cmpw $__ESPFIX_SS, %ax
        popl %eax
        CFI_ADJUST_CFA_OFFSET -4
        je nmi_espfix_stack
        cmpl $sysenter_entry,(%esp)
        je nmi_stack_fixup
        pushl %eax
        CFI_ADJUST_CFA_OFFSET 4
        movl %esp,%eax
        /* Do not access memory above the end of our stack page,
         * it might not exist.
         */
        andl $(THREAD_SIZE-1),%eax
        cmpl $(THREAD_SIZE-20),%eax
        popl %eax
        CFI_ADJUST_CFA_OFFSET -4
        jae nmi_stack_correct
        cmpl $sysenter_entry,12(%esp)
        je nmi_debug_stack_check
nmi_stack_correct:
        /* We have a RING0_INT_FRAME here */
        pushl %eax
        CFI_ADJUST_CFA_OFFSET 4
        SAVE_ALL
        xorl %edx,%edx                  # zero error code
        movl %esp,%eax                  # pt_regs pointer
        call do_nmi
        jmp restore_nocheck_notrace
        CFI_ENDPROC

nmi_stack_fixup:
        RING0_INT_FRAME
        FIX_STACK(12,nmi_stack_correct, 1)
        jmp nmi_stack_correct

nmi_debug_stack_check:
        /* We have a RING0_INT_FRAME here */
        cmpw $__KERNEL_CS,16(%esp)
        jne nmi_stack_correct
        cmpl $debug,(%esp)
        jb nmi_stack_correct
        cmpl $debug_esp_fix_insn,(%esp)
        ja nmi_stack_correct
        FIX_STACK(24,nmi_stack_correct, 1)
        jmp nmi_stack_correct

nmi_espfix_stack:
        /* We have a RING0_INT_FRAME here.
         *
         * Create the %ss:%esp pointer that lss will use to switch back.
         */
        pushl %ss
        CFI_ADJUST_CFA_OFFSET 4
        pushl %esp
        CFI_ADJUST_CFA_OFFSET 4
        addw $4, (%esp)
        /* copy the iret frame of 12 bytes */
        .rept 3
        pushl 16(%esp)
        CFI_ADJUST_CFA_OFFSET 4
        .endr
        pushl %eax
        CFI_ADJUST_CFA_OFFSET 4
        SAVE_ALL
        FIXUP_ESPFIX_STACK              # %eax == %esp
        xorl %edx,%edx                  # zero error code
        call do_nmi
        RESTORE_REGS
        lss 12+4(%esp), %esp            # back to espfix stack
        CFI_ADJUST_CFA_OFFSET -24
1:      INTERRUPT_RETURN
        CFI_ENDPROC
.section __ex_table,"a"
        .align 4
        .long 1b,iret_exc
.previous
KPROBE_END(nmi)

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
1:      iret
.section __ex_table,"a"
        .align 4
        .long 1b,iret_exc
.previous
END(native_iret)

ENTRY(native_irq_enable_sysexit)
        sti
        sysexit
END(native_irq_enable_sysexit)
#endif
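/*
 * These native_* bodies are what INTERRUPT_RETURN and
 * ENABLE_INTERRUPTS_SYSEXIT resolve to when running on bare hardware; a
 * hypervisor backend supplies its own replacements instead.
 */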

KPROBE_ENTRY(int3)
        RING0_INT_FRAME
        pushl $-1                       # mark this as an int
        CFI_ADJUST_CFA_OFFSET 4
        SAVE_ALL
        xorl %edx,%edx                  # zero error code
        movl %esp,%eax                  # pt_regs pointer
        call do_int3
        jmp ret_from_exception
        CFI_ENDPROC
KPROBE_END(int3)

ENTRY(overflow)
        RING0_INT_FRAME
        pushl $0
        CFI_ADJUST_CFA_OFFSET 4
        pushl $do_overflow
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC
END(overflow)

ENTRY(bounds)
        RING0_INT_FRAME
        pushl $0
        CFI_ADJUST_CFA_OFFSET 4
        pushl $do_bounds
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC
END(bounds)

ENTRY(invalid_op)
        RING0_INT_FRAME
        pushl $0
        CFI_ADJUST_CFA_OFFSET 4
        pushl $do_invalid_op
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
        RING0_INT_FRAME
        pushl $0
        CFI_ADJUST_CFA_OFFSET 4
        pushl $do_coprocessor_segment_overrun
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
        RING0_EC_FRAME
        pushl $do_invalid_TSS
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC
END(invalid_TSS)

ENTRY(segment_not_present)
        RING0_EC_FRAME
        pushl $do_segment_not_present
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC
END(segment_not_present)

ENTRY(stack_segment)
        RING0_EC_FRAME
        pushl $do_stack_segment
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC
END(stack_segment)

KPROBE_ENTRY(general_protection)
        RING0_EC_FRAME
        pushl $do_general_protection
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC
KPROBE_END(general_protection)

ENTRY(alignment_check)
        RING0_EC_FRAME
        pushl $do_alignment_check
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC
END(alignment_check)

ENTRY(divide_error)
        RING0_INT_FRAME
        pushl $0                        # no error code
        CFI_ADJUST_CFA_OFFSET 4
        pushl $do_divide_error
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
        RING0_INT_FRAME
        pushl $0
        CFI_ADJUST_CFA_OFFSET 4
        pushl machine_check_vector
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
        RING0_INT_FRAME
        pushl $0
        CFI_ADJUST_CFA_OFFSET 4
        pushl $do_spurious_interrupt_bug
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC
END(spurious_interrupt_bug)

ENTRY(kernel_thread_helper)
        pushl $0                        # fake return address for unwinder
        CFI_STARTPROC
        movl %edx,%eax
        push %edx
        CFI_ADJUST_CFA_OFFSET 4
        call *%ebx
        push %eax
        CFI_ADJUST_CFA_OFFSET 4
        call do_exit
        CFI_ENDPROC
ENDPROC(kernel_thread_helper)
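/*
 * For context: kernel_thread() (process_32.c after this move) builds a
 * pt_regs with %ebx = the thread function, %edx = its argument and %eip
 * pointing here, so a newly forked kernel thread arrives via
 * ret_from_fork.  The argument is passed both in %eax and on the stack
 * (presumably to satisfy either calling convention), fn(arg) is called
 * through %ebx, and its return value is handed to do_exit.
 */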

#ifdef CONFIG_XEN
ENTRY(xen_hypervisor_callback)
        CFI_STARTPROC
        pushl $0
        CFI_ADJUST_CFA_OFFSET 4
        SAVE_ALL
        TRACE_IRQS_OFF

        /* Check to see if we got the event in the critical
           region in xen_iret_direct, after we've reenabled
           events and checked for pending events.  This simulates
           iret instruction's behaviour where it delivers a
           pending interrupt when enabling interrupts. */
        movl PT_EIP(%esp),%eax
        cmpl $xen_iret_start_crit,%eax
        jb   1f
        cmpl $xen_iret_end_crit,%eax
        jae  1f

        call xen_iret_crit_fixup

1:      mov %esp, %eax
        call xen_evtchn_do_upcall
        jmp  ret_from_intr
        CFI_ENDPROC
ENDPROC(xen_hypervisor_callback)

# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
#  1. Fault while reloading DS, ES, FS or GS
#  2. Fault while executing IRET
# Category 1 we fix up by reattempting the load, and zeroing the segment
# register if the load fails.
# Category 2 we fix up by jumping to do_iret_error.  We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by maintaining a status value in EAX.
ENTRY(xen_failsafe_callback)
        CFI_STARTPROC
        pushl %eax
        CFI_ADJUST_CFA_OFFSET 4
        movl $1,%eax
1:      mov 4(%esp),%ds
2:      mov 8(%esp),%es
3:      mov 12(%esp),%fs
4:      mov 16(%esp),%gs
        testl %eax,%eax
        popl %eax
        CFI_ADJUST_CFA_OFFSET -4
        lea 16(%esp),%esp
        CFI_ADJUST_CFA_OFFSET -16
        jz 5f
        addl $16,%esp
        jmp iret_exc                    # EAX != 0 => Category 2 (Bad IRET)
5:      pushl $0                        # EAX == 0 => Category 1 (Bad segment)
        CFI_ADJUST_CFA_OFFSET 4
        SAVE_ALL
        jmp ret_from_exception
        CFI_ENDPROC

.section .fixup,"ax"
6:      xorl %eax,%eax
        movl %eax,4(%esp)
        jmp 1b
7:      xorl %eax,%eax
        movl %eax,8(%esp)
        jmp 2b
8:      xorl %eax,%eax
        movl %eax,12(%esp)
        jmp 3b
9:      xorl %eax,%eax
        movl %eax,16(%esp)
        jmp 4b
.previous
.section __ex_table,"a"
        .align 4
        .long 1b,6b
        .long 2b,7b
        .long 3b,8b
        .long 4b,9b
.previous
ENDPROC(xen_failsafe_callback)

#endif  /* CONFIG_XEN */

.section .rodata,"a"
#include "syscall_table_32.S"

syscall_table_size=(.-sys_call_table)