Diffstat (limited to 'arch/i386/kernel/entry.S')
-rw-r--r--	arch/i386/kernel/entry.S	285
1 file changed, 269 insertions(+), 16 deletions(-)
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index cfc683f153b9..fbdb933251b6 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -48,6 +48,7 @@
 #include <asm/smp.h>
 #include <asm/page.h>
 #include <asm/desc.h>
+#include <asm/dwarf2.h>
 #include "irq_vectors.h"
 
 #define nr_syscalls ((syscall_table_size)/4)
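The new <asm/dwarf2.h> include is what gives every CFI_* name below a meaning. The header itself is not part of this diff; roughly, and as a sketch of the 2.6-era wrapper rather than a verbatim copy, it reads:

/* With unwind info configured in, the CFI_* names map straight onto
 * gas .cfi_* directives; otherwise each expands to "#", the gas line
 * comment character, so the annotation and its arguments vanish. */
#ifdef CONFIG_UNWIND_INFO
#define CFI_STARTPROC			.cfi_startproc
#define CFI_ENDPROC			.cfi_endproc
#define CFI_DEF_CFA			.cfi_def_cfa
#define CFI_ADJUST_CFA_OFFSET		.cfi_adjust_cfa_offset
#define CFI_OFFSET			.cfi_offset
#define CFI_REL_OFFSET			.cfi_rel_offset
#define CFI_REGISTER			.cfi_register
#define CFI_RESTORE			.cfi_restore
#define CFI_REMEMBER_STATE		.cfi_remember_state
#define CFI_RESTORE_STATE		.cfi_restore_state
#else
#define CFI_STARTPROC			#
#define CFI_ENDPROC			#
/* ...and so on for the rest... */
#endif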
@@ -82,34 +83,76 @@ VM_MASK = 0x00020000
 #define resume_kernel		restore_nocheck
 #endif
 
+#ifdef CONFIG_VM86
+#define resume_userspace_sig	check_userspace
+#else
+#define resume_userspace_sig	resume_userspace
+#endif
+
 #define SAVE_ALL \
 	cld; \
 	pushl %es; \
+	CFI_ADJUST_CFA_OFFSET 4;\
+	/*CFI_REL_OFFSET es, 0;*/\
 	pushl %ds; \
+	CFI_ADJUST_CFA_OFFSET 4;\
+	/*CFI_REL_OFFSET ds, 0;*/\
 	pushl %eax; \
+	CFI_ADJUST_CFA_OFFSET 4;\
+	CFI_REL_OFFSET eax, 0;\
 	pushl %ebp; \
+	CFI_ADJUST_CFA_OFFSET 4;\
+	CFI_REL_OFFSET ebp, 0;\
 	pushl %edi; \
+	CFI_ADJUST_CFA_OFFSET 4;\
+	CFI_REL_OFFSET edi, 0;\
 	pushl %esi; \
+	CFI_ADJUST_CFA_OFFSET 4;\
+	CFI_REL_OFFSET esi, 0;\
 	pushl %edx; \
+	CFI_ADJUST_CFA_OFFSET 4;\
+	CFI_REL_OFFSET edx, 0;\
 	pushl %ecx; \
+	CFI_ADJUST_CFA_OFFSET 4;\
+	CFI_REL_OFFSET ecx, 0;\
 	pushl %ebx; \
+	CFI_ADJUST_CFA_OFFSET 4;\
+	CFI_REL_OFFSET ebx, 0;\
 	movl $(__USER_DS), %edx; \
 	movl %edx, %ds; \
 	movl %edx, %es;
 
 #define RESTORE_INT_REGS \
 	popl %ebx;	\
+	CFI_ADJUST_CFA_OFFSET -4;\
+	CFI_RESTORE ebx;\
 	popl %ecx;	\
+	CFI_ADJUST_CFA_OFFSET -4;\
+	CFI_RESTORE ecx;\
 	popl %edx;	\
+	CFI_ADJUST_CFA_OFFSET -4;\
+	CFI_RESTORE edx;\
 	popl %esi;	\
+	CFI_ADJUST_CFA_OFFSET -4;\
+	CFI_RESTORE esi;\
 	popl %edi;	\
+	CFI_ADJUST_CFA_OFFSET -4;\
+	CFI_RESTORE edi;\
 	popl %ebp;	\
-	popl %eax
+	CFI_ADJUST_CFA_OFFSET -4;\
+	CFI_RESTORE ebp;\
+	popl %eax;	\
+	CFI_ADJUST_CFA_OFFSET -4;\
+	CFI_RESTORE eax
 
 #define RESTORE_REGS	\
 	RESTORE_INT_REGS; \
 1:	popl %ds;	\
+	CFI_ADJUST_CFA_OFFSET -4;\
+	/*CFI_RESTORE ds;*/\
 2:	popl %es;	\
+	CFI_ADJUST_CFA_OFFSET -4;\
+	/*CFI_RESTORE es;*/\
 .section .fixup,"ax";	\
 3:	movl $0,(%esp);	\
 	jmp 1b;		\
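The rule these macros now follow, shown on a freestanding example (illustrative only, not taken from the patch): every instruction that moves %esp gets a CFI_ADJUST_CFA_OFFSET of the same amount, and every save of a register the unwinder may need is advertised with CFI_REL_OFFSET relative to the new stack top.

cfi_demo:
	CFI_STARTPROC
	pushl %ebx			# %esp moves down 4 bytes...
	CFI_ADJUST_CFA_OFFSET 4		# ...so the CFA is now 4 further away
	CFI_REL_OFFSET ebx, 0		# caller's %ebx is saved at 0(%esp)
	xorl %ebx, %ebx			# free to clobber it
	popl %ebx
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE ebx			# value is back in the register
	ret
	CFI_ENDPROC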
@@ -122,13 +165,43 @@ VM_MASK = 0x00020000
 		.long 2b,4b;	\
 	.previous
 
+#define RING0_INT_FRAME \
+	CFI_STARTPROC simple;\
+	CFI_DEF_CFA esp, 3*4;\
+	/*CFI_OFFSET cs, -2*4;*/\
+	CFI_OFFSET eip, -3*4
+
+#define RING0_EC_FRAME \
+	CFI_STARTPROC simple;\
+	CFI_DEF_CFA esp, 4*4;\
+	/*CFI_OFFSET cs, -2*4;*/\
+	CFI_OFFSET eip, -3*4
+
+#define RING0_PTREGS_FRAME \
+	CFI_STARTPROC simple;\
+	CFI_DEF_CFA esp, OLDESP-EBX;\
+	/*CFI_OFFSET cs, CS-OLDESP;*/\
+	CFI_OFFSET eip, EIP-OLDESP;\
+	/*CFI_OFFSET es, ES-OLDESP;*/\
+	/*CFI_OFFSET ds, DS-OLDESP;*/\
+	CFI_OFFSET eax, EAX-OLDESP;\
+	CFI_OFFSET ebp, EBP-OLDESP;\
+	CFI_OFFSET edi, EDI-OLDESP;\
+	CFI_OFFSET esi, ESI-OLDESP;\
+	CFI_OFFSET edx, EDX-OLDESP;\
+	CFI_OFFSET ecx, ECX-OLDESP;\
+	CFI_OFFSET ebx, EBX-OLDESP
 
 ENTRY(ret_from_fork)
+	CFI_STARTPROC
 	pushl %eax
+	CFI_ADJUST_CFA_OFFSET -4
 	call schedule_tail
 	GET_THREAD_INFO(%ebp)
 	popl %eax
+	CFI_ADJUST_CFA_OFFSET -4
 	jmp syscall_exit
+	CFI_ENDPROC
 
 /*
  * Return to user mode is not as complex as all this looks,
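The three RING0_*_FRAME helpers defined above let each entry point declare its incoming stack shape in a single line. For the two simple cases the layout works out as:

# RING0_INT_FRAME: the CPU pushed EFLAGS, CS and EIP, and %esp points
# at EIP, so the call frame address (CFA) sits 3*4 bytes above %esp:
#
#	CFA-1*4: EFLAGS
#	CFA-2*4: CS		(annotation left commented out in the patch)
#	CFA-3*4: EIP		<- %esp, hence CFI_DEF_CFA esp, 3*4
#
# RING0_EC_FRAME is the same plus a hardware error code on top (4*4).
# RING0_PTREGS_FRAME instead describes a completed SAVE_ALL frame,
# using the asm-offsets constants (EBX..OLDESP) as distances.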
@@ -139,10 +212,12 @@ ENTRY(ret_from_fork)
 
 # userspace resumption stub bypassing syscall exit tracing
 	ALIGN
+	RING0_PTREGS_FRAME
 ret_from_exception:
 	preempt_stop
 ret_from_intr:
 	GET_THREAD_INFO(%ebp)
+check_userspace:
 	movl EFLAGS(%esp), %eax		# mix EFLAGS and CS
 	movb CS(%esp), %al
 	testl $(VM_MASK | 3), %eax
@@ -171,20 +246,38 @@ need_resched:
 	call preempt_schedule_irq
 	jmp need_resched
 #endif
+	CFI_ENDPROC
 
 /* SYSENTER_RETURN points to after the "sysenter" instruction in
    the vsyscall page.  See vsyscall-sysentry.S, which defines the symbol.  */
 
 # sysenter call handler stub
 ENTRY(sysenter_entry)
+	CFI_STARTPROC simple
+	CFI_DEF_CFA esp, 0
+	CFI_REGISTER esp, ebp
 	movl TSS_sysenter_esp0(%esp),%esp
 sysenter_past_esp:
 	sti
 	pushl $(__USER_DS)
+	CFI_ADJUST_CFA_OFFSET 4
+	/*CFI_REL_OFFSET ss, 0*/
 	pushl %ebp
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET esp, 0
 	pushfl
+	CFI_ADJUST_CFA_OFFSET 4
 	pushl $(__USER_CS)
-	pushl $SYSENTER_RETURN
+	CFI_ADJUST_CFA_OFFSET 4
+	/*CFI_REL_OFFSET cs, 0*/
+	/*
+	 * Push current_thread_info()->sysenter_return to the stack.
+	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
+	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
+	 */
+	pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET eip, 0
 
 /*
  * Load the potential sixth argument from user stack.
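The operand of the new pushl encodes where current_thread_info()->sysenter_return lives relative to the current stack pointer. Spelling out the arithmetic that the in-line comment compresses (using only facts stated in the patch plus the standard layout, in which thread_info sits THREAD_SIZE below the top of the task stack):

# At the pushl, %esp = esp0 - 4*4 (the four words just pushed), and
# copy_thread set esp0 = stack_top - 8, with
# stack_top = thread_info + THREAD_SIZE.  Therefore:
#
#	&ti->sysenter_return
#	  = thread_info + TI_sysenter_return
#	  = (stack_top - THREAD_SIZE) + TI_sysenter_return
#	  = (%esp + 4*4 + 8) - THREAD_SIZE + TI_sysenter_return
#
# which is exactly (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp).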
@@ -199,6 +292,7 @@ sysenter_past_esp:
 	.previous
 
 	pushl %eax
+	CFI_ADJUST_CFA_OFFSET 4
 	SAVE_ALL
 	GET_THREAD_INFO(%ebp)
 
@@ -219,11 +313,14 @@ sysenter_past_esp:
 	xorl %ebp,%ebp
 	sti
 	sysexit
+	CFI_ENDPROC
 
 
 	# system call handler stub
 ENTRY(system_call)
+	RING0_INT_FRAME			# can't unwind into user space anyway
 	pushl %eax			# save orig_eax
+	CFI_ADJUST_CFA_OFFSET 4
 	SAVE_ALL
 	GET_THREAD_INFO(%ebp)
 	testl $TF_MASK,EFLAGS(%esp)
@@ -256,10 +353,12 @@ restore_all:
 	movb CS(%esp), %al
 	andl $(VM_MASK | (4 << 8) | 3), %eax
 	cmpl $((4 << 8) | 3), %eax
+	CFI_REMEMBER_STATE
 	je ldt_ss			# returning to user-space with LDT SS
 restore_nocheck:
 	RESTORE_REGS
 	addl $4, %esp
+	CFI_ADJUST_CFA_OFFSET -4
 1:	iret
 .section .fixup,"ax"
 iret_exc:
@@ -273,6 +372,7 @@ iret_exc:
 	.long 1b,iret_exc
 .previous
 
+	CFI_RESTORE_STATE
 ldt_ss:
 	larl OLDSS(%esp), %eax
 	jnz restore_nocheck
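restore_all has two exits with different stack states: the fall-through path pops the whole frame, while ldt_ss is entered with the frame still intact. The CFI_REMEMBER_STATE/CFI_RESTORE_STATE pair keeps the two descriptions from bleeding into each other; the pattern in miniature (illustrative sketch, not patch code):

	CFI_REMEMBER_STATE		# snapshot the frame description here
	je slow_path			# branch leaves with the frame intact
	addl $4, %esp			# fall-through keeps adjusting...
	CFI_ADJUST_CFA_OFFSET -4
	iret
	CFI_RESTORE_STATE		# rewind: slow_path is described by
slow_path:				# the snapshot, not the popped state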
@@ -285,11 +385,13 @@ ldt_ss:
 	 * CPUs, which we can try to work around to make
 	 * dosemu and wine happy. */
 	subl $8, %esp		# reserve space for switch16 pointer
+	CFI_ADJUST_CFA_OFFSET 8
 	cli
 	movl %esp, %eax
 	/* Set up the 16bit stack frame with switch32 pointer on top,
 	 * and a switch16 pointer on top of the current frame. */
 	call setup_x86_bogus_stack
+	CFI_ADJUST_CFA_OFFSET -8	# frame has moved
 	RESTORE_REGS
 	lss 20+4(%esp), %esp	# switch to 16bit stack
 1:	iret
@@ -297,9 +399,11 @@ ldt_ss:
 	.align 4
 	.long 1b,iret_exc
 .previous
+	CFI_ENDPROC
 
 	# perform work that needs to be done immediately before resumption
 	ALIGN
+	RING0_PTREGS_FRAME		# can't unwind into user space anyway
 work_pending:
 	testb $_TIF_NEED_RESCHED, %cl
 	jz work_notifysig
@@ -323,18 +427,20 @@ work_notifysig:			# deal with pending signals and
 					# vm86-space
 	xorl %edx, %edx
 	call do_notify_resume
-	jmp resume_userspace
+	jmp resume_userspace_sig
 
 	ALIGN
 work_notifysig_v86:
 #ifdef CONFIG_VM86
 	pushl %ecx			# save ti_flags for do_notify_resume
+	CFI_ADJUST_CFA_OFFSET 4
 	call save_v86_state		# %eax contains pt_regs pointer
 	popl %ecx
+	CFI_ADJUST_CFA_OFFSET -4
 	movl %eax, %esp
 	xorl %edx, %edx
 	call do_notify_resume
-	jmp resume_userspace
+	jmp resume_userspace_sig
 #endif
 
 	# perform syscall exit tracing
@@ -363,19 +469,21 @@ syscall_exit_work:
 	movl $1, %edx
 	call do_syscall_trace
 	jmp resume_userspace
+	CFI_ENDPROC
 
-	ALIGN
+	RING0_INT_FRAME			# can't unwind into user space anyway
 syscall_fault:
 	pushl %eax			# save orig_eax
+	CFI_ADJUST_CFA_OFFSET 4
 	SAVE_ALL
 	GET_THREAD_INFO(%ebp)
 	movl $-EFAULT,EAX(%esp)
 	jmp resume_userspace
 
-	ALIGN
 syscall_badsys:
 	movl $-ENOSYS,EAX(%esp)
 	jmp resume_userspace
+	CFI_ENDPROC
 
 #define FIXUP_ESPFIX_STACK \
 	movl %esp, %eax; \
@@ -387,16 +495,21 @@ syscall_badsys:
 	movl %eax, %esp;
 #define UNWIND_ESPFIX_STACK \
 	pushl %eax; \
+	CFI_ADJUST_CFA_OFFSET 4; \
 	movl %ss, %eax; \
 	/* see if on 16bit stack */ \
 	cmpw $__ESPFIX_SS, %ax; \
-	jne 28f; \
-	movl $__KERNEL_DS, %edx; \
-	movl %edx, %ds; \
-	movl %edx, %es; \
+	je 28f; \
+27:	popl %eax; \
+	CFI_ADJUST_CFA_OFFSET -4; \
+.section .fixup,"ax"; \
+28:	movl $__KERNEL_DS, %eax; \
+	movl %eax, %ds; \
+	movl %eax, %es; \
 	/* switch to 32bit stack */ \
-	FIXUP_ESPFIX_STACK \
-28:	popl %eax;
+	FIXUP_ESPFIX_STACK; \
+	jmp 27b; \
+.previous
 
 /*
  * Build the entry stubs and pointer table with
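UNWIND_ESPFIX_STACK is also restructured here: the common case (%ss is not the espfix selector) now runs straight through two instructions, while the rare 16-bit-stack repair moves out of line into .fixup text that jumps back when done. The shape of that idiom, reduced to its skeleton (sketch, not patch code):

	cmpw $__ESPFIX_SS, %ax		# on the 16bit stack?
	je 28f				# rare case handled out of line
27:	popl %eax			# hot path continues immediately
.section .fixup,"ax"
28:	# ...repair the stack here, then rejoin...
	jmp 27b
.previous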
@@ -408,9 +521,14 @@ ENTRY(interrupt)
 
 vector=0
 ENTRY(irq_entries_start)
+	RING0_INT_FRAME
 .rept NR_IRQS
 	ALIGN
-1:	pushl $vector-256
+	.if vector
+	CFI_ADJUST_CFA_OFFSET -4
+	.endif
+1:	pushl $~(vector)
+	CFI_ADJUST_CFA_OFFSET 4
 	jmp common_interrupt
 .data
 	.long 1b
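Two per-stub details above are easy to miss. The vector is now pushed as ~vector, which stays negative for every possible vector number (vector-256 would stop being negative once vectors reach 256), so an interrupt frame remains distinguishable from a syscall by the sign of orig_eax. And because all NR_IRQS stubs share a single CFI_STARTPROC, each iteration after the first rewinds the CFA that the previous stub's push advanced. Unrolled for the first two vectors (sketch):

1:	pushl $~(0)			# stub for vector 0
	CFI_ADJUST_CFA_OFFSET 4
	jmp common_interrupt
	CFI_ADJUST_CFA_OFFSET -4	# rewind, so the next stub is
1:	pushl $~(1)			# described as entered at the
	CFI_ADJUST_CFA_OFFSET 4		# original frame depth
	jmp common_interrupt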
@@ -424,60 +542,99 @@ common_interrupt:
 	movl %esp,%eax
 	call do_IRQ
 	jmp ret_from_intr
+	CFI_ENDPROC
 
 #define BUILD_INTERRUPT(name, nr)	\
 ENTRY(name)				\
-	pushl $nr-256;			\
-	SAVE_ALL			\
+	RING0_INT_FRAME;		\
+	pushl $~(nr);			\
+	CFI_ADJUST_CFA_OFFSET 4;	\
+	SAVE_ALL;			\
 	movl %esp,%eax;			\
 	call smp_/**/name;		\
-	jmp ret_from_intr;
+	jmp ret_from_intr;		\
+	CFI_ENDPROC
 
 /* The include is where all of the SMP etc. interrupts come from */
 #include "entry_arch.h"
 
 ENTRY(divide_error)
+	RING0_INT_FRAME
 	pushl $0			# no error code
+	CFI_ADJUST_CFA_OFFSET 4
 	pushl $do_divide_error
+	CFI_ADJUST_CFA_OFFSET 4
 	ALIGN
 error_code:
 	pushl %ds
+	CFI_ADJUST_CFA_OFFSET 4
+	/*CFI_REL_OFFSET ds, 0*/
 	pushl %eax
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET eax, 0
 	xorl %eax, %eax
 	pushl %ebp
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET ebp, 0
 	pushl %edi
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET edi, 0
 	pushl %esi
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET esi, 0
 	pushl %edx
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET edx, 0
 	decl %eax			# eax = -1
 	pushl %ecx
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET ecx, 0
 	pushl %ebx
+	CFI_ADJUST_CFA_OFFSET 4
+	CFI_REL_OFFSET ebx, 0
 	cld
 	pushl %es
+	CFI_ADJUST_CFA_OFFSET 4
+	/*CFI_REL_OFFSET es, 0*/
 	UNWIND_ESPFIX_STACK
 	popl %ecx
+	CFI_ADJUST_CFA_OFFSET -4
+	/*CFI_REGISTER es, ecx*/
 	movl ES(%esp), %edi		# get the function address
 	movl ORIG_EAX(%esp), %edx	# get the error code
 	movl %eax, ORIG_EAX(%esp)
 	movl %ecx, ES(%esp)
+	/*CFI_REL_OFFSET es, ES*/
 	movl $(__USER_DS), %ecx
 	movl %ecx, %ds
 	movl %ecx, %es
 	movl %esp,%eax			# pt_regs pointer
 	call *%edi
 	jmp ret_from_exception
+	CFI_ENDPROC
 
 ENTRY(coprocessor_error)
+	RING0_INT_FRAME
 	pushl $0
+	CFI_ADJUST_CFA_OFFSET 4
 	pushl $do_coprocessor_error
+	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
+	CFI_ENDPROC
 
 ENTRY(simd_coprocessor_error)
+	RING0_INT_FRAME
 	pushl $0
+	CFI_ADJUST_CFA_OFFSET 4
 	pushl $do_simd_coprocessor_error
+	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
+	CFI_ENDPROC
 
 ENTRY(device_not_available)
+	RING0_INT_FRAME
 	pushl $-1			# mark this as an int
+	CFI_ADJUST_CFA_OFFSET 4
 	SAVE_ALL
 	movl %cr0, %eax
 	testl $0x4, %eax		# EM (math emulation bit)
@@ -487,9 +644,12 @@ ENTRY(device_not_available)
 	jmp ret_from_exception
 device_not_available_emulate:
 	pushl $0			# temporary storage for ORIG_EIP
+	CFI_ADJUST_CFA_OFFSET 4
 	call math_emulate
 	addl $4, %esp
+	CFI_ADJUST_CFA_OFFSET -4
 	jmp ret_from_exception
+	CFI_ENDPROC
 
 /*
  * Debug traps and NMI can happen at the one SYSENTER instruction
@@ -514,16 +674,19 @@ label:				\
 	pushl $sysenter_past_esp
 
 KPROBE_ENTRY(debug)
+	RING0_INT_FRAME
 	cmpl $sysenter_entry,(%esp)
 	jne debug_stack_correct
 	FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
 debug_stack_correct:
 	pushl $-1			# mark this as an int
+	CFI_ADJUST_CFA_OFFSET 4
 	SAVE_ALL
 	xorl %edx,%edx			# error code 0
 	movl %esp,%eax			# pt_regs pointer
 	call do_debug
 	jmp ret_from_exception
+	CFI_ENDPROC
 	.previous .text
 /*
  * NMI is doubly nasty. It can happen _while_ we're handling
@@ -534,14 +697,18 @@ debug_stack_correct:
  * fault happened on the sysenter path.
  */
 ENTRY(nmi)
+	RING0_INT_FRAME
 	pushl %eax
+	CFI_ADJUST_CFA_OFFSET 4
 	movl %ss, %eax
 	cmpw $__ESPFIX_SS, %ax
 	popl %eax
+	CFI_ADJUST_CFA_OFFSET -4
 	je nmi_16bit_stack
 	cmpl $sysenter_entry,(%esp)
 	je nmi_stack_fixup
 	pushl %eax
+	CFI_ADJUST_CFA_OFFSET 4
 	movl %esp,%eax
 	/* Do not access memory above the end of our stack page,
 	 * it might not exist.
@@ -549,16 +716,19 @@ ENTRY(nmi)
 	andl $(THREAD_SIZE-1),%eax
 	cmpl $(THREAD_SIZE-20),%eax
 	popl %eax
+	CFI_ADJUST_CFA_OFFSET -4
 	jae nmi_stack_correct
 	cmpl $sysenter_entry,12(%esp)
 	je nmi_debug_stack_check
 nmi_stack_correct:
 	pushl %eax
+	CFI_ADJUST_CFA_OFFSET 4
 	SAVE_ALL
 	xorl %edx,%edx			# zero error code
 	movl %esp,%eax			# pt_regs pointer
 	call do_nmi
 	jmp restore_all
+	CFI_ENDPROC
 
 nmi_stack_fixup:
 	FIX_STACK(12,nmi_stack_correct, 1)
@@ -574,94 +744,177 @@ nmi_debug_stack_check:
 	jmp nmi_stack_correct
 
 nmi_16bit_stack:
+	RING0_INT_FRAME
 	/* create the pointer to lss back */
 	pushl %ss
+	CFI_ADJUST_CFA_OFFSET 4
 	pushl %esp
+	CFI_ADJUST_CFA_OFFSET 4
 	movzwl %sp, %esp
 	addw $4, (%esp)
 	/* copy the iret frame of 12 bytes */
 	.rept 3
 	pushl 16(%esp)
+	CFI_ADJUST_CFA_OFFSET 4
 	.endr
 	pushl %eax
+	CFI_ADJUST_CFA_OFFSET 4
 	SAVE_ALL
 	FIXUP_ESPFIX_STACK		# %eax == %esp
+	CFI_ADJUST_CFA_OFFSET -20	# the frame has now moved
 	xorl %edx,%edx			# zero error code
 	call do_nmi
 	RESTORE_REGS
 	lss 12+4(%esp), %esp		# back to 16bit stack
 1:	iret
+	CFI_ENDPROC
 .section __ex_table,"a"
 	.align 4
 	.long 1b,iret_exc
 .previous
 
 KPROBE_ENTRY(int3)
+	RING0_INT_FRAME
 	pushl $-1			# mark this as an int
+	CFI_ADJUST_CFA_OFFSET 4
 	SAVE_ALL
 	xorl %edx,%edx		# zero error code
 	movl %esp,%eax		# pt_regs pointer
 	call do_int3
 	jmp ret_from_exception
+	CFI_ENDPROC
 	.previous .text
 
 ENTRY(overflow)
+	RING0_INT_FRAME
 	pushl $0
+	CFI_ADJUST_CFA_OFFSET 4
 	pushl $do_overflow
+	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
+	CFI_ENDPROC
 
 ENTRY(bounds)
+	RING0_INT_FRAME
 	pushl $0
+	CFI_ADJUST_CFA_OFFSET 4
 	pushl $do_bounds
+	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
+	CFI_ENDPROC
 
 ENTRY(invalid_op)
+	RING0_INT_FRAME
 	pushl $0
+	CFI_ADJUST_CFA_OFFSET 4
 	pushl $do_invalid_op
+	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
+	CFI_ENDPROC
 
 ENTRY(coprocessor_segment_overrun)
+	RING0_INT_FRAME
 	pushl $0
+	CFI_ADJUST_CFA_OFFSET 4
 	pushl $do_coprocessor_segment_overrun
+	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
+	CFI_ENDPROC
 
 ENTRY(invalid_TSS)
+	RING0_EC_FRAME
 	pushl $do_invalid_TSS
+	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
+	CFI_ENDPROC
 
 ENTRY(segment_not_present)
+	RING0_EC_FRAME
 	pushl $do_segment_not_present
+	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
+	CFI_ENDPROC
 
 ENTRY(stack_segment)
+	RING0_EC_FRAME
 	pushl $do_stack_segment
+	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
+	CFI_ENDPROC
 
 KPROBE_ENTRY(general_protection)
+	RING0_EC_FRAME
 	pushl $do_general_protection
+	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
+	CFI_ENDPROC
 	.previous .text
 
 ENTRY(alignment_check)
+	RING0_EC_FRAME
 	pushl $do_alignment_check
+	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
+	CFI_ENDPROC
 
 KPROBE_ENTRY(page_fault)
+	RING0_EC_FRAME
 	pushl $do_page_fault
+	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
+	CFI_ENDPROC
 	.previous .text
 
 #ifdef CONFIG_X86_MCE
 ENTRY(machine_check)
+	RING0_INT_FRAME
 	pushl $0
+	CFI_ADJUST_CFA_OFFSET 4
 	pushl machine_check_vector
+	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
+	CFI_ENDPROC
 #endif
 
 ENTRY(spurious_interrupt_bug)
+	RING0_INT_FRAME
 	pushl $0
+	CFI_ADJUST_CFA_OFFSET 4
 	pushl $do_spurious_interrupt_bug
+	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
+	CFI_ENDPROC
+
+#ifdef CONFIG_STACK_UNWIND
+ENTRY(arch_unwind_init_running)
+	CFI_STARTPROC
+	movl	4(%esp), %edx
+	movl	(%esp), %ecx
+	leal	4(%esp), %eax
+	movl	%ebx, EBX(%edx)
+	xorl	%ebx, %ebx
+	movl	%ebx, ECX(%edx)
+	movl	%ebx, EDX(%edx)
+	movl	%esi, ESI(%edx)
+	movl	%edi, EDI(%edx)
+	movl	%ebp, EBP(%edx)
+	movl	%ebx, EAX(%edx)
+	movl	$__USER_DS, DS(%edx)
+	movl	$__USER_DS, ES(%edx)
+	movl	%ebx, ORIG_EAX(%edx)
+	movl	%ecx, EIP(%edx)
+	movl	12(%esp), %ecx
+	movl	$__KERNEL_CS, CS(%edx)
+	movl	%ebx, EFLAGS(%edx)
+	movl	%eax, OLDESP(%edx)
+	movl	8(%esp), %eax
+	movl	%ecx, 8(%esp)
+	movl	EBX(%edx), %ebx
+	movl	$__KERNEL_DS, OLDSS(%edx)
+	jmpl	*%eax
+	CFI_ENDPROC
+ENDPROC(arch_unwind_init_running)
+#endif
 
 .section .rodata,"a"
 #include "syscall_table.S"
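The new arch_unwind_init_running snapshots the caller's own register state into the pt_regs at the start of the structure passed at 4(%esp), sets EIP to the caller's return address, rewrites its argument area so 8(%esp) holds the third argument, and tail-jumps to the callback taken from 8(%esp). A hedged C-level reading of that contract (the real prototype lives in the unwind headers, not in this diff):

# Inferred, hypothetical prototype -- names are illustrative only:
#
#	asmlinkage int arch_unwind_init_running(
#		struct unwind_frame_info *info,		# regs filled in here
#		asmlinkage int (*callback)(struct unwind_frame_info *,
#					   void *arg),
#		void *arg);
#
# Control transfers with "jmpl *%eax" rather than call: the callback
# inherits the original return address, so its return value goes
# straight back to arch_unwind_init_running's caller.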