-rw-r--r--  arch/i386/kernel/entry.S   | 234
-rw-r--r--  include/asm-i386/dwarf2.h  |  54
2 files changed, 277 insertions(+), 11 deletions(-)
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index e802f3cac7e3..e6e4506e749a 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -48,6 +48,7 @@
 #include <asm/smp.h>
 #include <asm/page.h>
 #include <asm/desc.h>
+#include <asm/dwarf2.h>
 #include "irq_vectors.h"
 
 #define nr_syscalls ((syscall_table_size)/4)
@@ -85,31 +86,67 @@ VM_MASK = 0x00020000
 #define SAVE_ALL \
         cld; \
         pushl %es; \
+        CFI_ADJUST_CFA_OFFSET 4;\
+        /*CFI_REL_OFFSET es, 0;*/\
         pushl %ds; \
+        CFI_ADJUST_CFA_OFFSET 4;\
+        /*CFI_REL_OFFSET ds, 0;*/\
         pushl %eax; \
+        CFI_ADJUST_CFA_OFFSET 4;\
+        CFI_REL_OFFSET eax, 0;\
         pushl %ebp; \
+        CFI_ADJUST_CFA_OFFSET 4;\
+        CFI_REL_OFFSET ebp, 0;\
         pushl %edi; \
+        CFI_ADJUST_CFA_OFFSET 4;\
+        CFI_REL_OFFSET edi, 0;\
         pushl %esi; \
+        CFI_ADJUST_CFA_OFFSET 4;\
+        CFI_REL_OFFSET esi, 0;\
         pushl %edx; \
+        CFI_ADJUST_CFA_OFFSET 4;\
+        CFI_REL_OFFSET edx, 0;\
         pushl %ecx; \
+        CFI_ADJUST_CFA_OFFSET 4;\
+        CFI_REL_OFFSET ecx, 0;\
         pushl %ebx; \
+        CFI_ADJUST_CFA_OFFSET 4;\
+        CFI_REL_OFFSET ebx, 0;\
         movl $(__USER_DS), %edx; \
         movl %edx, %ds; \
         movl %edx, %es;
 
 #define RESTORE_INT_REGS \
         popl %ebx; \
+        CFI_ADJUST_CFA_OFFSET -4;\
+        CFI_RESTORE ebx;\
         popl %ecx; \
+        CFI_ADJUST_CFA_OFFSET -4;\
+        CFI_RESTORE ecx;\
         popl %edx; \
+        CFI_ADJUST_CFA_OFFSET -4;\
+        CFI_RESTORE edx;\
         popl %esi; \
+        CFI_ADJUST_CFA_OFFSET -4;\
+        CFI_RESTORE esi;\
         popl %edi; \
+        CFI_ADJUST_CFA_OFFSET -4;\
+        CFI_RESTORE edi;\
         popl %ebp; \
-        popl %eax
+        CFI_ADJUST_CFA_OFFSET -4;\
+        CFI_RESTORE ebp;\
+        popl %eax; \
+        CFI_ADJUST_CFA_OFFSET -4;\
+        CFI_RESTORE eax
 
 #define RESTORE_REGS \
         RESTORE_INT_REGS; \
 1:      popl %ds; \
+        CFI_ADJUST_CFA_OFFSET -4;\
+        /*CFI_RESTORE ds;*/\
 2:      popl %es; \
+        CFI_ADJUST_CFA_OFFSET -4;\
+        /*CFI_RESTORE es;*/\
 .section .fixup,"ax"; \
 3:      movl $0,(%esp); \
         jmp 1b; \
@@ -122,13 +159,43 @@ VM_MASK = 0x00020000
         .long 2b,4b; \
 .previous
 
+#define RING0_INT_FRAME \
+        CFI_STARTPROC simple;\
+        CFI_DEF_CFA esp, 3*4;\
+        /*CFI_OFFSET cs, -2*4;*/\
+        CFI_OFFSET eip, -3*4
+
+#define RING0_EC_FRAME \
+        CFI_STARTPROC simple;\
+        CFI_DEF_CFA esp, 4*4;\
+        /*CFI_OFFSET cs, -2*4;*/\
+        CFI_OFFSET eip, -3*4
+
+#define RING0_PTREGS_FRAME \
+        CFI_STARTPROC simple;\
+        CFI_DEF_CFA esp, OLDESP-EBX;\
+        /*CFI_OFFSET cs, CS-OLDESP;*/\
+        CFI_OFFSET eip, EIP-OLDESP;\
+        /*CFI_OFFSET es, ES-OLDESP;*/\
+        /*CFI_OFFSET ds, DS-OLDESP;*/\
+        CFI_OFFSET eax, EAX-OLDESP;\
+        CFI_OFFSET ebp, EBP-OLDESP;\
+        CFI_OFFSET edi, EDI-OLDESP;\
+        CFI_OFFSET esi, ESI-OLDESP;\
+        CFI_OFFSET edx, EDX-OLDESP;\
+        CFI_OFFSET ecx, ECX-OLDESP;\
+        CFI_OFFSET ebx, EBX-OLDESP
 
 ENTRY(ret_from_fork)
+        CFI_STARTPROC
         pushl %eax
+        CFI_ADJUST_CFA_OFFSET 4
         call schedule_tail
         GET_THREAD_INFO(%ebp)
         popl %eax
+        CFI_ADJUST_CFA_OFFSET -4
         jmp syscall_exit
+        CFI_ENDPROC
 
 /*
  * Return to user mode is not as complex as all this looks,
@@ -139,6 +206,7 @@ ENTRY(ret_from_fork)
 
         # userspace resumption stub bypassing syscall exit tracing
         ALIGN
+        RING0_PTREGS_FRAME
 ret_from_exception:
         preempt_stop
 ret_from_intr:
@@ -171,20 +239,33 @@ need_resched:
         call preempt_schedule_irq
         jmp need_resched
 #endif
+        CFI_ENDPROC
 
 /* SYSENTER_RETURN points to after the "sysenter" instruction in
    the vsyscall page. See vsyscall-sysentry.S, which defines the symbol. */
 
         # sysenter call handler stub
 ENTRY(sysenter_entry)
+        CFI_STARTPROC simple
+        CFI_DEF_CFA esp, 0
+        CFI_REGISTER esp, ebp
         movl TSS_sysenter_esp0(%esp),%esp
 sysenter_past_esp:
         sti
         pushl $(__USER_DS)
+        CFI_ADJUST_CFA_OFFSET 4
+        /*CFI_REL_OFFSET ss, 0*/
         pushl %ebp
+        CFI_ADJUST_CFA_OFFSET 4
+        CFI_REL_OFFSET esp, 0
         pushfl
+        CFI_ADJUST_CFA_OFFSET 4
         pushl $(__USER_CS)
+        CFI_ADJUST_CFA_OFFSET 4
+        /*CFI_REL_OFFSET cs, 0*/
         pushl $SYSENTER_RETURN
+        CFI_ADJUST_CFA_OFFSET 4
+        CFI_REL_OFFSET eip, 0
 
 /*
  * Load the potential sixth argument from user stack.
@@ -199,6 +280,7 @@ sysenter_past_esp:
 .previous
 
         pushl %eax
+        CFI_ADJUST_CFA_OFFSET 4
         SAVE_ALL
         GET_THREAD_INFO(%ebp)
 
@@ -219,11 +301,14 @@ sysenter_past_esp:
         xorl %ebp,%ebp
         sti
         sysexit
+        CFI_ENDPROC
 
 
         # system call handler stub
 ENTRY(system_call)
+        RING0_INT_FRAME # can't unwind into user space anyway
         pushl %eax # save orig_eax
+        CFI_ADJUST_CFA_OFFSET 4
         SAVE_ALL
         GET_THREAD_INFO(%ebp)
         testl $TF_MASK,EFLAGS(%esp)
@@ -256,10 +341,12 @@ restore_all:
         movb CS(%esp), %al
         andl $(VM_MASK | (4 << 8) | 3), %eax
         cmpl $((4 << 8) | 3), %eax
+        CFI_REMEMBER_STATE
         je ldt_ss # returning to user-space with LDT SS
 restore_nocheck:
         RESTORE_REGS
         addl $4, %esp
+        CFI_ADJUST_CFA_OFFSET -4
 1:      iret
 .section .fixup,"ax"
 iret_exc:
@@ -273,6 +360,7 @@ iret_exc:
         .long 1b,iret_exc
 .previous
 
+        CFI_RESTORE_STATE
 ldt_ss:
         larl OLDSS(%esp), %eax
         jnz restore_nocheck
@@ -285,11 +373,13 @@ ldt_ss:
          * CPUs, which we can try to work around to make
          * dosemu and wine happy. */
         subl $8, %esp # reserve space for switch16 pointer
+        CFI_ADJUST_CFA_OFFSET 8
         cli
         movl %esp, %eax
         /* Set up the 16bit stack frame with switch32 pointer on top,
          * and a switch16 pointer on top of the current frame. */
         call setup_x86_bogus_stack
+        CFI_ADJUST_CFA_OFFSET -8 # frame has moved
         RESTORE_REGS
         lss 20+4(%esp), %esp # switch to 16bit stack
 1:      iret
@@ -297,9 +387,11 @@ ldt_ss:
         .align 4
         .long 1b,iret_exc
 .previous
+        CFI_ENDPROC
 
         # perform work that needs to be done immediately before resumption
         ALIGN
+        RING0_PTREGS_FRAME # can't unwind into user space anyway
 work_pending:
         testb $_TIF_NEED_RESCHED, %cl
         jz work_notifysig
@@ -329,8 +421,10 @@ work_notifysig: # deal with pending signals and
 work_notifysig_v86:
 #ifdef CONFIG_VM86
         pushl %ecx # save ti_flags for do_notify_resume
+        CFI_ADJUST_CFA_OFFSET 4
         call save_v86_state # %eax contains pt_regs pointer
         popl %ecx
+        CFI_ADJUST_CFA_OFFSET -4
         movl %eax, %esp
         xorl %edx, %edx
         call do_notify_resume
@@ -363,19 +457,21 @@ syscall_exit_work:
         movl $1, %edx
         call do_syscall_trace
         jmp resume_userspace
+        CFI_ENDPROC
 
-        ALIGN
+        RING0_INT_FRAME # can't unwind into user space anyway
 syscall_fault:
         pushl %eax # save orig_eax
+        CFI_ADJUST_CFA_OFFSET 4
         SAVE_ALL
         GET_THREAD_INFO(%ebp)
         movl $-EFAULT,EAX(%esp)
         jmp resume_userspace
 
-        ALIGN
 syscall_badsys:
         movl $-ENOSYS,EAX(%esp)
         jmp resume_userspace
+        CFI_ENDPROC
 
 #define FIXUP_ESPFIX_STACK \
         movl %esp, %eax; \
@@ -387,16 +483,21 @@ syscall_badsys:
         movl %eax, %esp;
 #define UNWIND_ESPFIX_STACK \
         pushl %eax; \
+        CFI_ADJUST_CFA_OFFSET 4; \
         movl %ss, %eax; \
         /* see if on 16bit stack */ \
         cmpw $__ESPFIX_SS, %ax; \
-        jne 28f; \
-        movl $__KERNEL_DS, %edx; \
-        movl %edx, %ds; \
-        movl %edx, %es; \
+        je 28f; \
+27:     popl %eax; \
+        CFI_ADJUST_CFA_OFFSET -4; \
+.section .fixup,"ax"; \
+28:     movl $__KERNEL_DS, %eax; \
+        movl %eax, %ds; \
+        movl %eax, %es; \
         /* switch to 32bit stack */ \
-        FIXUP_ESPFIX_STACK \
-28:     popl %eax;
+        FIXUP_ESPFIX_STACK; \
+        jmp 27b; \
+.previous
 
 /*
  * Build the entry stubs and pointer table with
@@ -408,9 +509,14 @@ ENTRY(interrupt)
 
 vector=0
 ENTRY(irq_entries_start)
+        RING0_INT_FRAME
 .rept NR_IRQS
         ALIGN
+        .if vector
+        CFI_ADJUST_CFA_OFFSET -4
+        .endif
 1:      pushl $vector-256
+        CFI_ADJUST_CFA_OFFSET 4
         jmp common_interrupt
 .data
         .long 1b
@@ -424,60 +530,99 @@ common_interrupt:
         movl %esp,%eax
         call do_IRQ
         jmp ret_from_intr
+        CFI_ENDPROC
 
 #define BUILD_INTERRUPT(name, nr) \
 ENTRY(name) \
+        RING0_INT_FRAME; \
         pushl $nr-256; \
-        SAVE_ALL \
+        CFI_ADJUST_CFA_OFFSET 4; \
+        SAVE_ALL; \
         movl %esp,%eax; \
         call smp_/**/name; \
-        jmp ret_from_intr;
+        jmp ret_from_intr; \
+        CFI_ENDPROC
 
 /* The include is where all of the SMP etc. interrupts come from */
 #include "entry_arch.h"
 
 ENTRY(divide_error)
+        RING0_INT_FRAME
         pushl $0 # no error code
+        CFI_ADJUST_CFA_OFFSET 4
         pushl $do_divide_error
+        CFI_ADJUST_CFA_OFFSET 4
         ALIGN
 error_code:
         pushl %ds
+        CFI_ADJUST_CFA_OFFSET 4
+        /*CFI_REL_OFFSET ds, 0*/
         pushl %eax
+        CFI_ADJUST_CFA_OFFSET 4
+        CFI_REL_OFFSET eax, 0
         xorl %eax, %eax
         pushl %ebp
+        CFI_ADJUST_CFA_OFFSET 4
+        CFI_REL_OFFSET ebp, 0
         pushl %edi
+        CFI_ADJUST_CFA_OFFSET 4
+        CFI_REL_OFFSET edi, 0
         pushl %esi
+        CFI_ADJUST_CFA_OFFSET 4
+        CFI_REL_OFFSET esi, 0
         pushl %edx
+        CFI_ADJUST_CFA_OFFSET 4
+        CFI_REL_OFFSET edx, 0
         decl %eax # eax = -1
         pushl %ecx
+        CFI_ADJUST_CFA_OFFSET 4
+        CFI_REL_OFFSET ecx, 0
         pushl %ebx
+        CFI_ADJUST_CFA_OFFSET 4
+        CFI_REL_OFFSET ebx, 0
         cld
         pushl %es
+        CFI_ADJUST_CFA_OFFSET 4
+        /*CFI_REL_OFFSET es, 0*/
         UNWIND_ESPFIX_STACK
         popl %ecx
+        CFI_ADJUST_CFA_OFFSET -4
+        /*CFI_REGISTER es, ecx*/
         movl ES(%esp), %edi # get the function address
         movl ORIG_EAX(%esp), %edx # get the error code
         movl %eax, ORIG_EAX(%esp)
         movl %ecx, ES(%esp)
+        /*CFI_REL_OFFSET es, ES*/
         movl $(__USER_DS), %ecx
         movl %ecx, %ds
         movl %ecx, %es
         movl %esp,%eax # pt_regs pointer
         call *%edi
         jmp ret_from_exception
+        CFI_ENDPROC
 
 ENTRY(coprocessor_error)
+        RING0_INT_FRAME
         pushl $0
+        CFI_ADJUST_CFA_OFFSET 4
         pushl $do_coprocessor_error
+        CFI_ADJUST_CFA_OFFSET 4
         jmp error_code
+        CFI_ENDPROC
 
 ENTRY(simd_coprocessor_error)
+        RING0_INT_FRAME
         pushl $0
+        CFI_ADJUST_CFA_OFFSET 4
         pushl $do_simd_coprocessor_error
+        CFI_ADJUST_CFA_OFFSET 4
         jmp error_code
+        CFI_ENDPROC
 
 ENTRY(device_not_available)
+        RING0_INT_FRAME
         pushl $-1 # mark this as an int
+        CFI_ADJUST_CFA_OFFSET 4
         SAVE_ALL
         movl %cr0, %eax
         testl $0x4, %eax # EM (math emulation bit)
@@ -487,9 +632,12 @@ ENTRY(device_not_available)
         jmp ret_from_exception
 device_not_available_emulate:
         pushl $0 # temporary storage for ORIG_EIP
+        CFI_ADJUST_CFA_OFFSET 4
         call math_emulate
         addl $4, %esp
+        CFI_ADJUST_CFA_OFFSET -4
         jmp ret_from_exception
+        CFI_ENDPROC
 
 /*
  * Debug traps and NMI can happen at the one SYSENTER instruction
@@ -514,16 +662,19 @@ label: \
         pushl $sysenter_past_esp
 
 KPROBE_ENTRY(debug)
+        RING0_INT_FRAME
         cmpl $sysenter_entry,(%esp)
         jne debug_stack_correct
         FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
 debug_stack_correct:
         pushl $-1 # mark this as an int
+        CFI_ADJUST_CFA_OFFSET 4
         SAVE_ALL
         xorl %edx,%edx # error code 0
         movl %esp,%eax # pt_regs pointer
         call do_debug
         jmp ret_from_exception
+        CFI_ENDPROC
         .previous .text
 /*
  * NMI is doubly nasty. It can happen _while_ we're handling
@@ -534,14 +685,18 @@ debug_stack_correct:
  * fault happened on the sysenter path.
  */
 ENTRY(nmi)
+        RING0_INT_FRAME
         pushl %eax
+        CFI_ADJUST_CFA_OFFSET 4
         movl %ss, %eax
         cmpw $__ESPFIX_SS, %ax
         popl %eax
+        CFI_ADJUST_CFA_OFFSET -4
         je nmi_16bit_stack
         cmpl $sysenter_entry,(%esp)
         je nmi_stack_fixup
         pushl %eax
+        CFI_ADJUST_CFA_OFFSET 4
         movl %esp,%eax
         /* Do not access memory above the end of our stack page,
          * it might not exist.
@@ -549,16 +704,19 @@ ENTRY(nmi)
         andl $(THREAD_SIZE-1),%eax
         cmpl $(THREAD_SIZE-20),%eax
         popl %eax
+        CFI_ADJUST_CFA_OFFSET -4
         jae nmi_stack_correct
         cmpl $sysenter_entry,12(%esp)
         je nmi_debug_stack_check
 nmi_stack_correct:
         pushl %eax
+        CFI_ADJUST_CFA_OFFSET 4
         SAVE_ALL
         xorl %edx,%edx # zero error code
         movl %esp,%eax # pt_regs pointer
         call do_nmi
         jmp restore_all
+        CFI_ENDPROC
 
 nmi_stack_fixup:
         FIX_STACK(12,nmi_stack_correct, 1)
@@ -574,97 +732,150 @@ nmi_debug_stack_check:
         jmp nmi_stack_correct
 
 nmi_16bit_stack:
+        RING0_INT_FRAME
         /* create the pointer to lss back */
         pushl %ss
+        CFI_ADJUST_CFA_OFFSET 4
         pushl %esp
+        CFI_ADJUST_CFA_OFFSET 4
         movzwl %sp, %esp
         addw $4, (%esp)
         /* copy the iret frame of 12 bytes */
         .rept 3
         pushl 16(%esp)
+        CFI_ADJUST_CFA_OFFSET 4
         .endr
         pushl %eax
+        CFI_ADJUST_CFA_OFFSET 4
         SAVE_ALL
         FIXUP_ESPFIX_STACK # %eax == %esp
+        CFI_ADJUST_CFA_OFFSET -20 # the frame has now moved
         xorl %edx,%edx # zero error code
         call do_nmi
         RESTORE_REGS
         lss 12+4(%esp), %esp # back to 16bit stack
 1:      iret
+        CFI_ENDPROC
 .section __ex_table,"a"
         .align 4
         .long 1b,iret_exc
 .previous
 
 KPROBE_ENTRY(int3)
+        RING0_INT_FRAME
         pushl $-1 # mark this as an int
+        CFI_ADJUST_CFA_OFFSET 4
         SAVE_ALL
         xorl %edx,%edx # zero error code
         movl %esp,%eax # pt_regs pointer
         call do_int3
         jmp ret_from_exception
+        CFI_ENDPROC
         .previous .text
 
 ENTRY(overflow)
+        RING0_INT_FRAME
         pushl $0
+        CFI_ADJUST_CFA_OFFSET 4
         pushl $do_overflow
+        CFI_ADJUST_CFA_OFFSET 4
         jmp error_code
+        CFI_ENDPROC
 
 ENTRY(bounds)
+        RING0_INT_FRAME
         pushl $0
+        CFI_ADJUST_CFA_OFFSET 4
         pushl $do_bounds
+        CFI_ADJUST_CFA_OFFSET 4
         jmp error_code
+        CFI_ENDPROC
 
 ENTRY(invalid_op)
+        RING0_INT_FRAME
         pushl $0
+        CFI_ADJUST_CFA_OFFSET 4
         pushl $do_invalid_op
+        CFI_ADJUST_CFA_OFFSET 4
         jmp error_code
+        CFI_ENDPROC
 
 ENTRY(coprocessor_segment_overrun)
+        RING0_INT_FRAME
         pushl $0
+        CFI_ADJUST_CFA_OFFSET 4
         pushl $do_coprocessor_segment_overrun
+        CFI_ADJUST_CFA_OFFSET 4
         jmp error_code
+        CFI_ENDPROC
 
 ENTRY(invalid_TSS)
+        RING0_EC_FRAME
         pushl $do_invalid_TSS
+        CFI_ADJUST_CFA_OFFSET 4
         jmp error_code
+        CFI_ENDPROC
 
 ENTRY(segment_not_present)
+        RING0_EC_FRAME
         pushl $do_segment_not_present
+        CFI_ADJUST_CFA_OFFSET 4
         jmp error_code
+        CFI_ENDPROC
 
 ENTRY(stack_segment)
+        RING0_EC_FRAME
         pushl $do_stack_segment
+        CFI_ADJUST_CFA_OFFSET 4
         jmp error_code
+        CFI_ENDPROC
 
 KPROBE_ENTRY(general_protection)
+        RING0_EC_FRAME
         pushl $do_general_protection
+        CFI_ADJUST_CFA_OFFSET 4
         jmp error_code
+        CFI_ENDPROC
         .previous .text
 
 ENTRY(alignment_check)
+        RING0_EC_FRAME
         pushl $do_alignment_check
+        CFI_ADJUST_CFA_OFFSET 4
         jmp error_code
+        CFI_ENDPROC
 
 KPROBE_ENTRY(page_fault)
+        RING0_EC_FRAME
         pushl $do_page_fault
+        CFI_ADJUST_CFA_OFFSET 4
         jmp error_code
+        CFI_ENDPROC
         .previous .text
 
 #ifdef CONFIG_X86_MCE
 ENTRY(machine_check)
+        RING0_INT_FRAME
         pushl $0
+        CFI_ADJUST_CFA_OFFSET 4
         pushl machine_check_vector
+        CFI_ADJUST_CFA_OFFSET 4
         jmp error_code
+        CFI_ENDPROC
 #endif
 
 ENTRY(spurious_interrupt_bug)
+        RING0_INT_FRAME
         pushl $0
+        CFI_ADJUST_CFA_OFFSET 4
         pushl $do_spurious_interrupt_bug
+        CFI_ADJUST_CFA_OFFSET 4
         jmp error_code
+        CFI_ENDPROC
 
 #ifdef CONFIG_STACK_UNWIND
 ENTRY(arch_unwind_init_running)
+        CFI_STARTPROC
         movl 4(%esp), %edx
         movl (%esp), %ecx
         leal 4(%esp), %eax
@@ -689,6 +900,7 @@ ENTRY(arch_unwind_init_running)
         movl EBX(%edx), %ebx
         movl $__KERNEL_DS, OLDSS(%edx)
         jmpl *%eax
+        CFI_ENDPROC
 ENDPROC(arch_unwind_init_running)
 #endif
 
diff --git a/include/asm-i386/dwarf2.h b/include/asm-i386/dwarf2.h
new file mode 100644
index 000000000000..2280f6272f80
--- /dev/null
+++ b/include/asm-i386/dwarf2.h
@@ -0,0 +1,54 @@
+#ifndef _DWARF2_H
+#define _DWARF2_H
+
+#include <linux/config.h>
+
+#ifndef __ASSEMBLY__
+#warning "asm/dwarf2.h should be only included in pure assembly files"
+#endif
+
+/*
+   Macros for dwarf2 CFI unwind table entries.
+   See "as.info" for details on these pseudo ops. Unfortunately
+   they are only supported in very new binutils, so define them
+   away for older versions.
+ */
+
+#ifdef CONFIG_UNWIND_INFO
+
+#define CFI_STARTPROC .cfi_startproc
+#define CFI_ENDPROC .cfi_endproc
+#define CFI_DEF_CFA .cfi_def_cfa
+#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register
+#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
+#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
+#define CFI_OFFSET .cfi_offset
+#define CFI_REL_OFFSET .cfi_rel_offset
+#define CFI_REGISTER .cfi_register
+#define CFI_RESTORE .cfi_restore
+#define CFI_REMEMBER_STATE .cfi_remember_state
+#define CFI_RESTORE_STATE .cfi_restore_state
+
+#else
+
+/* Due to the structure of pre-existing code, don't use assembler line
+   comment character # to ignore the arguments. Instead, use a dummy macro. */
+.macro ignore a=0, b=0, c=0, d=0
+.endm
+
+#define CFI_STARTPROC ignore
+#define CFI_ENDPROC ignore
+#define CFI_DEF_CFA ignore
+#define CFI_DEF_CFA_REGISTER ignore
+#define CFI_DEF_CFA_OFFSET ignore
+#define CFI_ADJUST_CFA_OFFSET ignore
+#define CFI_OFFSET ignore
+#define CFI_REL_OFFSET ignore
+#define CFI_REGISTER ignore
+#define CFI_RESTORE ignore
+#define CFI_REMEMBER_STATE ignore
+#define CFI_RESTORE_STATE ignore
+
+#endif
+
+#endif
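
Note: the annotation pattern the patch repeats throughout entry.S can be shown in isolation. The fragment below is an illustrative sketch only, not part of the patch; my_helper and do_something are made-up names. It assumes the CFI_* macros from the new asm/dwarf2.h: every pushl is paired with CFI_ADJUST_CFA_OFFSET 4 because the canonical frame address (CFA) moves four bytes further from %esp, every popl with CFI_ADJUST_CFA_OFFSET -4, and CFI_REL_OFFSET/CFI_RESTORE record where a saved register lives and when it holds its caller's value again. With CONFIG_UNWIND_INFO disabled, all of these expand to the dummy "ignore" macro, so the annotations generate no code or data.

/* Illustrative sketch, not part of the patch: a hypothetical helper
 * annotated the same way the patch annotates the real entry points. */
#include <linux/linkage.h>
#include <asm/dwarf2.h>

ENTRY(my_helper)                        # hypothetical function
        CFI_STARTPROC                   # open an unwind table entry
        pushl %ebp
        CFI_ADJUST_CFA_OFFSET 4         # the frame grew by 4 bytes
        CFI_REL_OFFSET ebp, 0           # caller's %ebp is saved at the new top of stack
        movl %esp, %ebp
        call do_something               # hypothetical C function
        popl %ebp
        CFI_ADJUST_CFA_OFFSET -4        # the frame shrank back
        CFI_RESTORE ebp                 # %ebp holds the caller's value again
        ret
        CFI_ENDPROC                     # close the unwind table entry

The RING0_INT_FRAME/RING0_EC_FRAME/RING0_PTREGS_FRAME macros added to entry.S serve the same purpose for code reached via an interrupt or exception: they open a frame whose CFA is described relative to the hardware-built iret frame (or the full pt_regs layout) instead of a normal call frame.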