Diffstat (limited to 'arch/i386/kernel/entry.S')
-rw-r--r--  arch/i386/kernel/entry.S | 110
 1 file changed, 74 insertions(+), 36 deletions(-)
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index 87f9f60b803b..5a63d6fdb70e 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -76,8 +76,15 @@ DF_MASK = 0x00000400
 NT_MASK		= 0x00004000
 VM_MASK		= 0x00020000
 
+/* These are replaces for paravirtualization */
+#define DISABLE_INTERRUPTS		cli
+#define ENABLE_INTERRUPTS		sti
+#define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
+#define INTERRUPT_RETURN		iret
+#define GET_CR0_INTO_EAX		movl %cr0, %eax
+
 #ifdef CONFIG_PREEMPT
-#define preempt_stop		cli; TRACE_IRQS_OFF
+#define preempt_stop		DISABLE_INTERRUPTS; TRACE_IRQS_OFF
 #else
 #define preempt_stop
 #define resume_kernel		restore_nocheck
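The added block gives each privilege-sensitive instruction a single named substitution point, so a paravirtualized build can redirect all of them without touching the call sites below. A minimal sketch of the idea, assuming a hypothetical CONFIG_PARAVIRT switch and hypervisor stubs (this is not the eventual paravirt_ops interface, just the substitution these macros make possible):

    /* Sketch only: every name in the CONFIG_PARAVIRT branch is hypothetical. */
    #ifdef CONFIG_PARAVIRT
    #define DISABLE_INTERRUPTS		call hyper_irq_disable
    #define ENABLE_INTERRUPTS		call hyper_irq_enable
    #define ENABLE_INTERRUPTS_SYSEXIT	jmp hyper_sysexit_return
    #define INTERRUPT_RETURN		jmp hyper_iret
    #define GET_CR0_INTO_EAX		call hyper_read_cr0
    #else
    #define DISABLE_INTERRUPTS		cli
    #define ENABLE_INTERRUPTS		sti
    #define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
    #define INTERRUPT_RETURN		iret
    #define GET_CR0_INTO_EAX		movl %cr0, %eax
    #endif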
@@ -176,18 +183,21 @@ VM_MASK = 0x00020000
 
 #define RING0_INT_FRAME \
 	CFI_STARTPROC simple;\
+	CFI_SIGNAL_FRAME;\
 	CFI_DEF_CFA esp, 3*4;\
 	/*CFI_OFFSET cs, -2*4;*/\
 	CFI_OFFSET eip, -3*4
 
 #define RING0_EC_FRAME \
 	CFI_STARTPROC simple;\
+	CFI_SIGNAL_FRAME;\
 	CFI_DEF_CFA esp, 4*4;\
 	/*CFI_OFFSET cs, -2*4;*/\
 	CFI_OFFSET eip, -3*4
 
 #define RING0_PTREGS_FRAME \
 	CFI_STARTPROC simple;\
+	CFI_SIGNAL_FRAME;\
 	CFI_DEF_CFA esp, OLDESP-EBX;\
 	/*CFI_OFFSET cs, CS-OLDESP;*/\
 	CFI_OFFSET eip, EIP-OLDESP;\
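CFI_SIGNAL_FRAME tells the DWARF unwinder that these frames were pushed by the CPU on a trap rather than by a call instruction, so it must not subtract one from the recorded address when mapping it back to the enclosing function. Since only newer assemblers accept .cfi_signal_frame, the macro is presumably guarded the way the other CFI_* wrappers are; a sketch, with the config symbol an assumption:

    /* Sketch: emit .cfi_signal_frame only if the assembler supports it. */
    #ifdef CONFIG_AS_CFI_SIGNAL_FRAME
    #define CFI_SIGNAL_FRAME	.cfi_signal_frame
    #else
    #define CFI_SIGNAL_FRAME
    #endif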
@@ -233,10 +243,11 @@ ret_from_intr:
 check_userspace:
 	movl EFLAGS(%esp), %eax		# mix EFLAGS and CS
 	movb CS(%esp), %al
-	testl $(VM_MASK | 3), %eax
-	jz resume_kernel
+	andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
+	cmpl $USER_RPL, %eax
+	jb resume_kernel		# not returning to v8086 or userspace
 ENTRY(resume_userspace)
-	cli				# make sure we don't miss an interrupt
+	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
 					# setting need_resched or sigpending
 					# between sampling and the iret
 	movl TI_flags(%ebp), %ecx
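The rewritten check must treat both vm86 mode and ring 3 as userspace. Because VM_MASK is bit 17 while the RPL field occupies bits 0-1, one unsigned compare covers both: with the VM flag set the value is far above USER_RPL, and with it clear the comparison reduces to the saved RPL. In C the test reads roughly:

    static int returning_to_kernel(unsigned int eflags, unsigned int cs)
    {
    	/* eax = saved EFLAGS with its low byte overwritten by the saved CS */
    	unsigned int eax = (eflags & ~0xffu) | (cs & 0xffu);

    	eax &= VM_MASK | SEGMENT_RPL_MASK;  /* VM flag + requested privilege level */
    	return eax < USER_RPL;              /* VM clear and RPL 0..2: kernel */
    }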
@@ -247,7 +258,7 @@ ENTRY(resume_userspace)
 
 #ifdef CONFIG_PREEMPT
 ENTRY(resume_kernel)
-	cli
+	DISABLE_INTERRUPTS
 	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
 	jnz restore_nocheck
 need_resched:
@@ -267,6 +278,7 @@ need_resched:
 	# sysenter call handler stub
 ENTRY(sysenter_entry)
 	CFI_STARTPROC simple
+	CFI_SIGNAL_FRAME
 	CFI_DEF_CFA esp, 0
 	CFI_REGISTER esp, ebp
 	movl TSS_sysenter_esp0(%esp),%esp
@@ -275,7 +287,7 @@ sysenter_past_esp:
 	 * No need to follow this irqs on/off section: the syscall
 	 * disabled irqs and here we enable it straight after entry:
 	 */
-	sti
+	ENABLE_INTERRUPTS
 	pushl $(__USER_DS)
 	CFI_ADJUST_CFA_OFFSET 4
 	/*CFI_REL_OFFSET ss, 0*/
@@ -320,7 +332,7 @@ sysenter_past_esp:
 	jae syscall_badsys
 	call *sys_call_table(,%eax,4)
 	movl %eax,EAX(%esp)
-	cli
+	DISABLE_INTERRUPTS
 	TRACE_IRQS_OFF
 	movl TI_flags(%ebp), %ecx
 	testw $_TIF_ALLWORK_MASK, %cx
@@ -330,8 +342,7 @@ sysenter_past_esp:
 	movl OLDESP(%esp), %ecx
 	xorl %ebp,%ebp
 	TRACE_IRQS_ON
-	sti
-	sysexit
+	ENABLE_INTERRUPTS_SYSEXIT
 	CFI_ENDPROC
 
 
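Collapsing "sti; sysexit" into the single ENABLE_INTERRUPTS_SYSEXIT macro is deliberate. sti leaves interrupts masked for exactly one more instruction, so in the native expansion the ring transition happens inside that shadow and no interrupt can slip in after user state has been restored:

    	sti		# interrupts stay masked for one more instruction
    	sysexit		# executes in the sti shadow, lands in userspace

A hypervisor must be able to substitute the pair as a unit to preserve that atomicity, which the old two-instruction form could not express.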
@@ -356,7 +367,7 @@ syscall_call:
 	call *sys_call_table(,%eax,4)
 	movl %eax,EAX(%esp)		# store the return value
 syscall_exit:
-	cli				# make sure we don't miss an interrupt
+	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
 					# setting need_resched or sigpending
 					# between sampling and the iret
 	TRACE_IRQS_OFF
@@ -371,8 +382,8 @@ restore_all:
 	# See comments in process.c:copy_thread() for details.
 	movb OLDSS(%esp), %ah
 	movb CS(%esp), %al
-	andl $(VM_MASK | (4 << 8) | 3), %eax
-	cmpl $((4 << 8) | 3), %eax
+	andl $(VM_MASK | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
+	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
 	CFI_REMEMBER_STATE
 	je ldt_ss			# returning to user-space with LDT SS
 restore_nocheck:
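The retired magic numbers encode the fields of an x86 segment selector: bits 0-1 are the requested privilege level (RPL) and bit 2 is the table indicator (GDT vs. LDT). The symbolic names come from i386's asm/segment.h and expand to the same values, so the generated code is unchanged:

    /* selector field constants, per asm/segment.h on i386 */
    #define SEGMENT_RPL_MASK	0x3	/* bits 0-1: requested privilege level */
    #define SEGMENT_TI_MASK	0x4	/* bit 2: table indicator */
    #define SEGMENT_LDT		0x4	/* TI set: selector indexes the LDT */
    #define USER_RPL		0x3	/* userspace runs at ring 3 */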
@@ -381,11 +392,11 @@ restore_nocheck_notrace:
 	RESTORE_REGS
 	addl $4, %esp
 	CFI_ADJUST_CFA_OFFSET -4
-1:	iret
+1:	INTERRUPT_RETURN
 .section .fixup,"ax"
 iret_exc:
 	TRACE_IRQS_ON
-	sti
+	ENABLE_INTERRUPTS
 	pushl $0			# no error code
 	pushl $do_iret_error
 	jmp error_code
@@ -409,7 +420,7 @@ ldt_ss:
 	 * dosemu and wine happy. */
 	subl $8, %esp			# reserve space for switch16 pointer
 	CFI_ADJUST_CFA_OFFSET 8
-	cli
+	DISABLE_INTERRUPTS
 	TRACE_IRQS_OFF
 	movl %esp, %eax
 	/* Set up the 16bit stack frame with switch32 pointer on top,
@@ -419,7 +430,7 @@ ldt_ss:
 	TRACE_IRQS_IRET
 	RESTORE_REGS
 	lss 20+4(%esp), %esp		# switch to 16bit stack
-1:	iret
+1:	INTERRUPT_RETURN
 .section __ex_table,"a"
 	.align 4
 	.long 1b,iret_exc
@@ -434,7 +445,7 @@ work_pending:
 	jz work_notifysig
 work_resched:
 	call schedule
-	cli				# make sure we don't miss an interrupt
+	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
 					# setting need_resched or sigpending
 					# between sampling and the iret
 	TRACE_IRQS_OFF
@@ -490,7 +501,7 @@ syscall_exit_work:
 	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
 	jz work_pending
 	TRACE_IRQS_ON
-	sti				# could let do_syscall_trace() call
+	ENABLE_INTERRUPTS		# could let do_syscall_trace() call
 					# schedule() instead
 	movl %esp, %eax
 	movl $1, %edx
@@ -591,11 +602,9 @@ ENTRY(name) \
 /* The include is where all of the SMP etc. interrupts come from */
 #include "entry_arch.h"
 
-ENTRY(divide_error)
-	RING0_INT_FRAME
-	pushl $0			# no error code
-	CFI_ADJUST_CFA_OFFSET 4
-	pushl $do_divide_error
+KPROBE_ENTRY(page_fault)
+	RING0_EC_FRAME
+	pushl $do_page_fault
 	CFI_ADJUST_CFA_OFFSET 4
 	ALIGN
 error_code:
@@ -645,6 +654,7 @@ error_code:
 	call *%edi
 	jmp ret_from_exception
 	CFI_ENDPROC
+KPROBE_END(page_fault)
 
 ENTRY(coprocessor_error)
 	RING0_INT_FRAME
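KPROBE_ENTRY/KPROBE_END bracket handlers that kprobes itself must never probe, by emitting them into the .kprobes.text section; KPROBE_END also retires the fragile ".previous .text" endings seen in later hunks, which did not reliably restore the section in force. The pair lives in linux/linkage.h and looks roughly like this (a sketch; the exact definitions in this tree may differ):

    #ifdef CONFIG_KPROBES
    # define KPROBE_ENTRY(name)	.section .kprobes.text, "ax"; ENTRY(name)
    # define KPROBE_END(name)	.previous	/* back to the prior section */
    #else
    # define KPROBE_ENTRY(name)	ENTRY(name)
    # define KPROBE_END(name)
    #endif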
@@ -669,7 +679,7 @@ ENTRY(device_not_available)
 	pushl $-1			# mark this as an int
 	CFI_ADJUST_CFA_OFFSET 4
 	SAVE_ALL
-	movl %cr0, %eax
+	GET_CR0_INTO_EAX
 	testl $0x4, %eax		# EM (math emulation bit)
 	jne device_not_available_emulate
 	preempt_stop
@@ -702,9 +712,15 @@ device_not_available_emulate:
 	jne ok; \
 label: \
 	movl TSS_sysenter_esp0+offset(%esp),%esp; \
+	CFI_DEF_CFA esp, 0; \
+	CFI_UNDEFINED eip; \
 	pushfl; \
+	CFI_ADJUST_CFA_OFFSET 4; \
 	pushl $__KERNEL_CS; \
-	pushl $sysenter_past_esp
+	CFI_ADJUST_CFA_OFFSET 4; \
+	pushl $sysenter_past_esp; \
+	CFI_ADJUST_CFA_OFFSET 4; \
+	CFI_REL_OFFSET eip, 0
 
 KPROBE_ENTRY(debug)
 	RING0_INT_FRAME
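The annotations added to FIX_STACK describe what its three pushes build: a fake ring-0 trap frame on the real process stack, so that re-entry at sysenter_past_esp (and any unwinder walking through it) sees a normal kernel entry. The resulting layout, derived from the pushes above:

    	# stack after FIX_STACK, lowest address first:
    	#   (%esp)   eip = sysenter_past_esp   (hence CFI_REL_OFFSET eip, 0)
    	#  4(%esp)   cs  = __KERNEL_CS
    	#  8(%esp)   eflags                    (from pushfl)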
@@ -720,7 +736,8 @@ debug_stack_correct:
 	call do_debug
 	jmp ret_from_exception
 	CFI_ENDPROC
-	.previous .text
+KPROBE_END(debug)
+
 /*
  * NMI is doubly nasty. It can happen _while_ we're handling
  * a debug fault, and the debug fault hasn't yet been able to
@@ -729,7 +746,7 @@ debug_stack_correct:
  * check whether we got an NMI on the debug path where the debug
  * fault happened on the sysenter path.
  */
-ENTRY(nmi)
+KPROBE_ENTRY(nmi)
 	RING0_INT_FRAME
 	pushl %eax
 	CFI_ADJUST_CFA_OFFSET 4
@@ -754,6 +771,7 @@ ENTRY(nmi)
 	cmpl $sysenter_entry,12(%esp)
 	je nmi_debug_stack_check
 nmi_stack_correct:
+	/* We have a RING0_INT_FRAME here */
 	pushl %eax
 	CFI_ADJUST_CFA_OFFSET 4
 	SAVE_ALL
@@ -764,9 +782,12 @@ nmi_stack_correct:
 	CFI_ENDPROC
 
 nmi_stack_fixup:
+	RING0_INT_FRAME
 	FIX_STACK(12,nmi_stack_correct, 1)
 	jmp nmi_stack_correct
+
 nmi_debug_stack_check:
+	/* We have a RING0_INT_FRAME here */
 	cmpw $__KERNEL_CS,16(%esp)
 	jne nmi_stack_correct
 	cmpl $debug,(%esp)
@@ -777,8 +798,10 @@ nmi_debug_stack_check:
 	jmp nmi_stack_correct
 
 nmi_16bit_stack:
-	RING0_INT_FRAME
-	/* create the pointer to lss back */
+	/* We have a RING0_INT_FRAME here.
+	 *
+	 * create the pointer to lss back
+	 */
 	pushl %ss
 	CFI_ADJUST_CFA_OFFSET 4
 	pushl %esp
@@ -799,12 +822,13 @@ nmi_16bit_stack:
 	call do_nmi
 	RESTORE_REGS
 	lss 12+4(%esp), %esp		# back to 16bit stack
-1:	iret
+1:	INTERRUPT_RETURN
 	CFI_ENDPROC
 .section __ex_table,"a"
 	.align 4
 	.long 1b,iret_exc
 .previous
+KPROBE_END(nmi)
 
 KPROBE_ENTRY(int3)
 	RING0_INT_FRAME
@@ -816,7 +840,7 @@ KPROBE_ENTRY(int3)
 	call do_int3
 	jmp ret_from_exception
 	CFI_ENDPROC
-	.previous .text
+KPROBE_END(int3)
 
 ENTRY(overflow)
 	RING0_INT_FRAME
@@ -881,7 +905,7 @@ KPROBE_ENTRY(general_protection)
 	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
 	CFI_ENDPROC
-	.previous .text
+KPROBE_END(general_protection)
 
 ENTRY(alignment_check)
 	RING0_EC_FRAME
@@ -890,13 +914,14 @@ ENTRY(alignment_check)
 	jmp error_code
 	CFI_ENDPROC
 
-KPROBE_ENTRY(page_fault)
-	RING0_EC_FRAME
-	pushl $do_page_fault
+ENTRY(divide_error)
+	RING0_INT_FRAME
+	pushl $0			# no error code
+	CFI_ADJUST_CFA_OFFSET 4
+	pushl $do_divide_error
 	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
 	CFI_ENDPROC
-	.previous .text
 
 #ifdef CONFIG_X86_MCE
 ENTRY(machine_check)
@@ -949,6 +974,19 @@ ENTRY(arch_unwind_init_running)
 ENDPROC(arch_unwind_init_running)
 #endif
 
+ENTRY(kernel_thread_helper)
+	pushl $0			# fake return address for unwinder
+	CFI_STARTPROC
+	movl %edx,%eax
+	push %edx
+	CFI_ADJUST_CFA_OFFSET 4
+	call *%ebx
+	push %eax
+	CFI_ADJUST_CFA_OFFSET 4
+	call do_exit
+	CFI_ENDPROC
+ENDPROC(kernel_thread_helper)
+
 .section .rodata,"a"
 #include "syscall_table.S"
 
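kernel_thread_helper is the landing pad that lets kernel threads start via ret_from_fork: the parent stages a pt_regs whose eip points at the helper, with the thread function in %ebx and its argument in %edx, and the pushed 0 gives the unwinder a terminating return address; the function's return value is then handed to do_exit. The C side pairs with it roughly as follows, a simplified sketch of i386's kernel_thread() from this era:

    int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
    {
    	struct pt_regs regs;

    	memset(&regs, 0, sizeof(regs));

    	regs.ebx = (unsigned long) fn;	/* helper does "call *%ebx" */
    	regs.edx = (unsigned long) arg;	/* pushed as fn's argument  */

    	regs.xds = __USER_DS;
    	regs.xes = __USER_DS;
    	regs.orig_eax = -1;
    	regs.eip = (unsigned long) kernel_thread_helper;
    	regs.xcs = __KERNEL_CS;
    	regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;

    	/* child unwinds from ret_from_fork straight into the helper */
    	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
    }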