about | summary | refs | log | tree | commit | diff | stats
path: root/arch/i386/kernel/entry.S
diff options
context:
space:
mode:
author: Stas Sergeev <stsp@aknet.ru> 2006-12-06 20:14:01 -0500
committer: Andi Kleen <andi@basil.nowhere.org> 2006-12-06 20:14:01 -0500
commit: be44d2aabce2d62f72d5751d1871b6212bf7a1c7 (patch)
tree: 3f190dd5b5747ee83b50c4596b4801ce6c6b551c /arch/i386/kernel/entry.S
parent: bb81a09e55eaf7e5f798468ab971469b6f66a259 (diff)
[PATCH] i386: espfix cleanup
Clean up the espfix code:
- Introduced PER_CPU() macro to be used from asm
- Introduced GET_DESC_BASE() macro to be used from asm
- Rewrote the fixup code in asm, as calling a C code with the altered %ss
  appeared to be unsafe
- No longer altering the stack from a .fixup section
- 16bit per-cpu stack is no longer used, instead the stack segment base
  is patched the way so that the high word of the kernel and user %esp
  are the same.
- Added the limit-patching for the espfix segment. (Chuck Ebbert)

[jeremy@goop.org: use the x86 scaling addressing mode rather than shifting]
Signed-off-by: Stas Sergeev <stsp@aknet.ru>
Signed-off-by: Andi Kleen <ak@suse.de>
Acked-by: Zachary Amsden <zach@vmware.com>
Acked-by: Chuck Ebbert <76306.1226@compuserve.com>
Acked-by: Jan Beulich <jbeulich@novell.com>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Jeremy Fitzhardinge <jeremy@goop.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Diffstat (limited to 'arch/i386/kernel/entry.S')
-rw-r--r--  arch/i386/kernel/entry.S  73
1 file changed, 33 insertions, 40 deletions
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index 5a63d6fdb70e..c38d801ba0bb 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -48,6 +48,7 @@
48#include <asm/smp.h> 48#include <asm/smp.h>
49#include <asm/page.h> 49#include <asm/page.h>
50#include <asm/desc.h> 50#include <asm/desc.h>
51#include <asm/percpu.h>
51#include <asm/dwarf2.h> 52#include <asm/dwarf2.h>
52#include "irq_vectors.h" 53#include "irq_vectors.h"
53 54
@@ -418,23 +419,18 @@ ldt_ss:
418 * This is an "official" bug of all the x86-compatible 419 * This is an "official" bug of all the x86-compatible
419 * CPUs, which we can try to work around to make 420 * CPUs, which we can try to work around to make
420 * dosemu and wine happy. */ 421 * dosemu and wine happy. */
421 subl $8, %esp # reserve space for switch16 pointer 422 movl OLDESP(%esp), %eax
422 CFI_ADJUST_CFA_OFFSET 8 423 movl %esp, %edx
424 call patch_espfix_desc
425 pushl $__ESPFIX_SS
426 CFI_ADJUST_CFA_OFFSET 4
427 pushl %eax
428 CFI_ADJUST_CFA_OFFSET 4
423 DISABLE_INTERRUPTS 429 DISABLE_INTERRUPTS
424 TRACE_IRQS_OFF 430 TRACE_IRQS_OFF
425 movl %esp, %eax 431 lss (%esp), %esp
426 /* Set up the 16bit stack frame with switch32 pointer on top, 432 CFI_ADJUST_CFA_OFFSET -8
427 * and a switch16 pointer on top of the current frame. */ 433 jmp restore_nocheck
428 call setup_x86_bogus_stack
429 CFI_ADJUST_CFA_OFFSET -8 # frame has moved
430 TRACE_IRQS_IRET
431 RESTORE_REGS
432 lss 20+4(%esp), %esp # switch to 16bit stack
4331: INTERRUPT_RETURN
434.section __ex_table,"a"
435 .align 4
436 .long 1b,iret_exc
437.previous
438 CFI_ENDPROC 434 CFI_ENDPROC
439 435
440 # perform work that needs to be done immediately before resumption 436 # perform work that needs to be done immediately before resumption
@@ -524,30 +520,30 @@ syscall_badsys:
524 CFI_ENDPROC 520 CFI_ENDPROC
525 521
526#define FIXUP_ESPFIX_STACK \ 522#define FIXUP_ESPFIX_STACK \
527 movl %esp, %eax; \ 523 /* since we are on a wrong stack, we cant make it a C code :( */ \
528 /* switch to 32bit stack using the pointer on top of 16bit stack */ \ 524 GET_THREAD_INFO(%ebp); \
529 lss %ss:CPU_16BIT_STACK_SIZE-8, %esp; \ 525 movl TI_cpu(%ebp), %ebx; \
530 /* copy data from 16bit stack to 32bit stack */ \ 526 PER_CPU(cpu_gdt_descr, %ebx); \
531 call fixup_x86_bogus_stack; \ 527 movl GDS_address(%ebx), %ebx; \
532 /* put ESP to the proper location */ \ 528 GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah); \
533 movl %eax, %esp; 529 addl %esp, %eax; \
534#define UNWIND_ESPFIX_STACK \ 530 pushl $__KERNEL_DS; \
531 CFI_ADJUST_CFA_OFFSET 4; \
535 pushl %eax; \ 532 pushl %eax; \
536 CFI_ADJUST_CFA_OFFSET 4; \ 533 CFI_ADJUST_CFA_OFFSET 4; \
534 lss (%esp), %esp; \
535 CFI_ADJUST_CFA_OFFSET -8;
536#define UNWIND_ESPFIX_STACK \
537 movl %ss, %eax; \ 537 movl %ss, %eax; \
538 /* see if on 16bit stack */ \ 538 /* see if on espfix stack */ \
539 cmpw $__ESPFIX_SS, %ax; \ 539 cmpw $__ESPFIX_SS, %ax; \
540 je 28f; \ 540 jne 27f; \
54127: popl %eax; \ 541 movl $__KERNEL_DS, %eax; \
542 CFI_ADJUST_CFA_OFFSET -4; \
543.section .fixup,"ax"; \
54428: movl $__KERNEL_DS, %eax; \
545 movl %eax, %ds; \ 542 movl %eax, %ds; \
546 movl %eax, %es; \ 543 movl %eax, %es; \
547 /* switch to 32bit stack */ \ 544 /* switch to normal stack */ \
548 FIXUP_ESPFIX_STACK; \ 545 FIXUP_ESPFIX_STACK; \
549 jmp 27b; \ 54627:;
550.previous
551 547
552/* 548/*
553 * Build the entry stubs and pointer table with 549 * Build the entry stubs and pointer table with
@@ -614,7 +610,6 @@ error_code:
614 pushl %eax 610 pushl %eax
615 CFI_ADJUST_CFA_OFFSET 4 611 CFI_ADJUST_CFA_OFFSET 4
616 CFI_REL_OFFSET eax, 0 612 CFI_REL_OFFSET eax, 0
617 xorl %eax, %eax
618 pushl %ebp 613 pushl %ebp
619 CFI_ADJUST_CFA_OFFSET 4 614 CFI_ADJUST_CFA_OFFSET 4
620 CFI_REL_OFFSET ebp, 0 615 CFI_REL_OFFSET ebp, 0
@@ -627,7 +622,6 @@ error_code:
627 pushl %edx 622 pushl %edx
628 CFI_ADJUST_CFA_OFFSET 4 623 CFI_ADJUST_CFA_OFFSET 4
629 CFI_REL_OFFSET edx, 0 624 CFI_REL_OFFSET edx, 0
630 decl %eax # eax = -1
631 pushl %ecx 625 pushl %ecx
632 CFI_ADJUST_CFA_OFFSET 4 626 CFI_ADJUST_CFA_OFFSET 4
633 CFI_REL_OFFSET ecx, 0 627 CFI_REL_OFFSET ecx, 0
@@ -644,7 +638,7 @@ error_code:
644 /*CFI_REGISTER es, ecx*/ 638 /*CFI_REGISTER es, ecx*/
645 movl ES(%esp), %edi # get the function address 639 movl ES(%esp), %edi # get the function address
646 movl ORIG_EAX(%esp), %edx # get the error code 640 movl ORIG_EAX(%esp), %edx # get the error code
647 movl %eax, ORIG_EAX(%esp) 641 movl $-1, ORIG_EAX(%esp)
648 movl %ecx, ES(%esp) 642 movl %ecx, ES(%esp)
649 /*CFI_REL_OFFSET es, ES*/ 643 /*CFI_REL_OFFSET es, ES*/
650 movl $(__USER_DS), %ecx 644 movl $(__USER_DS), %ecx
@@ -754,7 +748,7 @@ KPROBE_ENTRY(nmi)
754 cmpw $__ESPFIX_SS, %ax 748 cmpw $__ESPFIX_SS, %ax
755 popl %eax 749 popl %eax
756 CFI_ADJUST_CFA_OFFSET -4 750 CFI_ADJUST_CFA_OFFSET -4
757 je nmi_16bit_stack 751 je nmi_espfix_stack
758 cmpl $sysenter_entry,(%esp) 752 cmpl $sysenter_entry,(%esp)
759 je nmi_stack_fixup 753 je nmi_stack_fixup
760 pushl %eax 754 pushl %eax
@@ -797,7 +791,7 @@ nmi_debug_stack_check:
797 FIX_STACK(24,nmi_stack_correct, 1) 791 FIX_STACK(24,nmi_stack_correct, 1)
798 jmp nmi_stack_correct 792 jmp nmi_stack_correct
799 793
800nmi_16bit_stack: 794nmi_espfix_stack:
801 /* We have a RING0_INT_FRAME here. 795 /* We have a RING0_INT_FRAME here.
802 * 796 *
803 * create the pointer to lss back 797 * create the pointer to lss back
@@ -806,7 +800,6 @@ nmi_16bit_stack:
806 CFI_ADJUST_CFA_OFFSET 4 800 CFI_ADJUST_CFA_OFFSET 4
807 pushl %esp 801 pushl %esp
808 CFI_ADJUST_CFA_OFFSET 4 802 CFI_ADJUST_CFA_OFFSET 4
809 movzwl %sp, %esp
810 addw $4, (%esp) 803 addw $4, (%esp)
811 /* copy the iret frame of 12 bytes */ 804 /* copy the iret frame of 12 bytes */
812 .rept 3 805 .rept 3
@@ -817,11 +810,11 @@ nmi_16bit_stack:
817 CFI_ADJUST_CFA_OFFSET 4 810 CFI_ADJUST_CFA_OFFSET 4
818 SAVE_ALL 811 SAVE_ALL
819 FIXUP_ESPFIX_STACK # %eax == %esp 812 FIXUP_ESPFIX_STACK # %eax == %esp
820 CFI_ADJUST_CFA_OFFSET -20 # the frame has now moved
821 xorl %edx,%edx # zero error code 813 xorl %edx,%edx # zero error code
822 call do_nmi 814 call do_nmi
823 RESTORE_REGS 815 RESTORE_REGS
824 lss 12+4(%esp), %esp # back to 16bit stack 816 lss 12+4(%esp), %esp # back to espfix stack
817 CFI_ADJUST_CFA_OFFSET -24
8251: INTERRUPT_RETURN 8181: INTERRUPT_RETURN
826 CFI_ENDPROC 819 CFI_ENDPROC
827.section __ex_table,"a" 820.section __ex_table,"a"