diff options
author | Alexander van Heukelum <heukelum@fastmail.fm> | 2009-06-17 18:35:58 -0400 |
---|---|---|
committer | H. Peter Anvin <hpa@zytor.com> | 2009-06-18 00:35:09 -0400 |
commit | dc4c2a0aed3b09f6e255bd5c3faa50fe6e0b2ded (patch) | |
tree | f8fb6e15dfeb602b638b1e14a75fea6042839cb0 /arch/x86/kernel/entry_32.S | |
parent | 2e04bc76560decd9270be2a805927316f617ef56 (diff) |
i386: fix/simplify espfix stack switching, move it into assembly
The espfix code triggers if we have a protected mode userspace
application with a 16-bit stack. On returning to userspace, with iret,
the CPU doesn't restore the high word of the stack pointer. This is an
"official" bug, and the work-around used in the kernel is to temporarily
switch to a 32-bit stack segment/pointer pair where the high word of the
pointer is equal to the high word of the userspace stackpointer.
The current implementation uses THREAD_SIZE to determine the cut-off,
but there is no good reason not to use the more natural 64kb... However,
implementing this by simply substituting THREAD_SIZE with 65536 in
patch_espfix_desc crashed the test application. patch_espfix_desc tries
to do what is described above, but gets it subtly wrong if the userspace
stack pointer is just below a multiple of THREAD_SIZE: an overflow
occurs to bit 13... With a bit of luck, when the kernelspace
stackpointer is just below a 64kb-boundary, the overflow then ripples
through to bit 16 and userspace will see its stack pointer changed by
65536.
This patch moves all espfix code into entry_32.S. Selecting a 16-bit
cut-off simplifies the code. The game with changing the limit dynamically
is removed too. It complicates matters and I see no value in it. Changing
only the top 16-bit word of ESP is one instruction and it also implies
that only two bytes of the ESPFIX GDT entry need to be changed and this
can be implemented in just a handful of simple-to-understand instructions.
As a side effect, the operation to compute the original ESP from the
ESPFIX ESP and the GDT entry simplifies a bit too, and the remaining
three instructions have been expanded inline in entry_32.S.
impact: can now reliably run userspace with ESP=xxxxfffc on 16-bit
stack segment
Signed-off-by: Alexander van Heukelum <heukelum@fastmail.fm>
Acked-by: Stas Sergeev <stsp@aknet.ru>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'arch/x86/kernel/entry_32.S')
-rw-r--r-- | arch/x86/kernel/entry_32.S | 49 |
1 files changed, 34 insertions, 15 deletions
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index d7d1c7d20e4e..848f73f09549 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S | |||
@@ -588,24 +588,34 @@ ldt_ss: | |||
588 | jne restore_nocheck | 588 | jne restore_nocheck |
589 | #endif | 589 | #endif |
590 | 590 | ||
591 | /* If returning to userspace with 16bit stack, | 591 | /* |
592 | * try to fix the higher word of ESP, as the CPU | 592 | * Setup and switch to ESPFIX stack |
593 | * won't restore it. | 593 | * |
594 | * This is an "official" bug of all the x86-compatible | 594 | * We're returning to userspace with a 16 bit stack. The CPU will not |
595 | * CPUs, which we can try to work around to make | 595 | * restore the high word of ESP for us on executing iret... This is an |
596 | * dosemu and wine happy. */ | 596 | * "official" bug of all the x86-compatible CPUs, which we can work |
597 | movl PT_OLDESP(%esp), %eax | 597 | * around to make dosemu and wine happy. We do this by preloading the |
598 | movl %esp, %edx | 598 | * high word of ESP with the high word of the userspace ESP while |
599 | call patch_espfix_desc | 599 | * compensating for the offset by changing to the ESPFIX segment with |
600 | * a base address that matches for the difference. | ||
601 | */ | ||
602 | mov %esp, %edx /* load kernel esp */ | ||
603 | mov PT_OLDESP(%esp), %eax /* load userspace esp */ | ||
604 | mov %dx, %ax /* eax: new kernel esp */ | ||
605 | sub %eax, %edx /* offset (low word is 0) */ | ||
606 | PER_CPU(gdt_page, %ebx) | ||
607 | shr $16, %edx | ||
608 | mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */ | ||
609 | mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */ | ||
600 | pushl $__ESPFIX_SS | 610 | pushl $__ESPFIX_SS |
601 | CFI_ADJUST_CFA_OFFSET 4 | 611 | CFI_ADJUST_CFA_OFFSET 4 |
602 | pushl %eax | 612 | push %eax /* new kernel esp */ |
603 | CFI_ADJUST_CFA_OFFSET 4 | 613 | CFI_ADJUST_CFA_OFFSET 4 |
604 | /* Disable interrupts, but do not irqtrace this section: we | 614 | /* Disable interrupts, but do not irqtrace this section: we |
605 | * will soon execute iret and the tracer was already set to | 615 | * will soon execute iret and the tracer was already set to |
606 | * the irqstate after the iret */ | 616 | * the irqstate after the iret */ |
607 | DISABLE_INTERRUPTS(CLBR_EAX) | 617 | DISABLE_INTERRUPTS(CLBR_EAX) |
608 | lss (%esp), %esp | 618 | lss (%esp), %esp /* switch to espfix segment */ |
609 | CFI_ADJUST_CFA_OFFSET -8 | 619 | CFI_ADJUST_CFA_OFFSET -8 |
610 | jmp restore_nocheck | 620 | jmp restore_nocheck |
611 | CFI_ENDPROC | 621 | CFI_ENDPROC |
@@ -718,15 +728,24 @@ PTREGSCALL(vm86) | |||
718 | PTREGSCALL(vm86old) | 728 | PTREGSCALL(vm86old) |
719 | 729 | ||
720 | .macro FIXUP_ESPFIX_STACK | 730 | .macro FIXUP_ESPFIX_STACK |
721 | /* since we are on a wrong stack, we cant make it a C code :( */ | 731 | /* |
732 | * Switch back for ESPFIX stack to the normal zerobased stack | ||
733 | * | ||
734 | * We can't call C functions using the ESPFIX stack. This code reads | ||
735 | * the high word of the segment base from the GDT and swiches to the | ||
736 | * normal stack and adjusts ESP with the matching offset. | ||
737 | */ | ||
738 | /* fixup the stack */ | ||
722 | PER_CPU(gdt_page, %ebx) | 739 | PER_CPU(gdt_page, %ebx) |
723 | GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah) | 740 | mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */ |
724 | addl %esp, %eax | 741 | mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */ |
742 | shl $16, %eax | ||
743 | addl %esp, %eax /* the adjusted stack pointer */ | ||
725 | pushl $__KERNEL_DS | 744 | pushl $__KERNEL_DS |
726 | CFI_ADJUST_CFA_OFFSET 4 | 745 | CFI_ADJUST_CFA_OFFSET 4 |
727 | pushl %eax | 746 | pushl %eax |
728 | CFI_ADJUST_CFA_OFFSET 4 | 747 | CFI_ADJUST_CFA_OFFSET 4 |
729 | lss (%esp), %esp | 748 | lss (%esp), %esp /* switch to the normal stack segment */ |
730 | CFI_ADJUST_CFA_OFFSET -8 | 749 | CFI_ADJUST_CFA_OFFSET -8 |
731 | .endm | 750 | .endm |
732 | .macro UNWIND_ESPFIX_STACK | 751 | .macro UNWIND_ESPFIX_STACK |