author		Jens Rottmann <JRottmann@lippert-at.de>		2007-11-27 06:35:13 -0500
committer	H. Peter Anvin <hpa@zytor.com>			2007-11-28 21:17:17 -0500
commit		16252da654800461e0e1c32697cb59f4cda15aa9 (patch)
tree		6b9c9f8ef73ac7e4b733faa7735a049b193a2c94 /arch
parent		09f345da758fca1222b0971b65b2fddbdf78bb83 (diff)
x86 setup: don't recalculate ss:esp unless really necessary
In order to work around old LILO versions providing an invalid ss register,
the current setup code always sets up a new stack, immediately following
.bss and the heap.  But this breaks LOADLIN.

This rewrite of the workaround checks for an invalid stack (ss != ds) first,
and leaves ss:sp alone otherwise (apart from aligning esp).

[hpa note: LOADLIN has a number of arbitrary hard-coded limits that are being
pushed up against.  Without some major revision of LOADLIN itself, keeping it
alive will not be sustainable.  This gives it another brief lease on life,
however.  This patch also helps the cmdline truncation problem with old
versions of SYSLINUX.]

Signed-off-by: Jens Rottmann <JRottmann@lippert-at.de>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
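To make the new control flow easier to follow, here is a minimal C model of
what the rewritten setup code does. This is a sketch, not kernel code:
pick_stack_end is a hypothetical helper, the parameters mirror the assembly
symbols (%ds, %ss, %sp, _end, heap_end_ptr, loadflags), CAN_USE_HEAP is the
boot-protocol loadflags bit 0x80, and STACK_SIZE is taken as 512 purely for
illustration.

#include <stdint.h>

#define CAN_USE_HEAP 0x80   /* loadflags bit from the boot protocol */
#define STACK_SIZE   512    /* illustrative minimum stack size */

/* Hypothetical helper modelling the new hunk: returns the new %sp;
 * %ss is reloaded with %ds afterwards in both paths. */
uint16_t pick_stack_end(uint16_t ds, uint16_t ss, uint16_t sp,
                        uint16_t end_of_bss /* _end */,
                        uint16_t heap_end_ptr, uint8_t loadflags)
{
	uint32_t dx;

	if (ss == ds) {
		/* Loader gave us a sane stack: keep it, only align below. */
		dx = sp;
	} else {
		/* Invalid %ss (old LILO): build a stack after .bss, or after
		 * the heap if the loader granted it via CAN_USE_HEAP. */
		dx = (loadflags & CAN_USE_HEAP) ? heap_end_ptr : end_of_bss;
		dx += STACK_SIZE;
		if (dx > 0xffff)	/* addw wrapped around (carry set) */
			dx = 0;
	}

	dx &= ~3u;			/* dword align (might as well...) */
	if (dx == 0)
		dx = 0xfffc;		/* 0 would mean no stack; use top of segment */

	return (uint16_t)dx;
}

The key design point, per the commit message, is that the loader's ss:sp is
only discarded when %ss is demonstrably bogus; a sane stack is kept and merely
dword-aligned.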
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/boot/header.S	41
1 file changed, 16 insertions, 25 deletions
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index 6ef5a060fa11..4cc5b0411db5 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -236,39 +236,30 @@ start_of_setup:
 	movw	%ax, %es
 	cld
 
-# Apparently some ancient versions of LILO invoked the kernel
-# with %ss != %ds, which happened to work by accident for the
-# old code.  If the CAN_USE_HEAP flag is set in loadflags, or
-# %ss != %ds, then adjust the stack pointer.
+# Apparently some ancient versions of LILO invoked the kernel with %ss != %ds,
+# which happened to work by accident for the old code.  Recalculate the stack
+# pointer if %ss is invalid.  Otherwise leave it alone, LOADLIN sets up the
+# stack behind its own code, so we can't blindly put it directly past the heap.
 
-	# Smallest possible stack we can tolerate
-	movw	$(_end+STACK_SIZE), %cx
-
-	movw	heap_end_ptr, %dx
-	addw	$512, %dx
-	jnc	1f
-	xorw	%dx, %dx	# Wraparound - whole segment available
-1:	testb	$CAN_USE_HEAP, loadflags
-	jnz	2f
-
-	# No CAN_USE_HEAP
 	movw	%ss, %dx
 	cmpw	%ax, %dx	# %ds == %ss?
 	movw	%sp, %dx
-	# If so, assume %sp is reasonably set, otherwise use
-	# the smallest possible stack.
-	jne	4f		# -> Smallest possible stack...
+	je	2f		# -> assume %sp is reasonably set
+
+	# Invalid %ss, make up a new stack
+	movw	$_end, %dx
+	testb	$CAN_USE_HEAP, loadflags
+	jz	1f
+	movw	heap_end_ptr, %dx
+1:	addw	$STACK_SIZE, %dx
+	jnc	2f
+	xorw	%dx, %dx	# Prevent wraparound
 
-	# Make sure the stack is at least minimum size.  Take a value
-	# of zero to mean "full segment."
-2:
+2:	# Now %dx should point to the end of our stack space
 	andw	$~3, %dx	# dword align (might as well...)
 	jnz	3f
 	movw	$0xfffc, %dx	# Make sure we're not zero
-3:	cmpw	%cx, %dx
-	jnb	5f
-4:	movw	%cx, %dx	# Minimum value we can possibly use
-5:	movw	%ax, %ss
+3:	movw	%ax, %ss
 	movzwl	%dx, %esp	# Clear upper half of %esp
 	sti			# Now we should have a working stack
 
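For contrast, the removed logic can be modelled the same way (a sketch under
the same hypothetical naming and constants as the earlier one, not kernel
code). It shows why LOADLIN broke: with CAN_USE_HEAP set, the loader's ss:sp
was never consulted and the stack was always relocated to just past the heap,
while LOADLIN keeps its stack behind its own code.

#include <stdint.h>

#define CAN_USE_HEAP 0x80   /* loadflags bit from the boot protocol */
#define STACK_SIZE   512    /* illustrative minimum stack size */

/* Old behaviour removed by this patch. */
uint16_t old_pick_stack_end(uint16_t ds, uint16_t ss, uint16_t sp,
                            uint16_t end_of_bss /* _end */,
                            uint16_t heap_end_ptr, uint8_t loadflags)
{
	uint16_t cx = (uint16_t)(end_of_bss + STACK_SIZE); /* smallest tolerable stack */
	uint32_t dx = (uint32_t)heap_end_ptr + 512;
	if (dx > 0xffff)
		dx = 0;			/* wraparound: whole segment available */

	if (!(loadflags & CAN_USE_HEAP)) {
		if (ss != ds)
			return cx;	/* bogus %ss: smallest possible stack */
		dx = sp;		/* otherwise start from the loader's %sp */
	}

	dx &= ~3u;			/* dword align */
	if (dx == 0)
		dx = 0xfffc;		/* zero means "full segment" */
	if (dx < cx)
		dx = cx;		/* never go below the minimum stack */
	return (uint16_t)dx;
}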