author    Eric W. Biederman <ebiederm@xmission.com>  2006-12-06 20:14:04 -0500
committer Andi Kleen <andi@basil.nowhere.org>        2006-12-06 20:14:04 -0500
commit    968de4f02621db35b8ae5239c8cfc6664fb872d8 (patch)
tree      9388da7f18f9511e1bbfeaf934cba8dbc696e9f4 /arch/i386
parent    fd593d12770d4a0d1ff095d44b96436c18479ee8 (diff)
[PATCH] i386: Relocatable kernel support
This patch modifies the i386 kernel so that, if CONFIG_RELOCATABLE is selected, it can be loaded at any 4K-aligned address below 1G.

The technique used is to compile the decompressor with -fPIC and modify it so that the decompressor is fully relocatable. For the main kernel, relocations are generated and appended to the compressed image, resulting in a kernel that is relocatable with no runtime overhead and no need to modify the source code.

A reserved 32-bit word in the real-mode parameters has been assigned to serve as a stack so we can figure out where we are running.

Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: Vivek Goyal <vgoyal@in.ibm.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Diffstat (limited to 'arch/i386')
-rw-r--r--  arch/i386/Kconfig                     |  12
-rw-r--r--  arch/i386/Makefile                    |   4
-rw-r--r--  arch/i386/boot/compressed/Makefile    |  28
-rw-r--r--  arch/i386/boot/compressed/head.S      | 184
-rw-r--r--  arch/i386/boot/compressed/misc.c      | 261
-rw-r--r--  arch/i386/boot/compressed/relocs.c    | 563
-rw-r--r--  arch/i386/boot/compressed/vmlinux.lds |  43
-rw-r--r--  arch/i386/boot/compressed/vmlinux.scr |   3
-rw-r--r--  arch/i386/boot/setup.S                |  29
9 files changed, 918 insertions(+), 209 deletions(-)
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 8ff1c6fb5aa1..d588ca874bb4 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -773,6 +773,18 @@ config CRASH_DUMP
773 PHYSICAL_START. 773 PHYSICAL_START.
774 For more details see Documentation/kdump/kdump.txt 774 For more details see Documentation/kdump/kdump.txt
775 775
776config RELOCATABLE
777 bool "Build a relocatable kernel"
778 help
779	  This builds a kernel image that retains relocation information
780	  so it can be loaded someplace besides the default 1MB.
781	  The relocations tend to make the kernel binary about 10% larger,
782	  but are discarded at runtime.
783
784 One use is for the kexec on panic case where the recovery kernel
785 must live at a different physical address than the primary
786 kernel.
787
776config PHYSICAL_START 788config PHYSICAL_START
777 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP) 789 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
778 790
diff --git a/arch/i386/Makefile b/arch/i386/Makefile
index 0677908dfa06..d1aca52bf690 100644
--- a/arch/i386/Makefile
+++ b/arch/i386/Makefile
@@ -26,7 +26,9 @@ endif
26 26
27LDFLAGS := -m elf_i386 27LDFLAGS := -m elf_i386
28OBJCOPYFLAGS := -O binary -R .note -R .comment -S 28OBJCOPYFLAGS := -O binary -R .note -R .comment -S
29LDFLAGS_vmlinux := 29ifdef CONFIG_RELOCATABLE
30LDFLAGS_vmlinux := --emit-relocs
31endif
30CHECKFLAGS += -D__i386__ 32CHECKFLAGS += -D__i386__
31 33
32CFLAGS += -pipe -msoft-float 34CFLAGS += -pipe -msoft-float
diff --git a/arch/i386/boot/compressed/Makefile b/arch/i386/boot/compressed/Makefile
index 258ea95224f6..cc28da3a881e 100644
--- a/arch/i386/boot/compressed/Makefile
+++ b/arch/i386/boot/compressed/Makefile
@@ -4,22 +4,42 @@
4# create a compressed vmlinux image from the original vmlinux 4# create a compressed vmlinux image from the original vmlinux
5# 5#
6 6
7targets := vmlinux vmlinux.bin vmlinux.bin.gz head.o misc.o piggy.o 7targets := vmlinux vmlinux.bin vmlinux.bin.gz head.o misc.o piggy.o \
8 vmlinux.bin.all vmlinux.relocs
8EXTRA_AFLAGS := -traditional 9EXTRA_AFLAGS := -traditional
9 10
10LDFLAGS_vmlinux := -Ttext $(IMAGE_OFFSET) -e startup_32 11LDFLAGS_vmlinux := -T
12CFLAGS_misc.o += -fPIC
13hostprogs-y := relocs
11 14
12$(obj)/vmlinux: $(obj)/head.o $(obj)/misc.o $(obj)/piggy.o FORCE 15$(obj)/vmlinux: $(src)/vmlinux.lds $(obj)/head.o $(obj)/misc.o $(obj)/piggy.o FORCE
13 $(call if_changed,ld) 16 $(call if_changed,ld)
14 @: 17 @:
15 18
16$(obj)/vmlinux.bin: vmlinux FORCE 19$(obj)/vmlinux.bin: vmlinux FORCE
17 $(call if_changed,objcopy) 20 $(call if_changed,objcopy)
18 21
22quiet_cmd_relocs = RELOCS $@
23 cmd_relocs = $(obj)/relocs $< > $@
24$(obj)/vmlinux.relocs: vmlinux $(obj)/relocs FORCE
25 $(call if_changed,relocs)
26
27vmlinux.bin.all-y := $(obj)/vmlinux.bin
28vmlinux.bin.all-$(CONFIG_RELOCATABLE) += $(obj)/vmlinux.relocs
29quiet_cmd_relocbin = BUILD $@
30 cmd_relocbin = cat $(filter-out FORCE,$^) > $@
31$(obj)/vmlinux.bin.all: $(vmlinux.bin.all-y) FORCE
32 $(call if_changed,relocbin)
33
34ifdef CONFIG_RELOCATABLE
35$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin.all FORCE
36 $(call if_changed,gzip)
37else
19$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE 38$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
20 $(call if_changed,gzip) 39 $(call if_changed,gzip)
40endif
21 41
22LDFLAGS_piggy.o := -r --format binary --oformat elf32-i386 -T 42LDFLAGS_piggy.o := -r --format binary --oformat elf32-i386 -T
23 43
24$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.gz FORCE 44$(obj)/piggy.o: $(src)/vmlinux.scr $(obj)/vmlinux.bin.gz FORCE
25 $(call if_changed,ld) 45 $(call if_changed,ld)
diff --git a/arch/i386/boot/compressed/head.S b/arch/i386/boot/compressed/head.S
index 40a8de8270a9..e4dd7a6b9b0f 100644
--- a/arch/i386/boot/compressed/head.S
+++ b/arch/i386/boot/compressed/head.S
@@ -25,9 +25,11 @@
25 25
26#include <linux/linkage.h> 26#include <linux/linkage.h>
27#include <asm/segment.h> 27#include <asm/segment.h>
28#include <asm/page.h>
28 29
30.section ".text.head"
29 .globl startup_32 31 .globl startup_32
30 32
31startup_32: 33startup_32:
32 cld 34 cld
33 cli 35 cli
@@ -36,93 +38,141 @@ startup_32:
36 movl %eax,%es 38 movl %eax,%es
37 movl %eax,%fs 39 movl %eax,%fs
38 movl %eax,%gs 40 movl %eax,%gs
41 movl %eax,%ss
39 42
40 lss stack_start,%esp 43/* Calculate the delta between where we were compiled to run
41 xorl %eax,%eax 44 * at and where we were actually loaded at. This can only be done
421: incl %eax # check that A20 really IS enabled 45 * with a short local call on x86. Nothing else will tell us what
43 movl %eax,0x000000 # loop forever if it isn't 46 * address we are running at. The reserved chunk of the real-mode
 44 	cmpl %eax,0x100000		 47 * data at 0x34-0x3f is used as the stack for this calculation.
45 je 1b 48 * Only 4 bytes are needed.
49 */
50 leal 0x40(%esi), %esp
51 call 1f
521: popl %ebp
53 subl $1b, %ebp
54
55/* Compute the delta between where we were compiled to run at
56 * and where the code will actually run at.
57 */
 58	/* Start with the delta to where the kernel will run. If we are
 59	 * a relocatable kernel this is the delta to our load address, otherwise
 60	 * it is the delta to CONFIG_PHYSICAL_START.
61 */
62#ifdef CONFIG_RELOCATABLE
63 movl %ebp, %ebx
64#else
65 movl $(CONFIG_PHYSICAL_START - startup_32), %ebx
66#endif
67
68 /* Replace the compressed data size with the uncompressed size */
69 subl input_len(%ebp), %ebx
70 movl output_len(%ebp), %eax
71 addl %eax, %ebx
72 /* Add 8 bytes for every 32K input block */
73 shrl $12, %eax
74 addl %eax, %ebx
75 /* Add 32K + 18 bytes of extra slack */
76 addl $(32768 + 18), %ebx
77 /* Align on a 4K boundary */
78 addl $4095, %ebx
79 andl $~4095, %ebx
80
81/* Copy the compressed kernel to the end of our buffer
82 * where decompression in place becomes safe.
83 */
84 pushl %esi
85 leal _end(%ebp), %esi
86 leal _end(%ebx), %edi
87 movl $(_end - startup_32), %ecx
88 std
89 rep
90 movsb
91 cld
92 popl %esi
93
94/* Compute the kernel start address.
95 */
96#ifdef CONFIG_RELOCATABLE
97 leal startup_32(%ebp), %ebp
98#else
99 movl $CONFIG_PHYSICAL_START, %ebp
100#endif
46 101
47/* 102/*
48 * Initialize eflags. Some BIOS's leave bits like NT set. This would 103 * Jump to the relocated address.
49 * confuse the debugger if this code is traced.
50 * XXX - best to initialize before switching to protected mode.
51 */ 104 */
52 pushl $0 105 leal relocated(%ebx), %eax
53 popfl 106 jmp *%eax
107.section ".text"
108relocated:
109
54/* 110/*
55 * Clear BSS 111 * Clear BSS
56 */ 112 */
57 xorl %eax,%eax 113 xorl %eax,%eax
58 movl $_edata,%edi 114 leal _edata(%ebx),%edi
59 movl $_end,%ecx 115 leal _end(%ebx), %ecx
60 subl %edi,%ecx 116 subl %edi,%ecx
61 cld 117 cld
62 rep 118 rep
63 stosb 119 stosb
120
121/*
122 * Setup the stack for the decompressor
123 */
124 leal stack_end(%ebx), %esp
125
64/* 126/*
65 * Do the decompression, and jump to the new kernel.. 127 * Do the decompression, and jump to the new kernel..
66 */ 128 */
67 subl $16,%esp # place for structure on the stack 129 movl output_len(%ebx), %eax
68 movl %esp,%eax 130 pushl %eax
131 pushl %ebp # output address
132 movl input_len(%ebx), %eax
133 pushl %eax # input_len
134 leal input_data(%ebx), %eax
135 pushl %eax # input_data
136 leal _end(%ebx), %eax
 137	pushl %eax			# end of the decompressor image
69 pushl %esi # real mode pointer as second arg 138 pushl %esi # real mode pointer as second arg
70 pushl %eax # address of structure as first arg
71 call decompress_kernel 139 call decompress_kernel
72 orl %eax,%eax 140 addl $20, %esp
73 jnz 3f 141 popl %ecx
74 popl %esi # discard address 142
75 popl %esi # real mode pointer 143#if CONFIG_RELOCATABLE
76 xorl %ebx,%ebx 144/* Find the address of the relocations.
77 ljmp $(__BOOT_CS), $CONFIG_PHYSICAL_START 145 */
146 movl %ebp, %edi
147 addl %ecx, %edi
148
149/* Calculate the delta between where vmlinux was compiled to run
150 * and where it was actually loaded.
151 */
152 movl %ebp, %ebx
153 subl $CONFIG_PHYSICAL_START, %ebx
78 154
79/* 155/*
80 * We come here, if we were loaded high. 156 * Process relocations.
81 * We need to move the move-in-place routine down to 0x1000
82 * and then start it with the buffer addresses in registers,
83 * which we got from the stack.
84 */ 157 */
853: 158
86 movl $move_routine_start,%esi 1591: subl $4, %edi
87 movl $0x1000,%edi 160 movl 0(%edi), %ecx
88 movl $move_routine_end,%ecx 161 testl %ecx, %ecx
89 subl %esi,%ecx 162 jz 2f
90 addl $3,%ecx 163 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
91 shrl $2,%ecx 164 jmp 1b
92 cld 1652:
93 rep 166#endif
94 movsl
95
96 popl %esi # discard the address
97 popl %ebx # real mode pointer
98 popl %esi # low_buffer_start
99 popl %ecx # lcount
100 popl %edx # high_buffer_start
101 popl %eax # hcount
102 movl $CONFIG_PHYSICAL_START,%edi
103 cli # make sure we don't get interrupted
104 ljmp $(__BOOT_CS), $0x1000 # and jump to the move routine
105 167
106/* 168/*
107 * Routine (template) for moving the decompressed kernel in place, 169 * Jump to the decompressed kernel.
108 * if we were high loaded. This _must_ PIC-code !
109 */ 170 */
110move_routine_start:
111 movl %ecx,%ebp
112 shrl $2,%ecx
113 rep
114 movsl
115 movl %ebp,%ecx
116 andl $3,%ecx
117 rep
118 movsb
119 movl %edx,%esi
120 movl %eax,%ecx # NOTE: rep movsb won't move if %ecx == 0
121 addl $3,%ecx
122 shrl $2,%ecx
123 rep
124 movsl
125 movl %ebx,%esi # Restore setup pointer
126 xorl %ebx,%ebx 171 xorl %ebx,%ebx
127 ljmp $(__BOOT_CS), $CONFIG_PHYSICAL_START 172 jmp *%ebp
128move_routine_end: 173
174.bss
175.balign 4
176stack:
177 .fill 4096, 1, 0
178stack_end:
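For readers decoding the assembly above, a rough C sketch of the two computations the new head.S performs may help: sizing the buffer in which in-place decompression is safe, and (under CONFIG_RELOCATABLE) walking the appended relocation table. This is an illustrative sketch only; the function and parameter names (decompress_buffer_delta, apply_relocations, load_delta, page_offset) are invented here and do not appear in the patch.

#include <stdint.h>

/* Mirror of the %ebx arithmetic before the copy to the end of the buffer. */
static uint32_t decompress_buffer_delta(uint32_t load_delta,
					uint32_t input_len,
					uint32_t output_len)
{
	uint32_t ebx = load_delta;

	ebx -= input_len;		/* replace the compressed size ...      */
	ebx += output_len;		/* ... with the uncompressed size       */
	ebx += output_len >> 12;	/* 1 byte of slack per 4K of output     */
	ebx += 32768 + 18;		/* one worst-case block + gzip overhead */
	ebx = (ebx + 4095) & ~4095u;	/* align to a 4K boundary               */
	return ebx;
}

/* Mirror of the CONFIG_RELOCATABLE loop after decompress_kernel() returns:
 * the table ends where the decompressed image ends, each entry is a kernel
 * virtual address, a zero entry stops the walk, and delta is
 * load_address - CONFIG_PHYSICAL_START.
 */
static void apply_relocations(uint32_t *table_end, uint32_t delta,
			      uint32_t page_offset)
{
	uint32_t vaddr;

	while ((vaddr = *--table_end) != 0) {
		uint32_t *site = (uint32_t *)(vaddr - page_offset + delta);
		*site += delta;
	}
}

The walk runs backwards because, as relocs.c below shows, the table is emitted with a zero word in front of the sorted offsets, which gives the loop a natural stop marker.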
diff --git a/arch/i386/boot/compressed/misc.c b/arch/i386/boot/compressed/misc.c
index 20970ff44119..4eac24e95a10 100644
--- a/arch/i386/boot/compressed/misc.c
+++ b/arch/i386/boot/compressed/misc.c
@@ -13,6 +13,88 @@
13#include <linux/vmalloc.h> 13#include <linux/vmalloc.h>
14#include <linux/screen_info.h> 14#include <linux/screen_info.h>
15#include <asm/io.h> 15#include <asm/io.h>
16#include <asm/page.h>
17
18/* WARNING!!
19 * This code is compiled with -fPIC and it is relocated dynamically
20 * at run time, but no relocation processing is performed.
21 * This means that it is not safe to place pointers in static structures.
22 */
23
24/*
 25 * Getting to provably safe in place decompression is hard.
 26 * Worst case behaviours need to be analyzed.
27 * Background information:
28 *
29 * The file layout is:
30 * magic[2]
31 * method[1]
32 * flags[1]
33 * timestamp[4]
34 * extraflags[1]
35 * os[1]
36 * compressed data blocks[N]
37 * crc[4] orig_len[4]
38 *
39 * resulting in 18 bytes of non compressed data overhead.
40 *
 41 * Files are divided into blocks:
42 * 1 bit (last block flag)
43 * 2 bits (block type)
44 *
 45 * A block occurs every 32K - 1 bytes, or when 50% compression has been achieved.
46 * The smallest block type encoding is always used.
47 *
48 * stored:
49 * 32 bits length in bytes.
50 *
51 * fixed:
52 * magic fixed tree.
53 * symbols.
54 *
55 * dynamic:
56 * dynamic tree encoding.
57 * symbols.
58 *
59 *
60 * The buffer for decompression in place is the length of the
61 * uncompressed data, plus a small amount extra to keep the algorithm safe.
62 * The compressed data is placed at the end of the buffer. The output
63 * pointer is placed at the start of the buffer and the input pointer
64 * is placed where the compressed data starts. Problems will occur
65 * when the output pointer overruns the input pointer.
66 *
67 * The output pointer can only overrun the input pointer if the input
68 * pointer is moving faster than the output pointer. A condition only
69 * triggered by data whose compressed form is larger than the uncompressed
70 * form.
71 *
72 * The worst case at the block level is a growth of the compressed data
73 * of 5 bytes per 32767 bytes.
74 *
75 * The worst case internal to a compressed block is very hard to figure.
 76 * The worst case can at least be bounded by having one bit that represents
77 * 32764 bytes and then all of the rest of the bytes representing the very
78 * very last byte.
79 *
80 * All of which is enough to compute an amount of extra data that is required
81 * to be safe. To avoid problems at the block level allocating 5 extra bytes
 82 * per 32767 bytes of data is sufficient. To avoid problems internal to a block
83 * adding an extra 32767 bytes (the worst case uncompressed block size) is
84 * sufficient, to ensure that in the worst case the decompressed data for
85 * block will stop the byte before the compressed data for a block begins.
86 * To avoid problems with the compressed data's meta information an extra 18
87 * bytes are needed. Leading to the formula:
88 *
89 * extra_bytes = (uncompressed_size >> 12) + 32768 + 18 + decompressor_size.
90 *
91 * Adding 8 bytes per 32K is a bit excessive but much easier to calculate.
92 * Adding 32768 instead of 32767 just makes for round numbers.
 93 * Adding the decompressor_size is necessary as it must live after all
 94 * of the data as well. Last I measured the decompressor is about 14K:
 95 * 10K of actual data and 4K of bss.
96 *
97 */
16 98
17/* 99/*
18 * gzip declarations 100 * gzip declarations
@@ -29,15 +111,20 @@ typedef unsigned char uch;
29typedef unsigned short ush; 111typedef unsigned short ush;
30typedef unsigned long ulg; 112typedef unsigned long ulg;
31 113
32#define WSIZE 0x8000 /* Window size must be at least 32k, */ 114#define WSIZE 0x80000000 /* Window size must be at least 32k,
33 /* and a power of two */ 115 * and a power of two
116 * We don't actually have a window just
117 * a huge output buffer so I report
118 * a 2G windows size, as that should
119 * always be larger than our output buffer.
120 */
34 121
35static uch *inbuf; /* input buffer */ 122static uch *inbuf; /* input buffer */
36static uch window[WSIZE]; /* Sliding window buffer */ 123static uch *window; /* Sliding window buffer, (and final output buffer) */
37 124
38static unsigned insize = 0; /* valid bytes in inbuf */ 125static unsigned insize; /* valid bytes in inbuf */
39static unsigned inptr = 0; /* index of next byte to be processed in inbuf */ 126static unsigned inptr; /* index of next byte to be processed in inbuf */
40static unsigned outcnt = 0; /* bytes in output buffer */ 127static unsigned outcnt; /* bytes in output buffer */
41 128
42/* gzip flag byte */ 129/* gzip flag byte */
43#define ASCII_FLAG 0x01 /* bit 0 set: file probably ASCII text */ 130#define ASCII_FLAG 0x01 /* bit 0 set: file probably ASCII text */
@@ -88,8 +175,6 @@ extern unsigned char input_data[];
88extern int input_len; 175extern int input_len;
89 176
90static long bytes_out = 0; 177static long bytes_out = 0;
91static uch *output_data;
92static unsigned long output_ptr = 0;
93 178
94static void *malloc(int size); 179static void *malloc(int size);
95static void free(void *where); 180static void free(void *where);
@@ -99,17 +184,10 @@ static void *memcpy(void *dest, const void *src, unsigned n);
99 184
100static void putstr(const char *); 185static void putstr(const char *);
101 186
102extern int end; 187static unsigned long free_mem_ptr;
103static long free_mem_ptr = (long)&end; 188static unsigned long free_mem_end_ptr;
104static long free_mem_end_ptr;
105 189
106#define INPLACE_MOVE_ROUTINE 0x1000
107#define LOW_BUFFER_START 0x2000
108#define LOW_BUFFER_MAX 0x90000
109#define HEAP_SIZE 0x3000 190#define HEAP_SIZE 0x3000
110static unsigned int low_buffer_end, low_buffer_size;
111static int high_loaded =0;
112static uch *high_buffer_start /* = (uch *)(((ulg)&end) + HEAP_SIZE)*/;
113 191
114static char *vidmem = (char *)0xb8000; 192static char *vidmem = (char *)0xb8000;
115static int vidport; 193static int vidport;
@@ -150,7 +228,7 @@ static void gzip_mark(void **ptr)
150 228
151static void gzip_release(void **ptr) 229static void gzip_release(void **ptr)
152{ 230{
153 free_mem_ptr = (long) *ptr; 231 free_mem_ptr = (unsigned long) *ptr;
154} 232}
155 233
156static void scroll(void) 234static void scroll(void)
@@ -178,7 +256,7 @@ static void putstr(const char *s)
178 y--; 256 y--;
179 } 257 }
180 } else { 258 } else {
181 vidmem [ ( x + cols * y ) * 2 ] = c; 259 vidmem [ ( x + cols * y ) * 2 ] = c;
182 if ( ++x >= cols ) { 260 if ( ++x >= cols ) {
183 x = 0; 261 x = 0;
184 if ( ++y >= lines ) { 262 if ( ++y >= lines ) {
@@ -223,58 +301,31 @@ static void* memcpy(void* dest, const void* src, unsigned n)
223 */ 301 */
224static int fill_inbuf(void) 302static int fill_inbuf(void)
225{ 303{
226 if (insize != 0) { 304 error("ran out of input data");
227 error("ran out of input data"); 305 return 0;
228 }
229
230 inbuf = input_data;
231 insize = input_len;
232 inptr = 1;
233 return inbuf[0];
234} 306}
235 307
236/* =========================================================================== 308/* ===========================================================================
237 * Write the output window window[0..outcnt-1] and update crc and bytes_out. 309 * Write the output window window[0..outcnt-1] and update crc and bytes_out.
238 * (Used for the decompressed data only.) 310 * (Used for the decompressed data only.)
239 */ 311 */
240static void flush_window_low(void)
241{
242 ulg c = crc; /* temporary variable */
243 unsigned n;
244 uch *in, *out, ch;
245
246 in = window;
247 out = &output_data[output_ptr];
248 for (n = 0; n < outcnt; n++) {
249 ch = *out++ = *in++;
250 c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
251 }
252 crc = c;
253 bytes_out += (ulg)outcnt;
254 output_ptr += (ulg)outcnt;
255 outcnt = 0;
256}
257
258static void flush_window_high(void)
259{
260 ulg c = crc; /* temporary variable */
261 unsigned n;
262 uch *in, ch;
263 in = window;
264 for (n = 0; n < outcnt; n++) {
265 ch = *output_data++ = *in++;
266 if ((ulg)output_data == low_buffer_end) output_data=high_buffer_start;
267 c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
268 }
269 crc = c;
270 bytes_out += (ulg)outcnt;
271 outcnt = 0;
272}
273
274static void flush_window(void) 312static void flush_window(void)
275{ 313{
276 if (high_loaded) flush_window_high(); 314 /* With my window equal to my output buffer
277 else flush_window_low(); 315 * I only need to compute the crc here.
316 */
317 ulg c = crc; /* temporary variable */
318 unsigned n;
319 uch *in, ch;
320
321 in = window;
322 for (n = 0; n < outcnt; n++) {
323 ch = *in++;
324 c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
325 }
326 crc = c;
327 bytes_out += (ulg)outcnt;
328 outcnt = 0;
278} 329}
279 330
280static void error(char *x) 331static void error(char *x)
@@ -286,66 +337,8 @@ static void error(char *x)
286 while(1); /* Halt */ 337 while(1); /* Halt */
287} 338}
288 339
289#define STACK_SIZE (4096) 340asmlinkage void decompress_kernel(void *rmode, unsigned long end,
290 341 uch *input_data, unsigned long input_len, uch *output)
291long user_stack [STACK_SIZE];
292
293struct {
294 long * a;
295 short b;
296 } stack_start = { & user_stack [STACK_SIZE] , __BOOT_DS };
297
298static void setup_normal_output_buffer(void)
299{
300#ifdef STANDARD_MEMORY_BIOS_CALL
301 if (RM_EXT_MEM_K < 1024) error("Less than 2MB of memory");
302#else
303 if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < 1024) error("Less than 2MB of memory");
304#endif
305 output_data = (unsigned char *)CONFIG_PHYSICAL_START; /* Normally Points to 1M */
306 free_mem_end_ptr = (long)real_mode;
307}
308
309struct moveparams {
310 uch *low_buffer_start; int lcount;
311 uch *high_buffer_start; int hcount;
312};
313
314static void setup_output_buffer_if_we_run_high(struct moveparams *mv)
315{
316 high_buffer_start = (uch *)(((ulg)&end) + HEAP_SIZE);
317#ifdef STANDARD_MEMORY_BIOS_CALL
318 if (RM_EXT_MEM_K < (3*1024)) error("Less than 4MB of memory");
319#else
320 if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < (3*1024)) error("Less than 4MB of memory");
321#endif
322 mv->low_buffer_start = output_data = (unsigned char *)LOW_BUFFER_START;
323 low_buffer_end = ((unsigned int)real_mode > LOW_BUFFER_MAX
324 ? LOW_BUFFER_MAX : (unsigned int)real_mode) & ~0xfff;
325 low_buffer_size = low_buffer_end - LOW_BUFFER_START;
326 high_loaded = 1;
327 free_mem_end_ptr = (long)high_buffer_start;
328 if ( (CONFIG_PHYSICAL_START + low_buffer_size) > ((ulg)high_buffer_start)) {
329 high_buffer_start = (uch *)(CONFIG_PHYSICAL_START + low_buffer_size);
330 mv->hcount = 0; /* say: we need not to move high_buffer */
331 }
332 else mv->hcount = -1;
333 mv->high_buffer_start = high_buffer_start;
334}
335
336static void close_output_buffer_if_we_run_high(struct moveparams *mv)
337{
338 if (bytes_out > low_buffer_size) {
339 mv->lcount = low_buffer_size;
340 if (mv->hcount)
341 mv->hcount = bytes_out - low_buffer_size;
342 } else {
343 mv->lcount = bytes_out;
344 mv->hcount = 0;
345 }
346}
347
348asmlinkage int decompress_kernel(struct moveparams *mv, void *rmode)
349{ 342{
350 real_mode = rmode; 343 real_mode = rmode;
351 344
@@ -360,13 +353,25 @@ asmlinkage int decompress_kernel(struct moveparams *mv, void *rmode)
360 lines = RM_SCREEN_INFO.orig_video_lines; 353 lines = RM_SCREEN_INFO.orig_video_lines;
361 cols = RM_SCREEN_INFO.orig_video_cols; 354 cols = RM_SCREEN_INFO.orig_video_cols;
362 355
363 if (free_mem_ptr < 0x100000) setup_normal_output_buffer(); 356 window = output; /* Output buffer (Normally at 1M) */
364 else setup_output_buffer_if_we_run_high(mv); 357 free_mem_ptr = end; /* Heap */
358 free_mem_end_ptr = end + HEAP_SIZE;
359 inbuf = input_data; /* Input buffer */
360 insize = input_len;
361 inptr = 0;
362
363 if (((u32)output - CONFIG_PHYSICAL_START) & 0x3fffff)
364 error("Destination address not 4M aligned");
365 if (end > ((-__PAGE_OFFSET-(512 <<20)-1) & 0x7fffffff))
366 error("Destination address too large");
367#ifndef CONFIG_RELOCATABLE
368 if ((u32)output != CONFIG_PHYSICAL_START)
369 error("Wrong destination address");
370#endif
365 371
366 makecrc(); 372 makecrc();
367 putstr("Uncompressing Linux... "); 373 putstr("Uncompressing Linux... ");
368 gunzip(); 374 gunzip();
369 putstr("Ok, booting the kernel.\n"); 375 putstr("Ok, booting the kernel.\n");
370 if (high_loaded) close_output_buffer_if_we_run_high(mv); 376 return;
371 return high_loaded;
372} 377}
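To make the extra_bytes formula documented in the comment block above concrete, here is a tiny standalone calculation. The sample sizes and the DECOMPRESSOR_SIZE constant are illustrative; the comment only estimates the decompressor at roughly 14K.

#include <stdio.h>

#define DECOMPRESSOR_SIZE	(14 * 1024)	/* rough estimate from the comment */

/* extra_bytes = (uncompressed_size >> 12) + 32768 + 18 + decompressor_size */
static unsigned long extra_bytes(unsigned long uncompressed_size)
{
	return (uncompressed_size >> 12) + 32768 + 18 + DECOMPRESSOR_SIZE;
}

int main(void)
{
	unsigned long sample_sizes[] = { 2UL << 20, 6UL << 20 };	/* 2 MB, 6 MB */
	int i;

	for (i = 0; i < 2; i++)
		printf("%lu MB uncompressed -> %lu bytes of slack\n",
		       sample_sizes[i] >> 20, extra_bytes(sample_sizes[i]));
	return 0;
}

For a 6 MB uncompressed image this works out to 1536 + 32768 + 18 + 14336 = 48658 bytes of slack.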
diff --git a/arch/i386/boot/compressed/relocs.c b/arch/i386/boot/compressed/relocs.c
new file mode 100644
index 000000000000..0551ceb21bed
--- /dev/null
+++ b/arch/i386/boot/compressed/relocs.c
@@ -0,0 +1,563 @@
1#include <stdio.h>
2#include <stdarg.h>
3#include <stdlib.h>
4#include <stdint.h>
5#include <string.h>
6#include <errno.h>
7#include <unistd.h>
8#include <elf.h>
9#include <byteswap.h>
10#define USE_BSD
11#include <endian.h>
12
13#define MAX_SHDRS 100
14static Elf32_Ehdr ehdr;
15static Elf32_Shdr shdr[MAX_SHDRS];
16static Elf32_Sym *symtab[MAX_SHDRS];
17static Elf32_Rel *reltab[MAX_SHDRS];
18static char *strtab[MAX_SHDRS];
19static unsigned long reloc_count, reloc_idx;
20static unsigned long *relocs;
21
22static void die(char *fmt, ...)
23{
24 va_list ap;
25 va_start(ap, fmt);
26 vfprintf(stderr, fmt, ap);
27 va_end(ap);
28 exit(1);
29}
30
31static const char *sym_type(unsigned type)
32{
33 static const char *type_name[] = {
34#define SYM_TYPE(X) [X] = #X
35 SYM_TYPE(STT_NOTYPE),
36 SYM_TYPE(STT_OBJECT),
37 SYM_TYPE(STT_FUNC),
38 SYM_TYPE(STT_SECTION),
39 SYM_TYPE(STT_FILE),
40 SYM_TYPE(STT_COMMON),
41 SYM_TYPE(STT_TLS),
42#undef SYM_TYPE
43 };
44 const char *name = "unknown sym type name";
45 if (type < sizeof(type_name)/sizeof(type_name[0])) {
46 name = type_name[type];
47 }
48 return name;
49}
50
51static const char *sym_bind(unsigned bind)
52{
53 static const char *bind_name[] = {
54#define SYM_BIND(X) [X] = #X
55 SYM_BIND(STB_LOCAL),
56 SYM_BIND(STB_GLOBAL),
57 SYM_BIND(STB_WEAK),
58#undef SYM_BIND
59 };
60 const char *name = "unknown sym bind name";
61 if (bind < sizeof(bind_name)/sizeof(bind_name[0])) {
62 name = bind_name[bind];
63 }
64 return name;
65}
66
67static const char *sym_visibility(unsigned visibility)
68{
69 static const char *visibility_name[] = {
70#define SYM_VISIBILITY(X) [X] = #X
71 SYM_VISIBILITY(STV_DEFAULT),
72 SYM_VISIBILITY(STV_INTERNAL),
73 SYM_VISIBILITY(STV_HIDDEN),
74 SYM_VISIBILITY(STV_PROTECTED),
75#undef SYM_VISIBILITY
76 };
77 const char *name = "unknown sym visibility name";
78 if (visibility < sizeof(visibility_name)/sizeof(visibility_name[0])) {
79 name = visibility_name[visibility];
80 }
81 return name;
82}
83
84static const char *rel_type(unsigned type)
85{
86 static const char *type_name[] = {
87#define REL_TYPE(X) [X] = #X
88 REL_TYPE(R_386_NONE),
89 REL_TYPE(R_386_32),
90 REL_TYPE(R_386_PC32),
91 REL_TYPE(R_386_GOT32),
92 REL_TYPE(R_386_PLT32),
93 REL_TYPE(R_386_COPY),
94 REL_TYPE(R_386_GLOB_DAT),
95 REL_TYPE(R_386_JMP_SLOT),
96 REL_TYPE(R_386_RELATIVE),
97 REL_TYPE(R_386_GOTOFF),
98 REL_TYPE(R_386_GOTPC),
99#undef REL_TYPE
100 };
 101	const char *name = "unknown rel type name";
102 if (type < sizeof(type_name)/sizeof(type_name[0])) {
103 name = type_name[type];
104 }
105 return name;
106}
107
108static const char *sec_name(unsigned shndx)
109{
110 const char *sec_strtab;
111 const char *name;
112 sec_strtab = strtab[ehdr.e_shstrndx];
113 name = "<noname>";
114 if (shndx < ehdr.e_shnum) {
115 name = sec_strtab + shdr[shndx].sh_name;
116 }
117 else if (shndx == SHN_ABS) {
118 name = "ABSOLUTE";
119 }
120 else if (shndx == SHN_COMMON) {
121 name = "COMMON";
122 }
123 return name;
124}
125
126static const char *sym_name(const char *sym_strtab, Elf32_Sym *sym)
127{
128 const char *name;
129 name = "<noname>";
130 if (sym->st_name) {
131 name = sym_strtab + sym->st_name;
132 }
133 else {
 134		name = sec_name(sym->st_shndx);
135 }
136 return name;
137}
138
139
140
141#if BYTE_ORDER == LITTLE_ENDIAN
142#define le16_to_cpu(val) (val)
143#define le32_to_cpu(val) (val)
144#endif
145#if BYTE_ORDER == BIG_ENDIAN
146#define le16_to_cpu(val) bswap_16(val)
147#define le32_to_cpu(val) bswap_32(val)
148#endif
149
150static uint16_t elf16_to_cpu(uint16_t val)
151{
152 return le16_to_cpu(val);
153}
154
155static uint32_t elf32_to_cpu(uint32_t val)
156{
157 return le32_to_cpu(val);
158}
159
160static void read_ehdr(FILE *fp)
161{
162 if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1) {
163 die("Cannot read ELF header: %s\n",
164 strerror(errno));
165 }
166 if (memcmp(ehdr.e_ident, ELFMAG, 4) != 0) {
167 die("No ELF magic\n");
168 }
169 if (ehdr.e_ident[EI_CLASS] != ELFCLASS32) {
170 die("Not a 32 bit executable\n");
171 }
172 if (ehdr.e_ident[EI_DATA] != ELFDATA2LSB) {
173 die("Not a LSB ELF executable\n");
174 }
175 if (ehdr.e_ident[EI_VERSION] != EV_CURRENT) {
176 die("Unknown ELF version\n");
177 }
178 /* Convert the fields to native endian */
179 ehdr.e_type = elf16_to_cpu(ehdr.e_type);
180 ehdr.e_machine = elf16_to_cpu(ehdr.e_machine);
181 ehdr.e_version = elf32_to_cpu(ehdr.e_version);
182 ehdr.e_entry = elf32_to_cpu(ehdr.e_entry);
183 ehdr.e_phoff = elf32_to_cpu(ehdr.e_phoff);
184 ehdr.e_shoff = elf32_to_cpu(ehdr.e_shoff);
185 ehdr.e_flags = elf32_to_cpu(ehdr.e_flags);
186 ehdr.e_ehsize = elf16_to_cpu(ehdr.e_ehsize);
187 ehdr.e_phentsize = elf16_to_cpu(ehdr.e_phentsize);
188 ehdr.e_phnum = elf16_to_cpu(ehdr.e_phnum);
189 ehdr.e_shentsize = elf16_to_cpu(ehdr.e_shentsize);
190 ehdr.e_shnum = elf16_to_cpu(ehdr.e_shnum);
191 ehdr.e_shstrndx = elf16_to_cpu(ehdr.e_shstrndx);
192
193 if ((ehdr.e_type != ET_EXEC) && (ehdr.e_type != ET_DYN)) {
194 die("Unsupported ELF header type\n");
195 }
196 if (ehdr.e_machine != EM_386) {
197 die("Not for x86\n");
198 }
199 if (ehdr.e_version != EV_CURRENT) {
200 die("Unknown ELF version\n");
201 }
202 if (ehdr.e_ehsize != sizeof(Elf32_Ehdr)) {
203 die("Bad Elf header size\n");
204 }
205 if (ehdr.e_phentsize != sizeof(Elf32_Phdr)) {
206 die("Bad program header entry\n");
207 }
208 if (ehdr.e_shentsize != sizeof(Elf32_Shdr)) {
209 die("Bad section header entry\n");
210 }
211 if (ehdr.e_shstrndx >= ehdr.e_shnum) {
212 die("String table index out of bounds\n");
213 }
214}
215
216static void read_shdrs(FILE *fp)
217{
218 int i;
219 if (ehdr.e_shnum > MAX_SHDRS) {
 220		die("too many section headers: %d (only %d supported)\n",
 221			ehdr.e_shnum, MAX_SHDRS);
222 }
223 if (fseek(fp, ehdr.e_shoff, SEEK_SET) < 0) {
224 die("Seek to %d failed: %s\n",
225 ehdr.e_shoff, strerror(errno));
226 }
227 if (fread(&shdr, sizeof(shdr[0]), ehdr.e_shnum, fp) != ehdr.e_shnum) {
228 die("Cannot read ELF section headers: %s\n",
229 strerror(errno));
230 }
231 for(i = 0; i < ehdr.e_shnum; i++) {
232 shdr[i].sh_name = elf32_to_cpu(shdr[i].sh_name);
233 shdr[i].sh_type = elf32_to_cpu(shdr[i].sh_type);
234 shdr[i].sh_flags = elf32_to_cpu(shdr[i].sh_flags);
235 shdr[i].sh_addr = elf32_to_cpu(shdr[i].sh_addr);
236 shdr[i].sh_offset = elf32_to_cpu(shdr[i].sh_offset);
237 shdr[i].sh_size = elf32_to_cpu(shdr[i].sh_size);
238 shdr[i].sh_link = elf32_to_cpu(shdr[i].sh_link);
239 shdr[i].sh_info = elf32_to_cpu(shdr[i].sh_info);
240 shdr[i].sh_addralign = elf32_to_cpu(shdr[i].sh_addralign);
241 shdr[i].sh_entsize = elf32_to_cpu(shdr[i].sh_entsize);
242 }
243
244}
245
246static void read_strtabs(FILE *fp)
247{
248 int i;
249 for(i = 0; i < ehdr.e_shnum; i++) {
250 if (shdr[i].sh_type != SHT_STRTAB) {
251 continue;
252 }
253 strtab[i] = malloc(shdr[i].sh_size);
254 if (!strtab[i]) {
255 die("malloc of %d bytes for strtab failed\n",
256 shdr[i].sh_size);
257 }
258 if (fseek(fp, shdr[i].sh_offset, SEEK_SET) < 0) {
259 die("Seek to %d failed: %s\n",
260 shdr[i].sh_offset, strerror(errno));
261 }
262 if (fread(strtab[i], 1, shdr[i].sh_size, fp) != shdr[i].sh_size) {
 263			die("Cannot read string table: %s\n",
264 strerror(errno));
265 }
266 }
267}
268
269static void read_symtabs(FILE *fp)
270{
271 int i,j;
272 for(i = 0; i < ehdr.e_shnum; i++) {
273 if (shdr[i].sh_type != SHT_SYMTAB) {
274 continue;
275 }
276 symtab[i] = malloc(shdr[i].sh_size);
277 if (!symtab[i]) {
278 die("malloc of %d bytes for symtab failed\n",
279 shdr[i].sh_size);
280 }
281 if (fseek(fp, shdr[i].sh_offset, SEEK_SET) < 0) {
282 die("Seek to %d failed: %s\n",
283 shdr[i].sh_offset, strerror(errno));
284 }
285 if (fread(symtab[i], 1, shdr[i].sh_size, fp) != shdr[i].sh_size) {
286 die("Cannot read symbol table: %s\n",
287 strerror(errno));
288 }
289 for(j = 0; j < shdr[i].sh_size/sizeof(symtab[i][0]); j++) {
290 symtab[i][j].st_name = elf32_to_cpu(symtab[i][j].st_name);
291 symtab[i][j].st_value = elf32_to_cpu(symtab[i][j].st_value);
292 symtab[i][j].st_size = elf32_to_cpu(symtab[i][j].st_size);
293 symtab[i][j].st_shndx = elf16_to_cpu(symtab[i][j].st_shndx);
294 }
295 }
296}
297
298
299static void read_relocs(FILE *fp)
300{
301 int i,j;
302 for(i = 0; i < ehdr.e_shnum; i++) {
303 if (shdr[i].sh_type != SHT_REL) {
304 continue;
305 }
306 reltab[i] = malloc(shdr[i].sh_size);
307 if (!reltab[i]) {
308 die("malloc of %d bytes for relocs failed\n",
309 shdr[i].sh_size);
310 }
311 if (fseek(fp, shdr[i].sh_offset, SEEK_SET) < 0) {
312 die("Seek to %d failed: %s\n",
313 shdr[i].sh_offset, strerror(errno));
314 }
315 if (fread(reltab[i], 1, shdr[i].sh_size, fp) != shdr[i].sh_size) {
 316			die("Cannot read relocation table: %s\n",
317 strerror(errno));
318 }
319 for(j = 0; j < shdr[i].sh_size/sizeof(reltab[0][0]); j++) {
320 reltab[i][j].r_offset = elf32_to_cpu(reltab[i][j].r_offset);
321 reltab[i][j].r_info = elf32_to_cpu(reltab[i][j].r_info);
322 }
323 }
324}
325
326
327static void print_absolute_symbols(void)
328{
329 int i;
330 printf("Absolute symbols\n");
331 printf(" Num: Value Size Type Bind Visibility Name\n");
332 for(i = 0; i < ehdr.e_shnum; i++) {
333 char *sym_strtab;
334 Elf32_Sym *sh_symtab;
335 int j;
336 if (shdr[i].sh_type != SHT_SYMTAB) {
337 continue;
338 }
339 sh_symtab = symtab[i];
340 sym_strtab = strtab[shdr[i].sh_link];
341 for(j = 0; j < shdr[i].sh_size/sizeof(symtab[0][0]); j++) {
342 Elf32_Sym *sym;
343 const char *name;
344 sym = &symtab[i][j];
345 name = sym_name(sym_strtab, sym);
346 if (sym->st_shndx != SHN_ABS) {
347 continue;
348 }
349 printf("%5d %08x %5d %10s %10s %12s %s\n",
350 j, sym->st_value, sym->st_size,
351 sym_type(ELF32_ST_TYPE(sym->st_info)),
352 sym_bind(ELF32_ST_BIND(sym->st_info)),
353 sym_visibility(ELF32_ST_VISIBILITY(sym->st_other)),
354 name);
355 }
356 }
357 printf("\n");
358}
359
360static void print_absolute_relocs(void)
361{
362 int i;
363 printf("Absolute relocations\n");
364 printf("Offset Info Type Sym.Value Sym.Name\n");
365 for(i = 0; i < ehdr.e_shnum; i++) {
366 char *sym_strtab;
367 Elf32_Sym *sh_symtab;
368 unsigned sec_applies, sec_symtab;
369 int j;
370 if (shdr[i].sh_type != SHT_REL) {
371 continue;
372 }
373 sec_symtab = shdr[i].sh_link;
374 sec_applies = shdr[i].sh_info;
375 if (!(shdr[sec_applies].sh_flags & SHF_ALLOC)) {
376 continue;
377 }
378 sh_symtab = symtab[sec_symtab];
379 sym_strtab = strtab[shdr[sec_symtab].sh_link];
380 for(j = 0; j < shdr[i].sh_size/sizeof(reltab[0][0]); j++) {
381 Elf32_Rel *rel;
382 Elf32_Sym *sym;
383 const char *name;
384 rel = &reltab[i][j];
385 sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
386 name = sym_name(sym_strtab, sym);
387 if (sym->st_shndx != SHN_ABS) {
388 continue;
389 }
390 printf("%08x %08x %10s %08x %s\n",
391 rel->r_offset,
392 rel->r_info,
393 rel_type(ELF32_R_TYPE(rel->r_info)),
394 sym->st_value,
395 name);
396 }
397 }
398 printf("\n");
399}
400
401static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
402{
403 int i;
404 /* Walk through the relocations */
405 for(i = 0; i < ehdr.e_shnum; i++) {
406 char *sym_strtab;
407 Elf32_Sym *sh_symtab;
408 unsigned sec_applies, sec_symtab;
409 int j;
410 if (shdr[i].sh_type != SHT_REL) {
411 continue;
412 }
413 sec_symtab = shdr[i].sh_link;
414 sec_applies = shdr[i].sh_info;
415 if (!(shdr[sec_applies].sh_flags & SHF_ALLOC)) {
416 continue;
417 }
418 sh_symtab = symtab[sec_symtab];
419 sym_strtab = strtab[shdr[sec_symtab].sh_link];
420 for(j = 0; j < shdr[i].sh_size/sizeof(reltab[0][0]); j++) {
421 Elf32_Rel *rel;
422 Elf32_Sym *sym;
423 unsigned r_type;
424 rel = &reltab[i][j];
425 sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
426 r_type = ELF32_R_TYPE(rel->r_info);
427 /* Don't visit relocations to absolute symbols */
428 if (sym->st_shndx == SHN_ABS) {
429 continue;
430 }
431 if (r_type == R_386_PC32) {
432 /* PC relative relocations don't need to be adjusted */
433 }
434 else if (r_type == R_386_32) {
435 /* Visit relocations that need to be adjusted */
436 visit(rel, sym);
437 }
438 else {
439 die("Unsupported relocation type: %d\n", r_type);
440 }
441 }
442 }
443}
444
445static void count_reloc(Elf32_Rel *rel, Elf32_Sym *sym)
446{
447 reloc_count += 1;
448}
449
450static void collect_reloc(Elf32_Rel *rel, Elf32_Sym *sym)
451{
452 /* Remember the address that needs to be adjusted. */
453 relocs[reloc_idx++] = rel->r_offset;
454}
455
456static int cmp_relocs(const void *va, const void *vb)
457{
458 const unsigned long *a, *b;
459 a = va; b = vb;
460 return (*a == *b)? 0 : (*a > *b)? 1 : -1;
461}
462
463static void emit_relocs(int as_text)
464{
465 int i;
466 /* Count how many relocations I have and allocate space for them. */
467 reloc_count = 0;
468 walk_relocs(count_reloc);
469 relocs = malloc(reloc_count * sizeof(relocs[0]));
470 if (!relocs) {
471 die("malloc of %d entries for relocs failed\n",
472 reloc_count);
473 }
474 /* Collect up the relocations */
475 reloc_idx = 0;
476 walk_relocs(collect_reloc);
477
478 /* Order the relocations for more efficient processing */
479 qsort(relocs, reloc_count, sizeof(relocs[0]), cmp_relocs);
480
481 /* Print the relocations */
482 if (as_text) {
483 /* Print the relocations in a form suitable that
484 * gas will like.
485 */
486 printf(".section \".data.reloc\",\"a\"\n");
487 printf(".balign 4\n");
488 for(i = 0; i < reloc_count; i++) {
489 printf("\t .long 0x%08lx\n", relocs[i]);
490 }
491 printf("\n");
492 }
493 else {
494 unsigned char buf[4];
495 buf[0] = buf[1] = buf[2] = buf[3] = 0;
496 /* Print a stop */
497 printf("%c%c%c%c", buf[0], buf[1], buf[2], buf[3]);
498 /* Now print each relocation */
499 for(i = 0; i < reloc_count; i++) {
500 buf[0] = (relocs[i] >> 0) & 0xff;
501 buf[1] = (relocs[i] >> 8) & 0xff;
502 buf[2] = (relocs[i] >> 16) & 0xff;
503 buf[3] = (relocs[i] >> 24) & 0xff;
504 printf("%c%c%c%c", buf[0], buf[1], buf[2], buf[3]);
505 }
506 }
507}
508
509static void usage(void)
510{
 511	die("relocs [--abs | --text] vmlinux\n");
512}
513
514int main(int argc, char **argv)
515{
516 int show_absolute;
517 int as_text;
518 const char *fname;
519 FILE *fp;
520 int i;
521
522 show_absolute = 0;
523 as_text = 0;
524 fname = NULL;
525 for(i = 1; i < argc; i++) {
526 char *arg = argv[i];
527 if (*arg == '-') {
 528			if (strcmp(arg, "--abs") == 0) {
529 show_absolute = 1;
530 continue;
531 }
 532			else if (strcmp(arg, "--text") == 0) {
533 as_text = 1;
534 continue;
535 }
536 }
537 else if (!fname) {
538 fname = arg;
539 continue;
540 }
541 usage();
542 }
543 if (!fname) {
544 usage();
545 }
546 fp = fopen(fname, "r");
547 if (!fp) {
548 die("Cannot open %s: %s\n",
549 fname, strerror(errno));
550 }
551 read_ehdr(fp);
552 read_shdrs(fp);
553 read_strtabs(fp);
554 read_symtabs(fp);
555 read_relocs(fp);
556 if (show_absolute) {
557 print_absolute_symbols();
558 print_absolute_relocs();
559 return 0;
560 }
561 emit_relocs(as_text);
562 return 0;
563}
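The stream that emit_relocs() writes in binary mode (captured by the Makefile as vmlinux.relocs) is simply a 32-bit zero stop marker followed by each relocation's r_offset as a sorted little-endian 32-bit word; head.S later walks the table backwards until it reaches the zero. A hypothetical dump tool, shown here only to document the format and not part of the patch, could read such a file like this:

#include <stdio.h>
#include <stdint.h>

/* Dump a vmlinux.relocs blob: one zero word, then little-endian 32-bit
 * kernel virtual addresses that need the load delta added to them.
 */
int main(int argc, char **argv)
{
	unsigned char b[4];
	FILE *fp;

	if (argc != 2 || !(fp = fopen(argv[1], "rb"))) {
		fprintf(stderr, "usage: dumprelocs vmlinux.relocs\n");
		return 1;
	}
	while (fread(b, 1, 4, fp) == 4) {
		uint32_t off = b[0] | (b[1] << 8) |
			       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
		if (off)	/* skip the leading stop marker */
			printf("0x%08x\n", (unsigned)off);
	}
	fclose(fp);
	return 0;
}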
diff --git a/arch/i386/boot/compressed/vmlinux.lds b/arch/i386/boot/compressed/vmlinux.lds
new file mode 100644
index 000000000000..cc4854f6c6c1
--- /dev/null
+++ b/arch/i386/boot/compressed/vmlinux.lds
@@ -0,0 +1,43 @@
1OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
2OUTPUT_ARCH(i386)
3ENTRY(startup_32)
4SECTIONS
5{
 6 /* Be careful: parts of head.S assume startup_32 is at
7 * address 0.
8 */
9 . = 0 ;
10 .text.head : {
11 _head = . ;
12 *(.text.head)
13 _ehead = . ;
14 }
15 .data.compressed : {
16 *(.data.compressed)
17 }
18 .text : {
19 _text = .; /* Text */
20 *(.text)
21 *(.text.*)
22 _etext = . ;
23 }
24 .rodata : {
25 _rodata = . ;
26 *(.rodata) /* read-only data */
27 *(.rodata.*)
28 _erodata = . ;
29 }
30 .data : {
31 _data = . ;
32 *(.data)
33 *(.data.*)
34 _edata = . ;
35 }
36 .bss : {
37 _bss = . ;
38 *(.bss)
39 *(.bss.*)
40 *(COMMON)
41 _end = . ;
42 }
43}
diff --git a/arch/i386/boot/compressed/vmlinux.scr b/arch/i386/boot/compressed/vmlinux.scr
index 1ed9d791f863..707a88f7f29e 100644
--- a/arch/i386/boot/compressed/vmlinux.scr
+++ b/arch/i386/boot/compressed/vmlinux.scr
@@ -1,9 +1,10 @@
1SECTIONS 1SECTIONS
2{ 2{
3 .data : { 3 .data.compressed : {
4 input_len = .; 4 input_len = .;
5 LONG(input_data_end - input_data) input_data = .; 5 LONG(input_data_end - input_data) input_data = .;
6 *(.data) 6 *(.data)
7 output_len = . - 4;
7 input_data_end = .; 8 input_data_end = .;
8 } 9 }
9} 10}
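The new output_len symbol works because vmlinux.bin.gz, like any gzip stream, ends with a 4-byte CRC followed by a 4-byte original-length field (the crc[4] orig_len[4] trailer noted in the misc.c comment above), so ". - 4" labels the word that already holds the uncompressed size and head.S can read it without touching the compressed data. A small illustrative reader of that trailer field, not part of the patch:

#include <stdio.h>
#include <stdint.h>

/* Print the ISIZE field (uncompressed length mod 2^32) stored in the last
 * 4 bytes of a gzip file -- the same word vmlinux.scr labels output_len.
 */
int main(int argc, char **argv)
{
	unsigned char b[4];
	FILE *fp = (argc == 2) ? fopen(argv[1], "rb") : NULL;

	if (!fp || fseek(fp, -4L, SEEK_END) != 0 || fread(b, 1, 4, fp) != 4) {
		fprintf(stderr, "usage: gzsize file.gz\n");
		return 1;
	}
	printf("uncompressed size: %lu bytes\n",
	       (unsigned long)(b[0] | (b[1] << 8) |
			       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24)));
	fclose(fp);
	return 0;
}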
diff --git a/arch/i386/boot/setup.S b/arch/i386/boot/setup.S
index 3aec4538a113..9aa8b0518184 100644
--- a/arch/i386/boot/setup.S
+++ b/arch/i386/boot/setup.S
@@ -588,11 +588,6 @@ rmodeswtch_normal:
588 call default_switch 588 call default_switch
589 589
590rmodeswtch_end: 590rmodeswtch_end:
591# we get the code32 start address and modify the below 'jmpi'
592# (loader may have changed it)
593 movl %cs:code32_start, %eax
594 movl %eax, %cs:code32
595
596# Now we move the system to its rightful place ... but we check if we have a 591# Now we move the system to its rightful place ... but we check if we have a
597# big-kernel. In that case we *must* not move it ... 592# big-kernel. In that case we *must* not move it ...
598 testb $LOADED_HIGH, %cs:loadflags 593 testb $LOADED_HIGH, %cs:loadflags
@@ -788,11 +783,12 @@ a20_err_msg:
788a20_done: 783a20_done:
789 784
790#endif /* CONFIG_X86_VOYAGER */ 785#endif /* CONFIG_X86_VOYAGER */
791# set up gdt and idt 786# set up gdt and idt and 32bit start address
792 lidt idt_48 # load idt with 0,0 787 lidt idt_48 # load idt with 0,0
793 xorl %eax, %eax # Compute gdt_base 788 xorl %eax, %eax # Compute gdt_base
794 movw %ds, %ax # (Convert %ds:gdt to a linear ptr) 789 movw %ds, %ax # (Convert %ds:gdt to a linear ptr)
795 shll $4, %eax 790 shll $4, %eax
791 addl %eax, code32
796 addl $gdt, %eax 792 addl $gdt, %eax
797 movl %eax, (gdt_48+2) 793 movl %eax, (gdt_48+2)
798 lgdt gdt_48 # load gdt with whatever is 794 lgdt gdt_48 # load gdt with whatever is
@@ -851,9 +847,26 @@ flush_instr:
851# Manual, Mixing 16-bit and 32-bit code, page 16-6) 847# Manual, Mixing 16-bit and 32-bit code, page 16-6)
852 848
853 .byte 0x66, 0xea # prefix + jmpi-opcode 849 .byte 0x66, 0xea # prefix + jmpi-opcode
854code32: .long 0x1000 # will be set to 0x100000 850code32: .long startup_32 # will be set to %cs+startup_32
855 # for big kernels
856 .word __BOOT_CS 851 .word __BOOT_CS
852.code32
853startup_32:
854 movl $(__BOOT_DS), %eax
855 movl %eax, %ds
856 movl %eax, %es
857 movl %eax, %fs
858 movl %eax, %gs
859 movl %eax, %ss
860
861 xorl %eax, %eax
8621: incl %eax # check that A20 really IS enabled
863 movl %eax, 0x00000000 # loop forever if it isn't
864 cmpl %eax, 0x00100000
865 je 1b
866
867 # Jump to the 32bit entry point
868 jmpl *(code32_start - start + (DELTA_INITSEG << 4))(%esi)
869.code16
857 870
858# Here's a bunch of information about your current kernel.. 871# Here's a bunch of information about your current kernel..
859kernel_version: .ascii UTS_RELEASE 872kernel_version: .ascii UTS_RELEASE