Diffstat (limited to 'arch/x86/boot')
 arch/x86/boot/compressed/head_64.S | 52 +++++++++++++++++++++++++++++++---------------------
 1 file changed, 31 insertions(+), 21 deletions(-)
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 06cc7e59352b..26c3def43ace 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -21,8 +21,8 @@
 /*
  * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
  */
-.code32
-.text
+	.code32
+	.text
 
 #include <linux/linkage.h>
 #include <asm/segment.h>
@@ -33,12 +33,14 @@
 #include <asm/processor-flags.h>
 #include <asm/asm-offsets.h>
 
-.section ".text.head"
+	.section ".text.head"
 	.code32
 ENTRY(startup_32)
 	cld
-	/* test KEEP_SEGMENTS flag to see if the bootloader is asking
-	 * us to not reload segments */
+	/*
+	 * Test KEEP_SEGMENTS flag to see if the bootloader is asking
+	 * us to not reload segments
+	 */
 	testb $(1<<6), BP_loadflags(%esi)
 	jnz 1f
 
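The flag tested here is bit 6 of the boot protocol's loadflags field (KEEP_SEGMENTS): when a bootloader sets it, the segment reload that follows is skipped. A minimal C sketch of the same check, assuming a plain loadflags byte (the helper name is made up):

    #include <stdbool.h>
    #include <stdint.h>

    #define KEEP_SEGMENTS (1 << 6)   /* bit 6 of boot_params.hdr.loadflags */

    /* Hypothetical helper mirroring "testb $(1<<6), BP_loadflags(%esi); jnz 1f". */
    static bool should_reload_segments(uint8_t loadflags)
    {
        return !(loadflags & KEEP_SEGMENTS);
    }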
@@ -49,7 +51,8 @@ ENTRY(startup_32)
 	movl %eax, %ss
 1:
 
-	/* Calculate the delta between where we were compiled to run
+	/*
+	 * Calculate the delta between where we were compiled to run
 	 * at and where we were actually loaded at. This can only be done
 	 * with a short local call on x86. Nothing else will tell us what
 	 * address we are running at. The reserved chunk of the real-mode
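The "short local call" the comment refers to is the classic trick for finding out where the code was loaded: call the next instruction, pop the pushed return address, and subtract the address that label was linked at. The arithmetic as a hedged C sketch (both parameters are hypothetical values obtained by that trick):

    /* delta = (address of the local label at run time) - (address it was linked at);
     * every position-dependent reference is then adjusted by this delta. */
    static unsigned long load_delta(unsigned long runtime_label,
                                    unsigned long linked_label)
    {
        return runtime_label - linked_label;
    }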
@@ -70,10 +73,11 @@ ENTRY(startup_32)
 	testl %eax, %eax
 	jnz no_longmode
 
-	/* Compute the delta between where we were compiled to run at
+	/*
+	 * Compute the delta between where we were compiled to run at
 	 * and where the code will actually run at.
-	 */
-	/* %ebp contains the address we are loaded at by the boot loader and %ebx
+	 *
+	 * %ebp contains the address we are loaded at by the boot loader and %ebx
 	 * contains the address where we should move the kernel image temporarily
 	 * for safe in-place decompression.
 	 */
@@ -114,7 +118,7 @@ ENTRY(startup_32)
 	/*
 	 * Build early 4G boot pagetable
 	 */
-	/* Initialize Page tables to 0*/
+	/* Initialize Page tables to 0 */
 	leal pgtable(%ebx), %edi
 	xorl %eax, %eax
 	movl $((4096*6)/4), %ecx
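The $((4096*6)/4) constant sizes the clear loop: the early boot page table occupies six 4 KiB pages (enough to identity-map the first 4 GB with 2 MB pages), and the loop, presumably a rep stosl right after this hunk, stores 32-bit zeros, so %ecx needs 6144 iterations. A C sketch of the same zeroing, treating pgtable as an opaque buffer:

    #include <string.h>

    #define PAGE_SIZE  4096
    #define PGT_PAGES  6                        /* early 4G boot pagetable */
    #define PGT_BYTES  (PAGE_SIZE * PGT_PAGES)  /* 24576 bytes */
    #define PGT_DWORDS (PGT_BYTES / 4)          /* 6144, the %ecx count above */

    static unsigned char pgtable[PGT_BYTES];

    /* Equivalent of: leal pgtable(%ebx),%edi; xorl %eax,%eax; rep stosl */
    static void clear_boot_pgtable(void)
    {
        memset(pgtable, 0, PGT_BYTES);
    }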
@@ -155,7 +159,8 @@ ENTRY(startup_32)
 	btsl $_EFER_LME, %eax
 	wrmsr
 
-	/* Setup for the jump to 64bit mode
+	/*
+	 * Setup for the jump to 64bit mode
 	 *
 	 * When the jump is performend we will be in long mode but
 	 * in 32bit compatibility mode with EFER.LME = 1, CS.L = 0, CS.D = 1
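_EFER_LME is the bit position of Long Mode Enable in the EFER MSR (0xC0000080, bit 8); setting it with btsl before wrmsr arms long mode, and the jump described in the comment then leaves 32-bit compatibility mode once a code segment with L = 1 is loaded. A sketch of the MSR update in C, with rdmsr/wrmsr as assumed helper stubs rather than real library calls:

    #include <stdint.h>

    #define MSR_EFER 0xc0000080u   /* extended feature enable register */
    #define EFER_LME (1u << 8)     /* long mode enable, bit _EFER_LME */

    /* Assumed wrappers for the rdmsr/wrmsr instructions. */
    uint64_t rdmsr(uint32_t msr);
    void wrmsr(uint32_t msr, uint64_t val);

    static void enable_long_mode(void)
    {
        wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_LME);   /* btsl $_EFER_LME, %eax */
    }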
@@ -184,7 +189,8 @@ no_longmode:
 
 #include "../../kernel/verify_cpu_64.S"
 
-	/* Be careful here startup_64 needs to be at a predictable
+	/*
+	 * Be careful here startup_64 needs to be at a predictable
 	 * address so I can export it in an ELF header. Bootloaders
 	 * should look at the ELF header to find this address, as
 	 * it may change in the future.
@@ -192,7 +198,8 @@ no_longmode:
 	.code64
 	.org 0x200
 ENTRY(startup_64)
-	/* We come here either from startup_32 or directly from a
+	/*
+	 * We come here either from startup_32 or directly from a
 	 * 64bit bootloader. If we come here from a bootloader we depend on
 	 * an identity mapped page table being provied that maps our
 	 * entire text+data+bss and hopefully all of memory.
@@ -209,7 +216,8 @@ ENTRY(startup_64)
 	movl $0x20, %eax
 	ltr %ax
 
-	/* Compute the decompressed kernel start address. It is where
+	/*
+	 * Compute the decompressed kernel start address. It is where
 	 * we were loaded at aligned to a 2M boundary. %rbp contains the
 	 * decompressed kernel start address.
 	 *
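Aligning to a 2 MB boundary lets the decompressed kernel start on a large-page boundary. A minimal sketch of the rounding, assuming the usual round-up idiom; the instructions that actually perform it are outside this hunk, so both the direction and the names below are assumptions:

    #define TWO_MB 0x200000ul

    /* Hypothetical: round the load address up to the next 2 MB boundary. */
    static unsigned long decompressed_kernel_start(unsigned long load_addr)
    {
        return (load_addr + TWO_MB - 1) & ~(TWO_MB - 1);
    }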
@@ -241,7 +249,8 @@ ENTRY(startup_64)
 	addq $(32768 + 18 + 4095), %rbx
 	andq $~4095, %rbx
 
-	/* Copy the compressed kernel to the end of our buffer
+	/*
+	 * Copy the compressed kernel to the end of our buffer
 	 * where decompression in place becomes safe.
 	 */
 	leaq _end_before_pgt(%rip), %r8
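The two context lines at the top of this hunk finish the buffer-end computation: %rbx gets 32768 + 18 bytes of slack added (presumably the decompressor's worst-case in-place overrun allowance) and is then rounded up to a 4 KiB boundary by the add-then-mask pair. The same computation in C:

    /* Mirrors: addq $(32768 + 18 + 4095), %rbx; andq $~4095, %rbx */
    static unsigned long safe_buffer_end(unsigned long rbx)
    {
        return (rbx + 32768 + 18 + 4095) & ~4095ul;
    }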
@@ -260,7 +269,7 @@ ENTRY(startup_64)
 	leaq relocated(%rbx), %rax
 	jmp *%rax
 
-.section ".text"
+	.text
 relocated:
 
 /*
@@ -271,8 +280,7 @@ relocated:
 	leaq _end_before_pgt(%rbx), %rcx
 	subq %rdi, %rcx
 	cld
-	rep
-	stosb
+	rep stosb
 
 	/* Setup the stack */
 	leaq boot_stack_end(%rip), %rsp
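With %rdi pointing at the start of the region to clear, %rcx holding _end_before_pgt minus that start, and %al presumably zeroed just before this hunk, rep stosb is a byte-wise memset. In C:

    #include <string.h>

    /* Equivalent of: subq %rdi, %rcx; cld; rep stosb (with %al == 0). */
    static void clear_up_to(void *start, void *end_before_pgt)
    {
        memset(start, 0, (size_t)((char *)end_before_pgt - (char *)start));
    }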
@@ -311,9 +319,11 @@ gdt:
 	.quad 0x0000000000000000 /* TS continued */
 gdt_end:
 
-	.bss
-/* Stack and heap for uncompression */
-	.balign 4
+/*
+ * Stack and heap for uncompression
+ */
+	.bss
+	.balign 4
 boot_heap:
 	.fill BOOT_HEAP_SIZE, 1, 0
 boot_stack:
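The .bss block reserves the decompressor's working memory: .balign 4 aligns, and .fill BOOT_HEAP_SIZE, 1, 0 sets aside BOOT_HEAP_SIZE zero-initialized bytes for boot_heap, with boot_stack following the same pattern past the end of the hunk. A rough C-level equivalent (the sizes are placeholders, not the real configuration values):

    #define BOOT_HEAP_SIZE  0x10000   /* placeholder; real value comes from the boot configuration */
    #define BOOT_STACK_SIZE 0x1000    /* placeholder; the stack's .fill is outside this hunk */

    static unsigned char boot_heap[BOOT_HEAP_SIZE] __attribute__((aligned(4)));
    static unsigned char boot_stack[BOOT_STACK_SIZE] __attribute__((aligned(4)));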