Diffstat (limited to 'arch/x86/kernel/head_64.S')
-rw-r--r--  arch/x86/kernel/head_64.S | 101 +++++++++++++++++++++++++++++++++--------------------------------------------------------------------
1 file changed, 33 insertions(+), 68 deletions(-)
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 10a1955bb1d1..b07ac7b217cb 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -18,6 +18,7 @@
 #include <asm/page.h>
 #include <asm/msr.h>
 #include <asm/cache.h>
+#include <asm/processor-flags.h>
 
 #ifdef CONFIG_PARAVIRT
 #include <asm/asm-offsets.h>
@@ -31,6 +32,13 @@
  *
  */
 
+#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
+
+L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
+L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
+L4_START_KERNEL = pgd_index(__START_KERNEL_map)
+L3_START_KERNEL = pud_index(__START_KERNEL_map)
+
 	.text
 	.section .text.head
 	.code64
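
Note: these symbolic indices replace the magic numbers 258 and 511 used in the fixup code below. A minimal C sketch of the index arithmetic, assuming this era's layout constants (__PAGE_OFFSET = 0xffff810000000000 and __START_KERNEL_map = 0xffffffff80000000 are assumptions taken from the surrounding kernel headers, not part of this patch):

    #include <stdio.h>
    #include <stdint.h>

    /* Same shift-and-mask as the kernel's pgd_index()/pud_index(). */
    #define PGDIR_SHIFT	39
    #define PUD_SHIFT	30
    #define pgd_index(x)	(((x) >> PGDIR_SHIFT) & (512 - 1))
    #define pud_index(x)	(((x) >> PUD_SHIFT) & (512 - 1))

    int main(void)
    {
    	uint64_t page_offset = 0xffff810000000000ULL; /* assumed __PAGE_OFFSET */
    	uint64_t kernel_map  = 0xffffffff80000000ULL; /* assumed __START_KERNEL_map */

    	printf("L4_PAGE_OFFSET  = %llu\n", (unsigned long long)pgd_index(page_offset)); /* 258 */
    	printf("L4_START_KERNEL = %llu\n", (unsigned long long)pgd_index(kernel_map));  /* 511 */
    	printf("L3_START_KERNEL = %llu\n", (unsigned long long)pud_index(kernel_map));  /* 510 */
    	return 0;
    }

The computed values match the literals this patch deletes: 258 and 511 in the startup_64 fixups, 510 in level3_kernel_pgt.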
@@ -76,8 +84,8 @@ startup_64:
 	/* Fixup the physical addresses in the page table
 	 */
 	addq	%rbp, init_level4_pgt + 0(%rip)
-	addq	%rbp, init_level4_pgt + (258*8)(%rip)
-	addq	%rbp, init_level4_pgt + (511*8)(%rip)
+	addq	%rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
+	addq	%rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
 
 	addq	%rbp, level3_ident_pgt + 0(%rip)
 
@@ -128,7 +136,7 @@ ident_complete:
 	/* Fixup phys_base */
 	addq	%rbp, phys_base(%rip)
 
-#ifdef CONFIG_SMP
+#ifdef CONFIG_X86_TRAMPOLINE
 	addq	%rbp, trampoline_level4_pgt + 0(%rip)
 	addq	%rbp, trampoline_level4_pgt + (511*8)(%rip)
 #endif
@@ -154,9 +162,7 @@ ENTRY(secondary_startup_64)
 	 */
 
 	/* Enable PAE mode and PGE */
-	xorq	%rax, %rax
-	btsq	$5, %rax
-	btsq	$7, %rax
+	movl	$(X86_CR4_PAE | X86_CR4_PGE), %eax
 	movq	%rax, %cr4
 
 	/* Setup early boot stage 4 level pagetables. */
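
Note: the deleted btsq pair set CR4 bits 5 and 7 one at a time; the replacement loads the same mask with a single move. The named flags resolve to exactly those bits:

    /* Bit values from asm/processor-flags.h; the OR equals the bits the
     * old btsq $5 / btsq $7 sequence set. */
    #define X86_CR4_PAE 0x00000020	/* bit 5: physical address extensions */
    #define X86_CR4_PGE 0x00000080	/* bit 7: page global enable */
    /* X86_CR4_PAE | X86_CR4_PGE == (1 << 5) | (1 << 7) == 0xa0 */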
@@ -184,19 +190,15 @@ ENTRY(secondary_startup_64)
 1:	wrmsr				/* Make changes effective */
 
 	/* Setup cr0 */
-#define CR0_PM		1		/* protected mode */
-#define CR0_MP		(1<<1)
-#define CR0_ET		(1<<4)
-#define CR0_NE		(1<<5)
-#define CR0_WP		(1<<16)
-#define CR0_AM		(1<<18)
-#define CR0_PAGING	(1<<31)
-	movl	$CR0_PM|CR0_MP|CR0_ET|CR0_NE|CR0_WP|CR0_AM|CR0_PAGING,%eax
+#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
+			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
+			 X86_CR0_PG)
+	movl	$CR0_STATE, %eax
 	/* Make changes effective */
 	movq	%rax, %cr0
 
 	/* Setup a boot time stack */
-	movq	init_rsp(%rip),%rsp
+	movq	stack_start(%rip),%rsp
 
 	/* zero EFLAGS after setting rsp */
 	pushq $0
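
Note: CR0_STATE folds the seven ad-hoc local defines into the shared constants from asm/processor-flags.h; the value loaded into %eax is unchanged:

    #define X86_CR0_PE 0x00000001	/* bit 0:  protection enable (was CR0_PM) */
    #define X86_CR0_MP 0x00000002	/* bit 1:  monitor coprocessor */
    #define X86_CR0_ET 0x00000010	/* bit 4:  extension type */
    #define X86_CR0_NE 0x00000020	/* bit 5:  numeric error */
    #define X86_CR0_WP 0x00010000	/* bit 16: write protect */
    #define X86_CR0_AM 0x00040000	/* bit 18: alignment mask */
    #define X86_CR0_PG 0x80000000	/* bit 31: paging (was CR0_PAGING) */
    /* OR of all seven == 0x80050033, identical to the deleted expression */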
@@ -208,7 +210,7 @@ ENTRY(secondary_startup_64)
 	 * addresses where we're currently running on. We have to do that here
 	 * because in 32bit we couldn't load a 64bit linear address.
 	 */
-	lgdt	cpu_gdt_descr(%rip)
+	lgdt	early_gdt_descr(%rip)
 
 	/* set up data segments. actually 0 would do too */
 	movl	$__KERNEL_DS,%eax
@@ -257,8 +259,9 @@ ENTRY(secondary_startup_64)
 	.quad	x86_64_start_kernel
 	__FINITDATA
 
-ENTRY(init_rsp)
+ENTRY(stack_start)
 	.quad  init_thread_union+THREAD_SIZE-8
+	.word  0
 
 bad_address:
 	jmp bad_address
@@ -327,11 +330,11 @@ early_idt_ripmsg:
 ENTRY(name)
 
 /* Automate the creation of 1 to 1 mapping pmd entries */
-#define PMDS(START, PERM, COUNT)		\
-	i = 0 ;					\
-	.rept (COUNT) ;				\
-	.quad	(START) + (i << 21) + (PERM) ;	\
-	i = i + 1 ;				\
+#define PMDS(START, PERM, COUNT)			\
+	i = 0 ;						\
+	.rept (COUNT) ;					\
+	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
+	i = i + 1 ;					\
 	.endr
 
 /*
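
Note: PMD_SHIFT is 21 on x86_64 (each PMD entry maps a 2 MB page), so replacing the literal 21 changes nothing in the emitted table. A C sketch of what one .rept expansion produces, assuming PMD_SHIFT == 21:

    #include <stdint.h>

    /* Mirror of the PMDS() assembler macro above. */
    void pmds(uint64_t *table, uint64_t start, uint64_t perm, unsigned int count)
    {
    	for (unsigned int i = 0; i < count; i++)
    		table[i] = start + ((uint64_t)i << 21) + perm;	/* one 2 MB mapping per entry */
    }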
@@ -342,9 +345,9 @@ ENTRY(name)
  */
 NEXT_PAGE(init_level4_pgt)
 	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
-	.fill	257,8,0
+	.org	init_level4_pgt + L4_PAGE_OFFSET*8, 0
 	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
-	.fill	252,8,0
+	.org	init_level4_pgt + L4_START_KERNEL*8, 0
 	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
 	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
 
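
Note: .org pads with zeros up to the given table offset, so the hand-counted .fill padding can go. The old counts were consistent with the new symbolic indices, as a quick check shows:

    #include <assert.h>

    int main(void)
    {
    	/* one entry at slot 0, then 257 filler slots: next .quad lands at index 258 */
    	assert(1 + 257 == 258);		/* == L4_PAGE_OFFSET */
    	/* slots 0..258, then 252 filler slots: final .quad lands at index 511 */
    	assert(258 + 1 + 252 == 511);	/* == L4_START_KERNEL */
    	return 0;
    }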
@@ -353,7 +356,7 @@ NEXT_PAGE(level3_ident_pgt)
 	.fill	511,8,0
 
 NEXT_PAGE(level3_kernel_pgt)
-	.fill	510,8,0
+	.fill	L3_START_KERNEL,8,0
 	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
 	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
 	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
@@ -384,7 +387,7 @@ NEXT_PAGE(level2_kernel_pgt)
 	 * If you want to increase this then increase MODULES_VADDR
 	 * too.)
 	 */
-	PMDS(0, __PAGE_KERNEL_LARGE_EXEC|_PAGE_GLOBAL,
+	PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
 		KERNEL_IMAGE_SIZE/PMD_SIZE)
 
 NEXT_PAGE(level2_spare_pgt)
@@ -395,54 +398,16 @@ NEXT_PAGE(level2_spare_pgt)
 
 	.data
 	.align 16
-	.globl cpu_gdt_descr
-cpu_gdt_descr:
-	.word	gdt_end-cpu_gdt_table-1
-gdt:
-	.quad	cpu_gdt_table
-#ifdef CONFIG_SMP
-	.rept	NR_CPUS-1
-	.word	0
-	.quad	0
-	.endr
-#endif
+	.globl early_gdt_descr
+early_gdt_descr:
+	.word	GDT_ENTRIES*8-1
+	.quad	per_cpu__gdt_page
 
 ENTRY(phys_base)
 	/* This must match the first entry in level2_kernel_pgt */
 	.quad	0x0000000000000000
 
-/* We need valid kernel segments for data and code in long mode too
- * IRET will check the segment types  kkeil 2000/10/28
- * Also sysret mandates a special GDT layout
- */
-
-	.section .data.page_aligned, "aw"
-	.align PAGE_SIZE
-
-/* The TLS descriptors are currently at a different place compared to i386.
-   Hopefully nobody expects them at a fixed place (Wine?) */
 
-ENTRY(cpu_gdt_table)
-	.quad	0x0000000000000000	/* NULL descriptor */
-	.quad	0x00cf9b000000ffff	/* __KERNEL32_CS */
-	.quad	0x00af9b000000ffff	/* __KERNEL_CS */
-	.quad	0x00cf93000000ffff	/* __KERNEL_DS */
-	.quad	0x00cffb000000ffff	/* __USER32_CS */
-	.quad	0x00cff3000000ffff	/* __USER_DS, __USER32_DS */
-	.quad	0x00affb000000ffff	/* __USER_CS */
-	.quad	0x0			/* unused */
-	.quad	0,0			/* TSS */
-	.quad	0,0			/* LDT */
-	.quad	0,0,0			/* three TLS descriptors */
-	.quad	0x0000f40000000000	/* node/CPU stored in limit */
-gdt_end:
-	/* asm/segment.h:GDT_ENTRIES must match this */
-	/* This should be a multiple of the cache line size */
-	/* GDTs of other CPUs are now dynamically allocated */
-
-	/* zero the remaining page */
-	.fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
-
 	.section .bss, "aw", @nobits
 	.align L1_CACHE_BYTES
 ENTRY(idt_table)
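
Note: lgdt consumes a 10-byte pseudo-descriptor, a 16-bit limit (table size in bytes minus one) followed by a 64-bit base. The deleted cpu_gdt_table holds 16 quadwords, so GDT_ENTRIES*8-1 == 127 reproduces the old gdt_end-cpu_gdt_table-1 limit, while the base now points at the per-cpu gdt_page instead of a static per-CPU array. A sketch of the layout, with the struct name hypothetical:

    #include <stdint.h>

    /* Hypothetical C view of what `lgdt early_gdt_descr(%rip)` reads. */
    struct gdt_ptr {
    	uint16_t limit;	/* GDT_ENTRIES * 8 - 1 == 127 for the 16-entry GDT */
    	uint64_t base;	/* linear address of the first descriptor (per_cpu__gdt_page) */
    } __attribute__((packed));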