about summary refs log tree commit diff stats
path: root/arch/x86/kernel/head_32.S
diff options
context:
space:
mode:
authorIan Campbell <ijc@hellion.org.uk>2008-02-09 17:24:09 -0500
committerThomas Gleixner <tglx@linutronix.de>2008-02-09 17:24:09 -0500
commit551889a6e2a24a9c06fd453ea03b57b7746ffdc0 (patch)
treed906bbc4e4a96e243a14416bf02feb7a4ffd4d7a /arch/x86/kernel/head_32.S
parent185c045c245f46485ad8bbd8cc1100e986ff3f13 (diff)
x86: construct 32-bit boot time page tables in native format.
Specifically the boot time page tables in a CONFIG_X86_PAE=y enabled kernel are in PAE format. early_ioremap is updated to use the standard page table accessors. Clear any mappings beyond max_low_pfn from the boot page tables in native_pagetable_setup_start because the initial mappings can extend beyond the range of physical memory and into the vmalloc area. Derived from patches by Eric Biederman and H. Peter Anvin. [ jeremy@goop.org: PAE swapper_pg_dir needs to be page-sized fix ] Signed-off-by: Ian Campbell <ijc@hellion.org.uk> Cc: H. Peter Anvin <hpa@zytor.com> Cc: Eric W. Biederman <ebiederm@xmission.com> Cc: Andi Kleen <andi@firstfloor.org> Cc: Mika Penttilä <mika.penttila@kolumbus.fi> Cc: Jeremy Fitzhardinge <jeremy@goop.org> Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/kernel/head_32.S')
-rw-r--r--arch/x86/kernel/head_32.S151
1 file changed, 116 insertions, 35 deletions
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 5d8c5730686b..74ef4a41f224 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -19,6 +19,10 @@
19#include <asm/thread_info.h> 19#include <asm/thread_info.h>
20#include <asm/asm-offsets.h> 20#include <asm/asm-offsets.h>
21#include <asm/setup.h> 21#include <asm/setup.h>
22#include <asm/processor-flags.h>
23
24/* Physical address */
25#define pa(X) ((X) - __PAGE_OFFSET)
22 26
23/* 27/*
24 * References to members of the new_cpu_data structure. 28 * References to members of the new_cpu_data structure.
@@ -80,10 +84,6 @@ INIT_MAP_BEYOND_END = BOOTBITMAP_SIZE + (PAGE_TABLE_SIZE + ALLOCATOR_SLOP)*PAGE_
80 */ 84 */
81.section .text.head,"ax",@progbits 85.section .text.head,"ax",@progbits
82ENTRY(startup_32) 86ENTRY(startup_32)
83 /* check to see if KEEP_SEGMENTS flag is meaningful */
84 cmpw $0x207, BP_version(%esi)
85 jb 1f
86
87 /* test KEEP_SEGMENTS flag to see if the bootloader is asking 87 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
88 us to not reload segments */ 88 us to not reload segments */
89 testb $(1<<6), BP_loadflags(%esi) 89 testb $(1<<6), BP_loadflags(%esi)
@@ -92,7 +92,7 @@ ENTRY(startup_32)
92/* 92/*
93 * Set segments to known values. 93 * Set segments to known values.
94 */ 94 */
951: lgdt boot_gdt_descr - __PAGE_OFFSET 95 lgdt pa(boot_gdt_descr)
96 movl $(__BOOT_DS),%eax 96 movl $(__BOOT_DS),%eax
97 movl %eax,%ds 97 movl %eax,%ds
98 movl %eax,%es 98 movl %eax,%es
@@ -105,8 +105,8 @@ ENTRY(startup_32)
105 */ 105 */
106 cld 106 cld
107 xorl %eax,%eax 107 xorl %eax,%eax
108 movl $__bss_start - __PAGE_OFFSET,%edi 108 movl $pa(__bss_start),%edi
109 movl $__bss_stop - __PAGE_OFFSET,%ecx 109 movl $pa(__bss_stop),%ecx
110 subl %edi,%ecx 110 subl %edi,%ecx
111 shrl $2,%ecx 111 shrl $2,%ecx
112 rep ; stosl 112 rep ; stosl
@@ -118,31 +118,32 @@ ENTRY(startup_32)
118 * (kexec on panic case). Hence copy out the parameters before initializing 118 * (kexec on panic case). Hence copy out the parameters before initializing
119 * page tables. 119 * page tables.
120 */ 120 */
121 movl $(boot_params - __PAGE_OFFSET),%edi 121 movl $pa(boot_params),%edi
122 movl $(PARAM_SIZE/4),%ecx 122 movl $(PARAM_SIZE/4),%ecx
123 cld 123 cld
124 rep 124 rep
125 movsl 125 movsl
126 movl boot_params - __PAGE_OFFSET + NEW_CL_POINTER,%esi 126 movl pa(boot_params) + NEW_CL_POINTER,%esi
127 andl %esi,%esi 127 andl %esi,%esi
128 jz 1f # No command line 128 jz 1f # No command line
129 movl $(boot_command_line - __PAGE_OFFSET),%edi 129 movl $pa(boot_command_line),%edi
130 movl $(COMMAND_LINE_SIZE/4),%ecx 130 movl $(COMMAND_LINE_SIZE/4),%ecx
131 rep 131 rep
132 movsl 132 movsl
1331: 1331:
134 134
135#ifdef CONFIG_PARAVIRT 135#ifdef CONFIG_PARAVIRT
136 cmpw $0x207, (boot_params + BP_version - __PAGE_OFFSET) 136 /* This can only trip for a broken bootloader... */
137 cmpw $0x207, pa(boot_params + BP_version)
137 jb default_entry 138 jb default_entry
138 139
139 /* Paravirt-compatible boot parameters. Look to see what architecture 140 /* Paravirt-compatible boot parameters. Look to see what architecture
140 we're booting under. */ 141 we're booting under. */
141 movl (boot_params + BP_hardware_subarch - __PAGE_OFFSET), %eax 142 movl pa(boot_params + BP_hardware_subarch), %eax
142 cmpl $num_subarch_entries, %eax 143 cmpl $num_subarch_entries, %eax
143 jae bad_subarch 144 jae bad_subarch
144 145
145 movl subarch_entries - __PAGE_OFFSET(,%eax,4), %eax 146 movl pa(subarch_entries)(,%eax,4), %eax
146 subl $__PAGE_OFFSET, %eax 147 subl $__PAGE_OFFSET, %eax
147 jmp *%eax 148 jmp *%eax
148 149
@@ -170,17 +171,68 @@ num_subarch_entries = (. - subarch_entries) / 4
170 * Mappings are created both at virtual address 0 (identity mapping) 171 * Mappings are created both at virtual address 0 (identity mapping)
171 * and PAGE_OFFSET for up to _end+sizeof(page tables)+INIT_MAP_BEYOND_END. 172 * and PAGE_OFFSET for up to _end+sizeof(page tables)+INIT_MAP_BEYOND_END.
172 * 173 *
173 * Warning: don't use %esi or the stack in this code. However, %esp 174 * Note that the stack is not yet set up!
174 * can be used as a GPR if you really need it...
175 */ 175 */
176page_pde_offset = (__PAGE_OFFSET >> 20); 176#define PTE_ATTR 0x007 /* PRESENT+RW+USER */
177#define PDE_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
178#define PGD_ATTR 0x001 /* PRESENT (no other attributes) */
177 179
178default_entry: 180default_entry:
179 movl $(pg0 - __PAGE_OFFSET), %edi 181#ifdef CONFIG_X86_PAE
180 movl $(swapper_pg_dir - __PAGE_OFFSET), %edx 182
181 movl $0x007, %eax /* 0x007 = PRESENT+RW+USER */ 183 /*
184 * In PAE mode swapper_pg_dir is statically defined to contain enough
185 * entries to cover the VMSPLIT option (that is the top 1, 2 or 3
186 * entries). The identity mapping is handled by pointing two PGD
187 * entries to the first kernel PMD.
188 *
189 * Note the upper half of each PMD or PTE are always zero at
190 * this stage.
191 */
192
193#define KPMDS ((0x100000000-__PAGE_OFFSET) >> 30) /* Number of kernel PMDs */
194
195 xorl %ebx,%ebx /* %ebx is kept at zero */
196
197 movl $pa(pg0), %edi
198 movl $pa(swapper_pg_pmd), %edx
199 movl $PTE_ATTR, %eax
20010:
201 leal PDE_ATTR(%edi),%ecx /* Create PMD entry */
202 movl %ecx,(%edx) /* Store PMD entry */
203 /* Upper half already zero */
204 addl $8,%edx
205 movl $512,%ecx
20611:
207 stosl
208 xchgl %eax,%ebx
209 stosl
210 xchgl %eax,%ebx
211 addl $0x1000,%eax
212 loop 11b
213
214 /*
215 * End condition: we must map up to and including INIT_MAP_BEYOND_END
216 * bytes beyond the end of our own page tables.
217 */
218 leal (INIT_MAP_BEYOND_END+PTE_ATTR)(%edi),%ebp
219 cmpl %ebp,%eax
220 jb 10b
2211:
222 movl %edi,pa(init_pg_tables_end)
223
224 /* Do early initialization of the fixmap area */
225 movl $pa(swapper_pg_fixmap)+PDE_ATTR,%eax
226 movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
227#else /* Not PAE */
228
229page_pde_offset = (__PAGE_OFFSET >> 20);
230
231 movl $pa(pg0), %edi
232 movl $pa(swapper_pg_dir), %edx
233 movl $PTE_ATTR, %eax
18210: 23410:
183 leal 0x007(%edi),%ecx /* Create PDE entry */ 235 leal PDE_ATTR(%edi),%ecx /* Create PDE entry */
184 movl %ecx,(%edx) /* Store identity PDE entry */ 236 movl %ecx,(%edx) /* Store identity PDE entry */
185 movl %ecx,page_pde_offset(%edx) /* Store kernel PDE entry */ 237 movl %ecx,page_pde_offset(%edx) /* Store kernel PDE entry */
186 addl $4,%edx 238 addl $4,%edx
@@ -189,19 +241,20 @@ default_entry:
189 stosl 241 stosl
190 addl $0x1000,%eax 242 addl $0x1000,%eax
191 loop 11b 243 loop 11b
192 /* End condition: we must map up to and including INIT_MAP_BEYOND_END */ 244 /*
193 /* bytes beyond the end of our own page tables; the +0x007 is the attribute bits */ 245 * End condition: we must map up to and including INIT_MAP_BEYOND_END
194 leal (INIT_MAP_BEYOND_END+0x007)(%edi),%ebp 246 * bytes beyond the end of our own page tables; the +0x007 is
247 * the attribute bits
248 */
249 leal (INIT_MAP_BEYOND_END+PTE_ATTR)(%edi),%ebp
195 cmpl %ebp,%eax 250 cmpl %ebp,%eax
196 jb 10b 251 jb 10b
197 movl %edi,(init_pg_tables_end - __PAGE_OFFSET) 252 movl %edi,pa(init_pg_tables_end)
198
199 /* Do an early initialization of the fixmap area */
200 movl $(swapper_pg_dir - __PAGE_OFFSET), %edx
201 movl $(swapper_pg_pmd - __PAGE_OFFSET), %eax
202 addl $0x67, %eax /* 0x67 == _PAGE_TABLE */
203 movl %eax, 4092(%edx)
204 253
254 /* Do early initialization of the fixmap area */
255 movl $pa(swapper_pg_fixmap)+PDE_ATTR,%eax
256 movl %eax,pa(swapper_pg_dir+0xffc)
257#endif
205 jmp 3f 258 jmp 3f
206/* 259/*
207 * Non-boot CPU entry point; entered from trampoline.S 260 * Non-boot CPU entry point; entered from trampoline.S
@@ -241,7 +294,7 @@ ENTRY(startup_32_smp)
241 * NOTE! We have to correct for the fact that we're 294 * NOTE! We have to correct for the fact that we're
242 * not yet offset PAGE_OFFSET.. 295 * not yet offset PAGE_OFFSET..
243 */ 296 */
244#define cr4_bits mmu_cr4_features-__PAGE_OFFSET 297#define cr4_bits pa(mmu_cr4_features)
245 movl cr4_bits,%edx 298 movl cr4_bits,%edx
246 andl %edx,%edx 299 andl %edx,%edx
247 jz 6f 300 jz 6f
@@ -276,10 +329,10 @@ ENTRY(startup_32_smp)
276/* 329/*
277 * Enable paging 330 * Enable paging
278 */ 331 */
279 movl $swapper_pg_dir-__PAGE_OFFSET,%eax 332 movl $pa(swapper_pg_dir),%eax
280 movl %eax,%cr3 /* set the page table pointer.. */ 333 movl %eax,%cr3 /* set the page table pointer.. */
281 movl %cr0,%eax 334 movl %cr0,%eax
282 orl $0x80000000,%eax 335 orl $X86_CR0_PG,%eax
283 movl %eax,%cr0 /* ..and set paging (PG) bit */ 336 movl %eax,%cr0 /* ..and set paging (PG) bit */
284 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */ 337 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
2851: 3381:
@@ -552,16 +605,44 @@ ENTRY(_stext)
552 */ 605 */
553.section ".bss.page_aligned","wa" 606.section ".bss.page_aligned","wa"
554 .align PAGE_SIZE_asm 607 .align PAGE_SIZE_asm
608#ifdef CONFIG_X86_PAE
609ENTRY(swapper_pg_pmd)
610 .fill 1024*KPMDS,4,0
611#else
555ENTRY(swapper_pg_dir) 612ENTRY(swapper_pg_dir)
556 .fill 1024,4,0 613 .fill 1024,4,0
557ENTRY(swapper_pg_pmd) 614#endif
615ENTRY(swapper_pg_fixmap)
558 .fill 1024,4,0 616 .fill 1024,4,0
559ENTRY(empty_zero_page) 617ENTRY(empty_zero_page)
560 .fill 4096,1,0 618 .fill 4096,1,0
561
562/* 619/*
563 * This starts the data section. 620 * This starts the data section.
564 */ 621 */
622#ifdef CONFIG_X86_PAE
623.section ".data.page_aligned","wa"
624 /* Page-aligned for the benefit of paravirt? */
625 .align PAGE_SIZE_asm
626ENTRY(swapper_pg_dir)
627 .long pa(swapper_pg_pmd+PGD_ATTR),0 /* low identity map */
628# if KPMDS == 3
629 .long pa(swapper_pg_pmd+PGD_ATTR),0
630 .long pa(swapper_pg_pmd+PGD_ATTR+0x1000),0
631 .long pa(swapper_pg_pmd+PGD_ATTR+0x2000),0
632# elif KPMDS == 2
633 .long 0,0
634 .long pa(swapper_pg_pmd+PGD_ATTR),0
635 .long pa(swapper_pg_pmd+PGD_ATTR+0x1000),0
636# elif KPMDS == 1
637 .long 0,0
638 .long 0,0
639 .long pa(swapper_pg_pmd+PGD_ATTR),0
640# else
641# error "Kernel PMDs should be 1, 2 or 3"
642# endif
643 .align PAGE_SIZE_asm /* needs to be page-sized too */
644#endif
645
565.data 646.data
566ENTRY(stack_start) 647ENTRY(stack_start)
567 .long init_thread_union+THREAD_SIZE 648 .long init_thread_union+THREAD_SIZE