 arch/x86/kernel/head_32.S    | 151
 arch/x86/kernel/setup_32.c   |   4
 arch/x86/mm/init_32.c        |  72
 arch/x86/mm/ioremap.c        |  55
 include/asm-x86/page_32.h    |   1
 include/asm-x86/pgtable_32.h |   4
 6 files changed, 178 insertions(+), 109 deletions(-)
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 5d8c5730686b..74ef4a41f224 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -19,6 +19,10 @@
 #include <asm/thread_info.h>
 #include <asm/asm-offsets.h>
 #include <asm/setup.h>
+#include <asm/processor-flags.h>
+
+/* Physical address */
+#define pa(X) ((X) - __PAGE_OFFSET)
 
 /*
  * References to members of the new_cpu_data structure.
@@ -80,10 +84,6 @@ INIT_MAP_BEYOND_END = BOOTBITMAP_SIZE + (PAGE_TABLE_SIZE + ALLOCATOR_SLOP)*PAGE_
  */
 .section .text.head,"ax",@progbits
 ENTRY(startup_32)
-	/* check to see if KEEP_SEGMENTS flag is meaningful */
-	cmpw $0x207, BP_version(%esi)
-	jb 1f
-
 	/* test KEEP_SEGMENTS flag to see if the bootloader is asking
 	   us to not reload segments */
 	testb $(1<<6), BP_loadflags(%esi)
@@ -92,7 +92,7 @@ ENTRY(startup_32)
 /*
  * Set segments to known values.
  */
-1:	lgdt boot_gdt_descr - __PAGE_OFFSET
+	lgdt pa(boot_gdt_descr)
 	movl $(__BOOT_DS),%eax
 	movl %eax,%ds
 	movl %eax,%es
@@ -105,8 +105,8 @@ ENTRY(startup_32)
  */
 	cld
 	xorl %eax,%eax
-	movl $__bss_start - __PAGE_OFFSET,%edi
-	movl $__bss_stop - __PAGE_OFFSET,%ecx
+	movl $pa(__bss_start),%edi
+	movl $pa(__bss_stop),%ecx
 	subl %edi,%ecx
 	shrl $2,%ecx
 	rep ; stosl
@@ -118,31 +118,32 @@ ENTRY(startup_32)
  * (kexec on panic case). Hence copy out the parameters before initializing
  * page tables.
  */
-	movl $(boot_params - __PAGE_OFFSET),%edi
+	movl $pa(boot_params),%edi
 	movl $(PARAM_SIZE/4),%ecx
 	cld
 	rep
 	movsl
-	movl boot_params - __PAGE_OFFSET + NEW_CL_POINTER,%esi
+	movl pa(boot_params) + NEW_CL_POINTER,%esi
 	andl %esi,%esi
 	jz 1f			# No command line
-	movl $(boot_command_line - __PAGE_OFFSET),%edi
+	movl $pa(boot_command_line),%edi
 	movl $(COMMAND_LINE_SIZE/4),%ecx
 	rep
 	movsl
 1:
 
 #ifdef CONFIG_PARAVIRT
-	cmpw $0x207, (boot_params + BP_version - __PAGE_OFFSET)
+	/* This can only trip for a broken bootloader... */
+	cmpw $0x207, pa(boot_params + BP_version)
 	jb default_entry
 
 	/* Paravirt-compatible boot parameters.  Look to see what architecture
 	   we're booting under. */
-	movl (boot_params + BP_hardware_subarch - __PAGE_OFFSET), %eax
+	movl pa(boot_params + BP_hardware_subarch), %eax
 	cmpl $num_subarch_entries, %eax
 	jae bad_subarch
 
-	movl subarch_entries - __PAGE_OFFSET(,%eax,4), %eax
+	movl pa(subarch_entries)(,%eax,4), %eax
 	subl $__PAGE_OFFSET, %eax
 	jmp *%eax
 
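
The pa() macro used throughout this patch is simple link-time arithmetic: the kernel is linked to run at __PAGE_OFFSET plus its physical load address, so subtracting __PAGE_OFFSET from a symbol yields an address that is valid before paging is enabled. A minimal standalone C sketch; the __PAGE_OFFSET and startup_32 values below are illustrative assumptions, not taken from the patch:

#include <stdio.h>
#include <stdint.h>

#define __PAGE_OFFSET 0xC0000000u	/* default VMSPLIT_3G value */
#define pa(X) ((X) - __PAGE_OFFSET)	/* same arithmetic as the patch */

int main(void)
{
	uint32_t startup_32 = 0xC0100000u;	/* hypothetical link address */

	/* Prints 0x00100000: the physical address before paging is on. */
	printf("pa(startup_32) = 0x%08x\n", pa(startup_32));
	return 0;
}
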
@@ -170,17 +171,68 @@ num_subarch_entries = (. - subarch_entries) / 4
  * Mappings are created both at virtual address 0 (identity mapping)
  * and PAGE_OFFSET for up to _end+sizeof(page tables)+INIT_MAP_BEYOND_END.
  *
- * Warning: don't use %esi or the stack in this code.  However, %esp
- * can be used as a GPR if you really need it...
+ * Note that the stack is not yet set up!
  */
-page_pde_offset = (__PAGE_OFFSET >> 20);
+#define PTE_ATTR	0x007		/* PRESENT+RW+USER */
+#define PDE_ATTR	0x067		/* PRESENT+RW+USER+DIRTY+ACCESSED */
+#define PGD_ATTR	0x001		/* PRESENT (no other attributes) */
 
 default_entry:
-	movl $(pg0 - __PAGE_OFFSET), %edi
-	movl $(swapper_pg_dir - __PAGE_OFFSET), %edx
-	movl $0x007, %eax		/* 0x007 = PRESENT+RW+USER */
+#ifdef CONFIG_X86_PAE
+
+	/*
+	 * In PAE mode swapper_pg_dir is statically defined to contain enough
+	 * entries to cover the VMSPLIT option (that is the top 1, 2 or 3
+	 * entries). The identity mapping is handled by pointing two PGD
+	 * entries to the first kernel PMD.
+	 *
+	 * Note the upper half of each PMD or PTE is always zero at
+	 * this stage.
+	 */
+
+#define KPMDS ((0x100000000-__PAGE_OFFSET) >> 30)	/* Number of kernel PMDs */
+
+	xorl %ebx,%ebx			/* %ebx is kept at zero */
+
+	movl $pa(pg0), %edi
+	movl $pa(swapper_pg_pmd), %edx
+	movl $PTE_ATTR, %eax
+10:
+	leal PDE_ATTR(%edi),%ecx	/* Create PMD entry */
+	movl %ecx,(%edx)		/* Store PMD entry */
+					/* Upper half already zero */
+	addl $8,%edx
+	movl $512,%ecx
+11:
+	stosl
+	xchgl %eax,%ebx
+	stosl
+	xchgl %eax,%ebx
+	addl $0x1000,%eax
+	loop 11b
+
+	/*
+	 * End condition: we must map up to and including INIT_MAP_BEYOND_END
+	 * bytes beyond the end of our own page tables.
+	 */
+	leal (INIT_MAP_BEYOND_END+PTE_ATTR)(%edi),%ebp
+	cmpl %ebp,%eax
+	jb 10b
+1:
+	movl %edi,pa(init_pg_tables_end)
+
+	/* Do early initialization of the fixmap area */
+	movl $pa(swapper_pg_fixmap)+PDE_ATTR,%eax
+	movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
+#else	/* Not PAE */
+
+page_pde_offset = (__PAGE_OFFSET >> 20);
+
+	movl $pa(pg0), %edi
+	movl $pa(swapper_pg_dir), %edx
+	movl $PTE_ATTR, %eax
 10:
-	leal 0x007(%edi),%ecx		/* Create PDE entry */
+	leal PDE_ATTR(%edi),%ecx	/* Create PDE entry */
 	movl %ecx,(%edx)		/* Store identity PDE entry */
 	movl %ecx,page_pde_offset(%edx)	/* Store kernel PDE entry */
 	addl $4,%edx
@@ -189,19 +241,20 @@ default_entry:
 	stosl
 	addl $0x1000,%eax
 	loop 11b
-	/* End condition: we must map up to and including INIT_MAP_BEYOND_END */
-	/* bytes beyond the end of our own page tables; the +0x007 is the attribute bits */
-	leal (INIT_MAP_BEYOND_END+0x007)(%edi),%ebp
+	/*
+	 * End condition: we must map up to and including INIT_MAP_BEYOND_END
+	 * bytes beyond the end of our own page tables; the +0x007 is
+	 * the attribute bits
+	 */
+	leal (INIT_MAP_BEYOND_END+PTE_ATTR)(%edi),%ebp
 	cmpl %ebp,%eax
 	jb 10b
-	movl %edi,(init_pg_tables_end - __PAGE_OFFSET)
-
-	/* Do an early initialization of the fixmap area */
-	movl $(swapper_pg_dir - __PAGE_OFFSET), %edx
-	movl $(swapper_pg_pmd - __PAGE_OFFSET), %eax
-	addl $0x67, %eax		/* 0x67 == _PAGE_TABLE */
-	movl %eax, 4092(%edx)
+	movl %edi,pa(init_pg_tables_end)
 
+	/* Do early initialization of the fixmap area */
+	movl $pa(swapper_pg_fixmap)+PDE_ATTR,%eax
+	movl %eax,pa(swapper_pg_dir+0xffc)
+#endif
 	jmp 3f
 /*
  * Non-boot CPU entry point; entered from trampoline.S
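
For readers tracing the 10:/11: loops, here is a rough C rendering of the PAE fill loop above. It is a sketch only: the real code must run stackless with paging disabled (which is why it is assembly), it assumes 32-bit flat addresses as the original does, and the function and parameter names are invented for illustration:

#include <stdint.h>

#define PTE_ATTR 0x007			/* PRESENT+RW+USER */
#define PDE_ATTR 0x067			/* PRESENT+RW+USER+DIRTY+ACCESSED */

/* Build 64-bit PMD/PTE entries; the upper 32 bits stay zero (%ebx). */
static void build_boot_tables(uint32_t *pmd, uint32_t *pte,
			      uint32_t init_map_beyond_end)
{
	uint32_t entry = PTE_ATTR;	/* %eax: low half of next PTE */

	do {
		*pmd++ = (uint32_t)(uintptr_t)pte | PDE_ATTR; /* PMD low half  */
		*pmd++ = 0;				      /* PMD high half */

		for (int i = 0; i < 512; i++) {	/* 512 eight-byte PTEs/page */
			*pte++ = entry;		/* stosl: low dword  */
			*pte++ = 0;		/* stosl: high dword */
			entry += 0x1000;	/* next 4 KiB frame  */
		}
	/*
	 * End condition: keep going until the mapping reaches
	 * INIT_MAP_BEYOND_END bytes past the tables themselves (%ebp).
	 */
	} while (entry < (uint32_t)(uintptr_t)pte
			 + init_map_beyond_end + PTE_ATTR);
}
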
@@ -241,7 +294,7 @@ ENTRY(startup_32_smp)
 	 * NOTE! We have to correct for the fact that we're
 	 * not yet offset PAGE_OFFSET..
 	 */
-#define cr4_bits mmu_cr4_features-__PAGE_OFFSET
+#define cr4_bits pa(mmu_cr4_features)
 	movl cr4_bits,%edx
 	andl %edx,%edx
 	jz 6f
@@ -276,10 +329,10 @@ ENTRY(startup_32_smp)
 /*
  * Enable paging
  */
-	movl $swapper_pg_dir-__PAGE_OFFSET,%eax
+	movl $pa(swapper_pg_dir),%eax
 	movl %eax,%cr3		/* set the page table pointer.. */
 	movl %cr0,%eax
-	orl $0x80000000,%eax
+	orl $X86_CR0_PG,%eax
 	movl %eax,%cr0		/* ..and set paging (PG) bit */
 	ljmp $__BOOT_CS,$1f	/* Clear prefetch and normalize %eip */
 1:
@@ -552,16 +605,44 @@ ENTRY(_stext)
  */
 .section ".bss.page_aligned","wa"
 	.align PAGE_SIZE_asm
+#ifdef CONFIG_X86_PAE
+ENTRY(swapper_pg_pmd)
+	.fill 1024*KPMDS,4,0
+#else
 ENTRY(swapper_pg_dir)
 	.fill 1024,4,0
-ENTRY(swapper_pg_pmd)
+#endif
+ENTRY(swapper_pg_fixmap)
 	.fill 1024,4,0
 ENTRY(empty_zero_page)
 	.fill 4096,1,0
-
 /*
  * This starts the data section.
  */
+#ifdef CONFIG_X86_PAE
+.section ".data.page_aligned","wa"
+	/* Page-aligned for the benefit of paravirt? */
+	.align PAGE_SIZE_asm
+ENTRY(swapper_pg_dir)
+	.long	pa(swapper_pg_pmd+PGD_ATTR),0		/* low identity map */
+# if KPMDS == 3
+	.long	pa(swapper_pg_pmd+PGD_ATTR),0
+	.long	pa(swapper_pg_pmd+PGD_ATTR+0x1000),0
+	.long	pa(swapper_pg_pmd+PGD_ATTR+0x2000),0
+# elif KPMDS == 2
+	.long	0,0
+	.long	pa(swapper_pg_pmd+PGD_ATTR),0
+	.long	pa(swapper_pg_pmd+PGD_ATTR+0x1000),0
+# elif KPMDS == 1
+	.long	0,0
+	.long	0,0
+	.long	pa(swapper_pg_pmd+PGD_ATTR),0
+# else
+# error "Kernel PMDs should be 1, 2 or 3"
+# endif
+	.align PAGE_SIZE_asm		/* needs to be page-sized too */
+#endif
+
 .data
 ENTRY(stack_start)
 	.long init_thread_union+THREAD_SIZE
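
The KPMDS value that the swapper_pg_dir cases above branch on can be checked with a few lines of C; for the standard 3G/2G/1G VMSPLIT values of __PAGE_OFFSET it yields exactly the 1, 2 or 3 kernel PMDs the preprocessor conditionals expect. A standalone sketch (split values assumed from the usual VMSPLIT choices):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical __PAGE_OFFSET values for 3G/2G/1G user splits. */
	uint64_t splits[] = { 0xC0000000, 0x80000000, 0x40000000 };

	for (int i = 0; i < 3; i++) {
		/* Same arithmetic as the patch: kernel PMDs of 1 GiB each. */
		uint64_t kpmds = (0x100000000ULL - splits[i]) >> 30;
		printf("__PAGE_OFFSET=0x%08llx -> KPMDS=%llu\n",
		       (unsigned long long)splits[i],
		       (unsigned long long)kpmds);	/* 1, 2, 3 */
	}
	return 0;
}
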
diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c
index d1d8c347cc0b..691ab4cb167b 100644
--- a/arch/x86/kernel/setup_32.c
+++ b/arch/x86/kernel/setup_32.c
@@ -154,7 +154,11 @@ struct cpuinfo_x86 new_cpu_data __cpuinitdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
 struct cpuinfo_x86 boot_cpu_data __read_mostly = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
 EXPORT_SYMBOL(boot_cpu_data);
 
+#ifndef CONFIG_X86_PAE
 unsigned long mmu_cr4_features;
+#else
+unsigned long mmu_cr4_features = X86_CR4_PAE;
+#endif
 
 /* for MCA, but anyone else can use it if they want */
 unsigned int machine_id;
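
Presetting X86_CR4_PAE here matters because of ordering in head_32.S: startup_32_smp loads %cr4 from pa(mmu_cr4_features) before it sets CR0.PG, and PAE-format page tables require CR4.PAE to already be on at that point. A hypothetical C sketch of that ordering, with invented callback names (not kernel code):

#include <stdint.h>

#define X86_CR4_PAE 0x00000020	/* CR4.PAE, as in the kernel headers */

/* Patched default: PAE kernels start life with the PAE bit present. */
uint32_t mmu_cr4_features = X86_CR4_PAE;

/* Mirrors the startup_32_smp sequence: %cr4 first, CR0.PG second,
 * so the PAE bit must already be in mmu_cr4_features. */
void enable_paging(void (*write_cr4)(uint32_t), void (*set_cr0_pg)(void))
{
	if (mmu_cr4_features)
		write_cr4(mmu_cr4_features);	/* cr4_bits path in head_32.S */
	set_cr0_pg();				/* orl $X86_CR0_PG,%eax */
}
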
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index d1bc04006d16..54aba3cf9efe 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -46,6 +46,7 @@
 #include <asm/pgalloc.h>
 #include <asm/sections.h>
 #include <asm/paravirt.h>
+#include <asm/setup.h>
 
 unsigned int __VMALLOC_RESERVE = 128 << 20;
 
@@ -328,44 +329,38 @@ pteval_t __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
 
 void __init native_pagetable_setup_start(pgd_t *base)
 {
-#ifdef CONFIG_X86_PAE
-	int i;
+	unsigned long pfn, va;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
 
 	/*
-	 * Init entries of the first-level page table to the
-	 * zero page, if they haven't already been set up.
-	 *
-	 * In a normal native boot, we'll be running on a
-	 * pagetable rooted in swapper_pg_dir, but not in PAE
-	 * mode, so this will end up clobbering the mappings
-	 * for the lower 24Mbytes of the address space,
-	 * without affecting the kernel address space.
+	 * Remove any mappings which extend past the end of physical
+	 * memory from the boot time page table:
 	 */
-	for (i = 0; i < USER_PTRS_PER_PGD; i++)
-		set_pgd(&base[i],
-			__pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
-
-	/* Make sure kernel address space is empty so that a pagetable
-	   will be allocated for it. */
-	memset(&base[USER_PTRS_PER_PGD], 0,
-	       KERNEL_PGD_PTRS * sizeof(pgd_t));
-#else
+	for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
+		va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
+		pgd = base + pgd_index(va);
+		if (!pgd_present(*pgd))
+			break;
+
+		pud = pud_offset(pgd, va);
+		pmd = pmd_offset(pud, va);
+		if (!pmd_present(*pmd))
+			break;
+
+		pte = pte_offset_kernel(pmd, va);
+		if (!pte_present(*pte))
+			break;
+
+		pte_clear(NULL, va, pte);
+	}
 	paravirt_alloc_pd(&init_mm, __pa(base) >> PAGE_SHIFT);
-#endif
 }
 
 void __init native_pagetable_setup_done(pgd_t *base)
 {
-#ifdef CONFIG_X86_PAE
-	/*
-	 * Add low memory identity-mappings - SMP needs it when
-	 * starting up on an AP from real-mode. In the non-PAE
-	 * case we already have these mappings through head.S.
-	 * All user-space mappings are explicitly cleared after
-	 * SMP startup.
-	 */
-	set_pgd(&base[0], base[USER_PTRS_PER_PGD]);
-#endif
 }
 
 /*
@@ -374,9 +369,8 @@ void __init native_pagetable_setup_done(pgd_t *base)
  * the boot process.
  *
  * If we're booting on native hardware, this will be a pagetable
- * constructed in arch/i386/kernel/head.S, and not running in PAE mode
- * (even if we'll end up running in PAE).  The root of the pagetable
- * will be swapper_pg_dir.
+ * constructed in arch/x86/kernel/head_32.S.  The root of the
+ * pagetable will be swapper_pg_dir.
  *
  * If we're booting paravirtualized under a hypervisor, then there are
  * more options: we may already be running PAE, and the pagetable may
@@ -537,14 +531,6 @@ void __init paging_init(void)
 
 	load_cr3(swapper_pg_dir);
 
-#ifdef CONFIG_X86_PAE
-	/*
-	 * We will bail out later - printk doesn't work right now so
-	 * the user would just see a hanging kernel.
-	 */
-	if (cpu_has_pae)
-		set_in_cr4(X86_CR4_PAE);
-#endif
 	__flush_tlb_all();
 
 	kmap_init();
@@ -675,10 +661,6 @@ void __init mem_init(void)
 	BUG_ON((unsigned long)high_memory > VMALLOC_START);
 #endif /* double-sanity-check paranoia */
 
-#ifdef CONFIG_X86_PAE
-	if (!cpu_has_pae)
-		panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
-#endif
 	if (boot_cpu_data.wp_works_ok < 0)
 		test_wp_bit();
 
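
The new native_pagetable_setup_start() walk relies only on the standard pgd/pud/pmd/pte index arithmetic. A standalone sketch of that arithmetic for the x86-32 PAE layout, using an assumed 512 MiB machine and the default __PAGE_OFFSET, shows which indices the first cleared address lands on:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PAGE_OFFSET	0xC0000000u
#define PGDIR_SHIFT	30	/* PAE: 4 PGD entries of 1 GiB each */
#define PMD_SHIFT	21	/* 512 PMD entries of 2 MiB each    */

int main(void)
{
	/* Hypothetical 512 MiB machine: max_low_pfn = 0x20000. */
	uint32_t max_low_pfn = (512u << 20) >> PAGE_SHIFT;
	uint32_t pfn = max_low_pfn + 1;		/* first pfn past RAM */
	uint32_t va = PAGE_OFFSET + (pfn << PAGE_SHIFT);

	printf("first va cleared: 0x%08x\n", va);		/* 0xE0001000 */
	printf("pgd_index: %u\n", va >> PGDIR_SHIFT);		/* 3   */
	printf("pmd_index: %u\n", (va >> PMD_SHIFT) & 511);	/* 256 */
	printf("pte_index: %u\n", (va >> PAGE_SHIFT) & 511);	/* 1   */
	return 0;
}
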
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index ee6648fe6b15..1106b7f477bd 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -260,41 +260,46 @@ static int __init early_ioremap_debug_setup(char *str)
 early_param("early_ioremap_debug", early_ioremap_debug_setup);
 
 static __initdata int after_paging_init;
-static __initdata unsigned long bm_pte[1024]
+static __initdata pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
 				__attribute__((aligned(PAGE_SIZE)));
 
-static inline unsigned long * __init early_ioremap_pgd(unsigned long addr)
+static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
 {
-	return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
+	pgd_t *pgd = &swapper_pg_dir[pgd_index(addr)];
+	pud_t *pud = pud_offset(pgd, addr);
+	pmd_t *pmd = pmd_offset(pud, addr);
+
+	return pmd;
 }
 
-static inline unsigned long * __init early_ioremap_pte(unsigned long addr)
+static inline pte_t * __init early_ioremap_pte(unsigned long addr)
 {
-	return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
+	return &bm_pte[pte_index(addr)];
 }
 
 void __init early_ioremap_init(void)
 {
-	unsigned long *pgd;
+	pmd_t *pmd;
 
 	if (early_ioremap_debug)
 		printk(KERN_INFO "early_ioremap_init()\n");
 
-	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
-	*pgd = __pa(bm_pte) | _PAGE_TABLE;
+	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
 	memset(bm_pte, 0, sizeof(bm_pte));
+	set_pmd(pmd, __pmd(__pa(bm_pte) | _PAGE_TABLE));
+
 	/*
-	 * The boot-ioremap range spans multiple pgds, for which
+	 * The boot-ioremap range spans multiple pmds, for which
 	 * we are not prepared:
 	 */
-	if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
+	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
 		WARN_ON(1);
-		printk(KERN_WARNING "pgd %p != %p\n",
-		       pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
+		printk(KERN_WARNING "pmd %p != %p\n",
+		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
 		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
 			fix_to_virt(FIX_BTMAP_BEGIN));
 		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
 			fix_to_virt(FIX_BTMAP_END));
 
 		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
 		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
@@ -304,28 +309,29 @@ void __init early_ioremap_init(void)
 
 void __init early_ioremap_clear(void)
 {
-	unsigned long *pgd;
+	pmd_t *pmd;
 
 	if (early_ioremap_debug)
 		printk(KERN_INFO "early_ioremap_clear()\n");
 
-	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
-	*pgd = 0;
-	paravirt_release_pt(__pa(pgd) >> PAGE_SHIFT);
+	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
+	pmd_clear(pmd);
+	paravirt_release_pt(__pa(pmd) >> PAGE_SHIFT);
 	__flush_tlb_all();
 }
 
 void __init early_ioremap_reset(void)
 {
 	enum fixed_addresses idx;
-	unsigned long *pte, phys, addr;
+	unsigned long addr, phys;
+	pte_t *pte;
 
 	after_paging_init = 1;
 	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
 		addr = fix_to_virt(idx);
 		pte = early_ioremap_pte(addr);
-		if (*pte & _PAGE_PRESENT) {
-			phys = *pte & PAGE_MASK;
+		if (pte_present(*pte)) {
+			phys = pte_val(*pte) & PAGE_MASK;
 			set_fixmap(idx, phys);
 		}
 	}
@@ -334,7 +340,8 @@ void __init early_ioremap_reset(void)
 static void __init __early_set_fixmap(enum fixed_addresses idx,
 				   unsigned long phys, pgprot_t flags)
 {
-	unsigned long *pte, addr = __fix_to_virt(idx);
+	unsigned long addr = __fix_to_virt(idx);
+	pte_t *pte;
 
 	if (idx >= __end_of_fixed_addresses) {
 		BUG();
@@ -342,9 +349,9 @@ static void __init __early_set_fixmap(enum fixed_addresses idx,
 	}
 	pte = early_ioremap_pte(addr);
 	if (pgprot_val(flags))
-		*pte = (phys & PAGE_MASK) | pgprot_val(flags);
+		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
 	else
-		*pte = 0;
+		pte_clear(NULL, addr, pte);
 	__flush_tlb_one(addr);
 }
 
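
The switch from the open-coded "& 1023" to pte_index() is what makes bm_pte layout-correct under PAE, where a page holds 512 eight-byte PTEs rather than 1024 four-byte ones. A standalone sketch of the index math (the two-argument pte_index here is illustrative only; the kernel's pte_index() takes just an address and uses the configured PTRS_PER_PTE):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

/* Index of an address within one page-table page, for a given layout. */
static unsigned pte_index(uint32_t addr, unsigned ptrs_per_pte)
{
	return (addr >> PAGE_SHIFT) & (ptrs_per_pte - 1);
}

int main(void)
{
	uint32_t addr = 0xFFE00000u;	/* hypothetical fixmap-range address */

	printf("2-level index: %u\n", pte_index(addr, 1024));	/* 512 */
	printf("PAE index:     %u\n", pte_index(addr, 512));	/* 0   */
	return 0;
}
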
diff --git a/include/asm-x86/page_32.h b/include/asm-x86/page_32.h
index 984998a30741..5f7257fd589b 100644
--- a/include/asm-x86/page_32.h
+++ b/include/asm-x86/page_32.h
@@ -48,7 +48,6 @@ typedef unsigned long pgprotval_t;
 typedef unsigned long	phys_addr_t;
 
 typedef union { pteval_t pte, pte_low; } pte_t;
-typedef pte_t boot_pte_t;
 
 #endif	/* __ASSEMBLY__ */
 #endif	/* CONFIG_X86_PAE */
diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h
index 80dd438642f6..a842c7222b1e 100644
--- a/include/asm-x86/pgtable_32.h
+++ b/include/asm-x86/pgtable_32.h
@@ -52,10 +52,6 @@ void paging_init(void);
 #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
 #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
 
-#define TWOLEVEL_PGDIR_SHIFT	22
-#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
-#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
-
 /* Just any arbitrary offset to the start of the vmalloc VM area: the
  * current 8MB value just means that there will be a 8MB "hole" after the
  * physical memory until the kernel virtual memory starts.  That means that