aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorSiddha, Suresh B <suresh.b.siddha@intel.com>2005-11-05 11:25:53 -0500
committerLinus Torvalds <torvalds@g5.osdl.org>2005-11-14 22:55:14 -0500
commitf6c2e3330d3fdd5474bc3756da46fca889a30e33 (patch)
tree41b7534c39a6aea4ae1f0a75c6eb03f6e4b6312c /arch
parent69d81fcde7797342417591ba7affb372b9c86eae (diff)
[PATCH] x86_64: Unmap NULL during early bootup
We should zap the low mappings, as soon as possible, so that we can catch kernel bugs more effectively. Previously early boot had NULL mapped and didn't trap on NULL references. This patch introduces boot_level4_pgt, which will always have low identity addresses mapped. During boot, all the processors will use this as their level4 pgt. On BP, we will switch to init_level4_pgt as soon as we enter C code and zap the low mappings as soon as we are done with the usage of identity low mapped addresses. On AP's we will zap the low mappings as soon as we jump to C code. Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com> Signed-off-by: Ashok Raj <ashok.raj@intel.com> Signed-off-by: Andi Kleen <ak@suse.de> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch')
-rw-r--r--arch/i386/kernel/acpi/boot.c2
-rw-r--r--arch/x86_64/kernel/head.S37
-rw-r--r--arch/x86_64/kernel/head64.c8
-rw-r--r--arch/x86_64/kernel/mpparse.c2
-rw-r--r--arch/x86_64/kernel/setup.c2
-rw-r--r--arch/x86_64/kernel/setup64.c2
-rw-r--r--arch/x86_64/kernel/smpboot.c3
-rw-r--r--arch/x86_64/mm/init.c28
8 files changed, 53 insertions, 31 deletions
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index 82754bb80e20..f36677241ecd 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -542,7 +542,7 @@ acpi_scan_rsdp(unsigned long start, unsigned long length)
542 * RSDP signature. 542 * RSDP signature.
543 */ 543 */
544 for (offset = 0; offset < length; offset += 16) { 544 for (offset = 0; offset < length; offset += 16) {
545 if (strncmp((char *)(start + offset), "RSD PTR ", sig_len)) 545 if (strncmp((char *)(phys_to_virt(start) + offset), "RSD PTR ", sig_len))
546 continue; 546 continue;
547 return (start + offset); 547 return (start + offset);
548 } 548 }
diff --git a/arch/x86_64/kernel/head.S b/arch/x86_64/kernel/head.S
index b92e5f45ed46..15290968e49d 100644
--- a/arch/x86_64/kernel/head.S
+++ b/arch/x86_64/kernel/head.S
@@ -12,6 +12,7 @@
12 12
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14#include <linux/threads.h> 14#include <linux/threads.h>
15#include <linux/init.h>
15#include <asm/desc.h> 16#include <asm/desc.h>
16#include <asm/segment.h> 17#include <asm/segment.h>
17#include <asm/page.h> 18#include <asm/page.h>
@@ -70,7 +71,7 @@ startup_32:
70 movl %eax, %cr4 71 movl %eax, %cr4
71 72
72 /* Setup early boot stage 4 level pagetables */ 73 /* Setup early boot stage 4 level pagetables */
73 movl $(init_level4_pgt - __START_KERNEL_map), %eax 74 movl $(boot_level4_pgt - __START_KERNEL_map), %eax
74 movl %eax, %cr3 75 movl %eax, %cr3
75 76
76 /* Setup EFER (Extended Feature Enable Register) */ 77 /* Setup EFER (Extended Feature Enable Register) */
@@ -113,7 +114,7 @@ startup_64:
113 movq %rax, %cr4 114 movq %rax, %cr4
114 115
115 /* Setup early boot stage 4 level pagetables. */ 116 /* Setup early boot stage 4 level pagetables. */
116 movq $(init_level4_pgt - __START_KERNEL_map), %rax 117 movq $(boot_level4_pgt - __START_KERNEL_map), %rax
117 movq %rax, %cr3 118 movq %rax, %cr3
118 119
119 /* Check if nx is implemented */ 120 /* Check if nx is implemented */
@@ -240,20 +241,10 @@ ljumpvector:
240ENTRY(stext) 241ENTRY(stext)
241ENTRY(_stext) 242ENTRY(_stext)
242 243
243 /*
244 * This default setting generates an ident mapping at address 0x100000
245 * and a mapping for the kernel that precisely maps virtual address
246 * 0xffffffff80000000 to physical address 0x000000. (always using
247 * 2Mbyte large pages provided by PAE mode)
248 */
249.org 0x1000 244.org 0x1000
250ENTRY(init_level4_pgt) 245ENTRY(init_level4_pgt)
251 .quad 0x0000000000002007 + __PHYSICAL_START /* -> level3_ident_pgt */ 246 /* This gets initialized in x86_64_start_kernel */
252 .fill 255,8,0 247 .fill 512,8,0
253 .quad 0x000000000000a007 + __PHYSICAL_START
254 .fill 254,8,0
255 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
256 .quad 0x0000000000003007 + __PHYSICAL_START /* -> level3_kernel_pgt */
257 248
258.org 0x2000 249.org 0x2000
259ENTRY(level3_ident_pgt) 250ENTRY(level3_ident_pgt)
@@ -350,6 +341,24 @@ ENTRY(wakeup_level4_pgt)
350 .quad 0x0000000000003007 + __PHYSICAL_START /* -> level3_kernel_pgt */ 341 .quad 0x0000000000003007 + __PHYSICAL_START /* -> level3_kernel_pgt */
351#endif 342#endif
352 343
344#ifndef CONFIG_HOTPLUG_CPU
345 __INITDATA
346#endif
347 /*
348 * This default setting generates an ident mapping at address 0x100000
349 * and a mapping for the kernel that precisely maps virtual address
350 * 0xffffffff80000000 to physical address 0x000000. (always using
351 * 2Mbyte large pages provided by PAE mode)
352 */
353 .align PAGE_SIZE
354ENTRY(boot_level4_pgt)
355 .quad 0x0000000000002007 + __PHYSICAL_START /* -> level3_ident_pgt */
356 .fill 255,8,0
357 .quad 0x000000000000a007 + __PHYSICAL_START
358 .fill 254,8,0
359 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
360 .quad 0x0000000000003007 + __PHYSICAL_START /* -> level3_kernel_pgt */
361
353 .data 362 .data
354 363
355 .align 16 364 .align 16
diff --git a/arch/x86_64/kernel/head64.c b/arch/x86_64/kernel/head64.c
index cf6ab147a2a5..e0f12d49db4c 100644
--- a/arch/x86_64/kernel/head64.c
+++ b/arch/x86_64/kernel/head64.c
@@ -19,6 +19,7 @@
19#include <asm/bootsetup.h> 19#include <asm/bootsetup.h>
20#include <asm/setup.h> 20#include <asm/setup.h>
21#include <asm/desc.h> 21#include <asm/desc.h>
22#include <asm/pgtable.h>
22 23
23/* Don't add a printk in there. printk relies on the PDA which is not initialized 24/* Don't add a printk in there. printk relies on the PDA which is not initialized
24 yet. */ 25 yet. */
@@ -86,6 +87,13 @@ void __init x86_64_start_kernel(char * real_mode_data)
86 set_intr_gate(i, early_idt_handler); 87 set_intr_gate(i, early_idt_handler);
87 asm volatile("lidt %0" :: "m" (idt_descr)); 88 asm volatile("lidt %0" :: "m" (idt_descr));
88 clear_bss(); 89 clear_bss();
90
91 /*
92 * switch to init_level4_pgt from boot_level4_pgt
93 */
94 memcpy(init_level4_pgt, boot_level4_pgt, PTRS_PER_PGD*sizeof(pgd_t));
95 asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
96
89 pda_init(0); 97 pda_init(0);
90 copy_bootdata(real_mode_data); 98 copy_bootdata(real_mode_data);
91#ifdef CONFIG_SMP 99#ifdef CONFIG_SMP
diff --git a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c
index 8f6958e79455..4a581d1cefbd 100644
--- a/arch/x86_64/kernel/mpparse.c
+++ b/arch/x86_64/kernel/mpparse.c
@@ -549,7 +549,7 @@ void __init get_smp_config (void)
549 * Read the physical hardware table. Anything here will 549 * Read the physical hardware table. Anything here will
550 * override the defaults. 550 * override the defaults.
551 */ 551 */
552 if (!smp_read_mpc((void *)(unsigned long)mpf->mpf_physptr)) { 552 if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr))) {
553 smp_found_config = 0; 553 smp_found_config = 0;
554 printk(KERN_ERR "BIOS bug, MP table errors detected!...\n"); 554 printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
555 printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n"); 555 printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 99cfa751949a..f76f3743f5d7 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -571,6 +571,8 @@ void __init setup_arch(char **cmdline_p)
571 571
572 init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT)); 572 init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
573 573
574 zap_low_mappings(0);
575
574#ifdef CONFIG_ACPI 576#ifdef CONFIG_ACPI
575 /* 577 /*
576 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT). 578 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
diff --git a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c
index 79190891fbc5..06dc354375c3 100644
--- a/arch/x86_64/kernel/setup64.c
+++ b/arch/x86_64/kernel/setup64.c
@@ -141,7 +141,6 @@ void pda_init(int cpu)
141 panic("cannot allocate irqstack for cpu %d", cpu); 141 panic("cannot allocate irqstack for cpu %d", cpu);
142 } 142 }
143 143
144 asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
145 144
146 pda->irqstackptr += IRQSTACKSIZE-64; 145 pda->irqstackptr += IRQSTACKSIZE-64;
147} 146}
@@ -197,6 +196,7 @@ void __cpuinit cpu_init (void)
197 /* CPU 0 is initialised in head64.c */ 196 /* CPU 0 is initialised in head64.c */
198 if (cpu != 0) { 197 if (cpu != 0) {
199 pda_init(cpu); 198 pda_init(cpu);
199 zap_low_mappings(cpu);
200 } else 200 } else
201 estacks = boot_exception_stacks; 201 estacks = boot_exception_stacks;
202 202
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 658a81b33f3b..3393fc08823b 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -1063,9 +1063,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
1063 */ 1063 */
1064void __init smp_cpus_done(unsigned int max_cpus) 1064void __init smp_cpus_done(unsigned int max_cpus)
1065{ 1065{
1066#ifndef CONFIG_HOTPLUG_CPU
1067 zap_low_mappings();
1068#endif
1069 smp_cleanup_boot(); 1066 smp_cleanup_boot();
1070 1067
1071#ifdef CONFIG_X86_IO_APIC 1068#ifdef CONFIG_X86_IO_APIC
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 2b1d6c382396..be483a1d7b54 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -312,12 +312,19 @@ void __init init_memory_mapping(unsigned long start, unsigned long end)
312 312
313extern struct x8664_pda cpu_pda[NR_CPUS]; 313extern struct x8664_pda cpu_pda[NR_CPUS];
314 314
315/* Assumes all CPUs still execute in init_mm */ 315void __cpuinit zap_low_mappings(int cpu)
316void zap_low_mappings(void)
317{ 316{
318 pgd_t *pgd = pgd_offset_k(0UL); 317 if (cpu == 0) {
319 pgd_clear(pgd); 318 pgd_t *pgd = pgd_offset_k(0UL);
320 flush_tlb_all(); 319 pgd_clear(pgd);
320 } else {
321 /*
322 * For AP's, zap the low identity mappings by changing the cr3
323 * to init_level4_pgt and doing local flush tlb all
324 */
325 asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
326 }
327 __flush_tlb_all();
321} 328}
322 329
323/* Compute zone sizes for the DMA and DMA32 zones in a node. */ 330/* Compute zone sizes for the DMA and DMA32 zones in a node. */
@@ -474,14 +481,13 @@ void __init mem_init(void)
474 datasize >> 10, 481 datasize >> 10,
475 initsize >> 10); 482 initsize >> 10);
476 483
484#ifdef CONFIG_SMP
477 /* 485 /*
478 * Subtle. SMP is doing its boot stuff late (because it has to 486 * Sync boot_level4_pgt mappings with the init_level4_pgt
479 * fork idle threads) - but it also needs low mappings for the 487 * except for the low identity mappings which are already zapped
480 * protected-mode entry to work. We zap these entries only after 488 * in init_level4_pgt. This sync-up is essential for AP's bringup
481 * the WP-bit has been tested.
482 */ 489 */
483#ifndef CONFIG_SMP 490 memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
484 zap_low_mappings();
485#endif 491#endif
486} 492}
487 493