path: root/arch/x86_64/mm
author     Siddha, Suresh B <suresh.b.siddha@intel.com>   2005-11-05 11:25:53 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>          2005-11-14 22:55:14 -0500
commit     f6c2e3330d3fdd5474bc3756da46fca889a30e33
tree       41b7534c39a6aea4ae1f0a75c6eb03f6e4b6312c /arch/x86_64/mm
parent     69d81fcde7797342417591ba7affb372b9c86eae
[PATCH] x86_64: Unmap NULL during early bootup
We should zap the low mappings as soon as possible, so that we can catch kernel bugs more effectively. Previously, early boot had NULL mapped and did not trap on NULL references.

This patch introduces boot_level4_pgt, which always has the low identity addresses mapped. During boot, all processors use it as their level4 pgt. On the BP, we switch to init_level4_pgt as soon as we enter C code, and zap the low mappings as soon as we are done using the identity-mapped low addresses. On the APs, we zap the low mappings as soon as we jump to C code.

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Ashok Raj <ashok.raj@intel.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
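The bringup code that consumes zap_low_mappings() lives outside the arch/x86_64/mm diff shown below; as a rough sketch of the flow the message describes (bp_first_c_code() and ap_first_c_code() are hypothetical stand-ins for the real BP/AP C entry points, not names from this patch):

    /*
     * Illustrative sketch only, not part of this patch: how the BP and
     * APs are expected to use boot_level4_pgt and zap_low_mappings().
     */
    extern pgd_t init_level4_pgt[];
    void zap_low_mappings(int cpu);

    void bp_first_c_code(void)          /* hypothetical BP entry point */
    {
            /* BP: switch off boot_level4_pgt as soon as we reach C code. */
            asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));

            /*
             * ... bring up the APs; their trampoline still relies on the
             * low identity mappings kept in boot_level4_pgt ...
             */

            zap_low_mappings(0);        /* done with low addresses: unmap NULL */
    }

    void ap_first_c_code(int cpu)       /* hypothetical AP entry point */
    {
            /* AP: loads init_level4_pgt into %cr3 and flushes the TLB. */
            zap_low_mappings(cpu);
    }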
Diffstat (limited to 'arch/x86_64/mm')
-rw-r--r--  arch/x86_64/mm/init.c  28
1 files changed, 17 insertions, 11 deletions
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 2b1d6c382396..be483a1d7b54 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -312,12 +312,19 @@ void __init init_memory_mapping(unsigned long start, unsigned long end)
 
 extern struct x8664_pda cpu_pda[NR_CPUS];
 
-/* Assumes all CPUs still execute in init_mm */
-void zap_low_mappings(void)
+void __cpuinit zap_low_mappings(int cpu)
 {
-	pgd_t *pgd = pgd_offset_k(0UL);
-	pgd_clear(pgd);
-	flush_tlb_all();
+	if (cpu == 0) {
+		pgd_t *pgd = pgd_offset_k(0UL);
+		pgd_clear(pgd);
+	} else {
+		/*
+		 * For AP's, zap the low identity mappings by changing the cr3
+		 * to init_level4_pgt and doing local flush tlb all
+		 */
+		asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
+	}
+	__flush_tlb_all();
 }
 
 /* Compute zone sizes for the DMA and DMA32 zones in a node. */
@@ -474,14 +481,13 @@ void __init mem_init(void)
 		datasize >> 10,
 		initsize >> 10);
 
+#ifdef CONFIG_SMP
 	/*
-	 * Subtle. SMP is doing its boot stuff late (because it has to
-	 * fork idle threads) - but it also needs low mappings for the
-	 * protected-mode entry to work. We zap these entries only after
-	 * the WP-bit has been tested.
+	 * Sync boot_level4_pgt mappings with the init_level4_pgt
+	 * except for the low identity mappings which are already zapped
+	 * in init_level4_pgt. This sync-up is essential for AP's bringup
 	 */
-#ifndef CONFIG_SMP
-	zap_low_mappings();
-#endif
+	memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
+#endif
 }
 
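A note on the memcpy in the second hunk: the copy deliberately starts at entry 1. On x86_64, pgd entry 0 covers the low virtual range that holds the identity mapping, so skipping it leaves boot_level4_pgt's low mapping intact for AP bringup while syncing every other kernel mapping from init_level4_pgt. A minimal standalone sketch of the same pattern (assuming PTRS_PER_PGD == 512 and flattening pgd_t to a plain 64-bit word; both are simplifications, not the kernel's definitions):

    #include <string.h>
    #include <stdint.h>

    #define PTRS_PER_PGD 512            /* level4 entries on x86_64 */
    typedef uint64_t pgd_t;             /* flattened stand-in for the kernel's pgd_t */

    static pgd_t boot_level4_pgt[PTRS_PER_PGD]; /* entry 0: low identity map */
    static pgd_t init_level4_pgt[PTRS_PER_PGD]; /* entry 0 already zapped */

    static void sync_boot_pgt(void)
    {
            /* Copy entries 1..PTRS_PER_PGD-1, leaving entry 0 untouched so
             * the boot table keeps the low identity mapping the APs need. */
            memcpy(boot_level4_pgt + 1, init_level4_pgt + 1,
                   (PTRS_PER_PGD - 1) * sizeof(pgd_t));
    }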