about summary refs log tree commit diff stats
path: root/arch/x86/mm/init_64.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/x86/mm/init_64.c')
-rw-r--r--arch/x86/mm/init_64.c13
1 files changed, 4 insertions, 9 deletions
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 9a471be4f5f1..eabcaed76c28 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -354,17 +354,10 @@ static void __init find_early_table_space(unsigned long end)
 	 * need roughly 0.5KB per GB.
 	 */
 	start = 0x8000;
-	table_start = find_e820_area(start, end, tables);
+	table_start = find_e820_area(start, end, tables, PAGE_SIZE);
 	if (table_start == -1UL)
 		panic("Cannot find space for the kernel page tables");
 
-	/*
-	 * When you have a lot of RAM like 256GB, early_table will not fit
-	 * into 0x8000 range, find_e820_area() will find area after kernel
-	 * bss but the table_start is not page aligned, so need to round it
-	 * up to avoid overlap with bss:
-	 */
-	table_start = round_up(table_start, PAGE_SIZE);
 	table_start >>= PAGE_SHIFT;
 	table_end = table_start;
 
@@ -420,7 +413,9 @@ void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
 	mmu_cr4_features = read_cr4();
 	__flush_tlb_all();
 
-	reserve_early(table_start << PAGE_SHIFT, table_end << PAGE_SHIFT, "PGTABLE");
+	if (!after_bootmem)
+		reserve_early(table_start << PAGE_SHIFT,
+				table_end << PAGE_SHIFT, "PGTABLE");
 }
 
 #ifndef CONFIG_NUMA