Diffstat (limited to 'arch/x86/mm/init.c')
-rw-r--r--  arch/x86/mm/init.c  56
1 file changed, 14 insertions(+), 42 deletions(-)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 947f42abe820..286d289b039b 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -18,9 +18,9 @@
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
-unsigned long __initdata e820_table_start;
-unsigned long __meminitdata e820_table_end;
-unsigned long __meminitdata e820_table_top;
+unsigned long __initdata pgt_buf_start;
+unsigned long __meminitdata pgt_buf_end;
+unsigned long __meminitdata pgt_buf_top;
 
 int after_bootmem;
 
@@ -33,7 +33,7 @@ int direct_gbpages
 static void __init find_early_table_space(unsigned long end, int use_pse,
 					  int use_gbpages)
 {
-	unsigned long puds, pmds, ptes, tables, start;
+	unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
 	phys_addr_t base;
 
 	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
@@ -65,29 +65,20 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
 #ifdef CONFIG_X86_32
 	/* for fixmap */
 	tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
-#endif
 
-	/*
-	 * RED-PEN putting page tables only on node 0 could
-	 * cause a hotspot and fill up ZONE_DMA. The page tables
-	 * need roughly 0.5KB per GB.
-	 */
-#ifdef CONFIG_X86_32
-	start = 0x7000;
-#else
-	start = 0x8000;
+	good_end = max_pfn_mapped << PAGE_SHIFT;
 #endif
-	base = memblock_find_in_range(start, max_pfn_mapped<<PAGE_SHIFT,
-					tables, PAGE_SIZE);
+
+	base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE);
 	if (base == MEMBLOCK_ERROR)
 		panic("Cannot find space for the kernel page tables");
 
-	e820_table_start = base >> PAGE_SHIFT;
-	e820_table_end = e820_table_start;
-	e820_table_top = e820_table_start + (tables >> PAGE_SHIFT);
+	pgt_buf_start = base >> PAGE_SHIFT;
+	pgt_buf_end = pgt_buf_start;
+	pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
 
 	printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
-		end, e820_table_start << PAGE_SHIFT, e820_table_top << PAGE_SHIFT);
+		end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT);
 }
 
 struct map_range {
@@ -279,30 +270,11 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 		load_cr3(swapper_pg_dir);
 #endif
 
-#ifdef CONFIG_X86_64
-	if (!after_bootmem && !start) {
-		pud_t *pud;
-		pmd_t *pmd;
-
-		mmu_cr4_features = read_cr4();
-
-		/*
-		 * _brk_end cannot change anymore, but it and _end may be
-		 * located on different 2M pages. cleanup_highmap(), however,
-		 * can only consider _end when it runs, so destroy any
-		 * mappings beyond _brk_end here.
-		 */
-		pud = pud_offset(pgd_offset_k(_brk_end), _brk_end);
-		pmd = pmd_offset(pud, _brk_end - 1);
-		while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1))
-			pmd_clear(pmd);
-	}
-#endif
 	__flush_tlb_all();
 
-	if (!after_bootmem && e820_table_end > e820_table_start)
-		memblock_x86_reserve_range(e820_table_start << PAGE_SHIFT,
-				e820_table_end << PAGE_SHIFT, "PGTABLE");
+	if (!after_bootmem && pgt_buf_end > pgt_buf_start)
+		memblock_x86_reserve_range(pgt_buf_start << PAGE_SHIFT,
+				pgt_buf_end << PAGE_SHIFT, "PGTABLE");
 
 	if (!after_bootmem)
 		early_memtest(start, end);
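
For readers skimming the hunks above: find_early_table_space() sizes the early page-table buffer from the number of PUD, PMD and PTE pages needed to map [0, end), then asks memblock for that much space below max_pfn_mapped. Below is a minimal, standalone sketch of just that sizing arithmetic, assuming x86-64 constants (PAGE_SHIFT 12, PMD_SHIFT 21, PUD_SHIFT 30, 8-byte table entries) and ignoring the use_pse/use_gbpages large-page cases; tables_for() and page_roundup() are illustrative names, not kernel functions.

/*
 * Illustrative sketch only, not kernel code: the table-sizing arithmetic
 * performed by find_early_table_space(), assuming 4 KiB pages throughout
 * (no PSE / GB pages) and 8-byte page-table entries as on x86-64.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21			/* one PMD entry maps 2 MiB */
#define PUD_SHIFT	30			/* one PUD entry maps 1 GiB */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PUD_SIZE	(1UL << PUD_SHIFT)

/* Round up to a whole number of pages, like the kernel's roundup(). */
static unsigned long page_roundup(unsigned long x)
{
	return (x + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
}

/* Bytes of page-table memory needed to map [0, end) with 4 KiB pages. */
static unsigned long tables_for(unsigned long end)
{
	unsigned long puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	unsigned long pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	unsigned long ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long tables = 0;

	tables += page_roundup(puds * 8);	/* PUD pages */
	tables += page_roundup(pmds * 8);	/* PMD pages */
	tables += page_roundup(ptes * 8);	/* PTE pages */
	return tables;
}

int main(void)
{
	unsigned long end = 4UL << 30;		/* map the first 4 GiB */

	printf("mapping up to %#lx needs %lu KiB of page tables\n",
	       end, tables_for(end) >> 10);
	return 0;
}

With end = 4 GiB this comes to roughly 8 MiB, almost all of it PTE pages, which is why the buffer is generous: as the last hunk shows, init_memory_mapping() later reserves only [pgt_buf_start, pgt_buf_end), the portion actually consumed.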