diff options
author | Yinghai Lu <yinghai@kernel.org> | 2012-11-16 22:39:06 -0500 |
---|---|---|
committer | H. Peter Anvin <hpa@linux.intel.com> | 2012-11-17 14:59:29 -0500 |
commit | 719272c45b821d38608fc333700bde1a89c56c59 (patch) | |
tree | cf215602048fcb36c8fc50c731db986d5cea51aa /arch/x86/mm/init.c | |
parent | ddd3509df8f8d4f1cf4784f559d702ce00dc8846 (diff) |
x86, mm: only call early_ioremap_page_table_range_init() once
On 32-bit, before the patchset that sets up page tables only for RAM, we only
called that function one time.
Now we call it during every init_memory_mapping() invocation if there are holes
under max_low_pfn.
We should only call it one time, after all ranges under max_low_pfn get
mapped, just like we did before.
That also avoids the risk of running out of pgt_buf in the BRK area.
We need to update page_table_range_init() to first count the pages required for
the kmap page tables, and then use the newly added alloc_low_pages() to obtain
pages in sequence.
That conforms to the requirement that pages must be allocated in low-to-high order.
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1353123563-3103-30-git-send-email-yinghai@kernel.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86/mm/init.c')
-rw-r--r-- | arch/x86/mm/init.c | 13 |
1 files changed, 5 insertions, 8 deletions
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index cb4f8ba70ecc..bed4888c6f4f 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c | |||
@@ -343,14 +343,6 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, | |||
343 | ret = kernel_physical_mapping_init(mr[i].start, mr[i].end, | 343 | ret = kernel_physical_mapping_init(mr[i].start, mr[i].end, |
344 | mr[i].page_size_mask); | 344 | mr[i].page_size_mask); |
345 | 345 | ||
346 | #ifdef CONFIG_X86_32 | ||
347 | early_ioremap_page_table_range_init(); | ||
348 | |||
349 | load_cr3(swapper_pg_dir); | ||
350 | #endif | ||
351 | |||
352 | __flush_tlb_all(); | ||
353 | |||
354 | add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT); | 346 | add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT); |
355 | 347 | ||
356 | return ret >> PAGE_SHIFT; | 348 | return ret >> PAGE_SHIFT; |
@@ -447,7 +439,12 @@ void __init init_mem_mapping(void) | |||
447 | /* can we preseve max_low_pfn ?*/ | 439 | /* can we preseve max_low_pfn ?*/ |
448 | max_low_pfn = max_pfn; | 440 | max_low_pfn = max_pfn; |
449 | } | 441 | } |
442 | #else | ||
443 | early_ioremap_page_table_range_init(); | ||
444 | load_cr3(swapper_pg_dir); | ||
445 | __flush_tlb_all(); | ||
450 | #endif | 446 | #endif |
447 | |||
451 | early_memtest(0, max_pfn_mapped << PAGE_SHIFT); | 448 | early_memtest(0, max_pfn_mapped << PAGE_SHIFT); |
452 | } | 449 | } |
453 | 450 | ||