author     Yinghai Lu <yinghai@kernel.org>    2012-11-16 22:38:44 -0500
committer  H. Peter Anvin <hpa@linux.intel.com>    2012-11-17 14:59:06 -0500
commit     c14fa0b63b5b4234667c03fdc3314c0881caa514 (patch)
tree       15efa536cda6bd4b860d4827ba5f2f155c4e678d
parent     84f1ae30bb68d8da98bca7ff2c2b825b2ac8c9a5 (diff)
x86, mm: Find early page table buffer together
We should not search for the early page table buffer in every call to init_memory_mapping(); do it once, up front, in init_mem_mapping(). At the same time, move early_memtest() down and remove the after_bootmem check.

-v2: fix early_memtest() on 32-bit by passing max_pfn_mapped instead.

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1353123563-3103-8-git-send-email-yinghai@kernel.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
-rw-r--r--	arch/x86/mm/init.c	66
1 file changed, 34 insertions(+), 32 deletions(-)
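To make the shape of the change easier to see before reading the diff, here is a minimal, self-contained C sketch of the call structure after the patch. The function names mirror the patch, but this is not kernel code: the printf bodies, the hard-coded page shift of 12, the parameter lists, and main() are hypothetical stand-ins. The point it illustrates is that the one-time work (find_early_table_space(), early_memtest()) is hoisted into init_mem_mapping(), so init_memory_mapping() only maps a range and needs no after_bootmem check.

/*
 * Hypothetical userspace stand-in for the post-patch call structure.
 * Names mirror the patch; bodies and main() are illustrative only.
 * Assumes a 64-bit unsigned long (as in the CONFIG_X86_64 branch).
 */
#include <stdio.h>

static void find_early_table_space(unsigned long start, unsigned long end)
{
	/* one-time sizing of the page table buffer, done up front */
	printf("reserve pgt buffer for %#lx-%#lx\n", start, end);
}

static void early_memtest(unsigned long start, unsigned long end)
{
	/* one-time memory test, done after all ranges are mapped */
	printf("memtest %#lx-%#lx\n", start, end);
}

static unsigned long init_memory_mapping(unsigned long start, unsigned long end)
{
	/* per-range work only: no buffer search, no after_bootmem check */
	printf("map %#lx-%#lx\n", start, end);
	return end >> 12;	/* highest mapped pfn */
}

static void init_mem_mapping(unsigned long max_low_pfn, unsigned long max_pfn)
{
	unsigned long max_pfn_mapped;

	find_early_table_space(0, max_pfn << 12);
	max_pfn_mapped = init_memory_mapping(0, max_low_pfn << 12);
	if (max_pfn > max_low_pfn)
		max_pfn_mapped = init_memory_mapping(1UL << 32, max_pfn << 12);
	early_memtest(0, max_pfn_mapped << 12);
}

int main(void)
{
	/* e.g. 4 GiB of low memory, 8 GiB total (pfn = 4 KiB pages) */
	init_mem_mapping(1UL << 20, 1UL << 21);
	return 0;
}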
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 51f919febf64..1ce0d033fafc 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -274,16 +274,6 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	memset(mr, 0, sizeof(mr));
 	nr_range = split_mem_range(mr, 0, start, end);
 
-	/*
-	 * Find space for the kernel direct mapping tables.
-	 *
-	 * Later we should allocate these tables in the local node of the
-	 * memory mapped. Unfortunately this is done currently before the
-	 * nodes are discovered.
-	 */
-	if (!after_bootmem)
-		find_early_table_space(start, end);
-
 	for (i = 0; i < nr_range; i++)
 		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
 						   mr[i].page_size_mask);
@@ -296,6 +286,36 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 
 	__flush_tlb_all();
 
+	return ret >> PAGE_SHIFT;
+}
+
+void __init init_mem_mapping(void)
+{
+	probe_page_size_mask();
+
+	/*
+	 * Find space for the kernel direct mapping tables.
+	 *
+	 * Later we should allocate these tables in the local node of the
+	 * memory mapped. Unfortunately this is done currently before the
+	 * nodes are discovered.
+	 */
+#ifdef CONFIG_X86_64
+	find_early_table_space(0, max_pfn<<PAGE_SHIFT);
+#else
+	find_early_table_space(0, max_low_pfn<<PAGE_SHIFT);
+#endif
+	max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
+	max_pfn_mapped = max_low_pfn_mapped;
+
+#ifdef CONFIG_X86_64
+	if (max_pfn > max_low_pfn) {
+		max_pfn_mapped = init_memory_mapping(1UL<<32,
+					 max_pfn<<PAGE_SHIFT);
+		/* can we preseve max_low_pfn ?*/
+		max_low_pfn = max_pfn;
+	}
+#endif
 	/*
 	 * Reserve the kernel pagetable pages we used (pgt_buf_start -
 	 * pgt_buf_end) and free the other ones (pgt_buf_end - pgt_buf_top)
@@ -311,32 +331,14 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	 * RO all the pagetable pages, including the ones that are beyond
 	 * pgt_buf_end at that time.
 	 */
-	if (!after_bootmem && pgt_buf_end > pgt_buf_start)
+	if (pgt_buf_end > pgt_buf_start)
 		x86_init.mapping.pagetable_reserve(PFN_PHYS(pgt_buf_start),
 					PFN_PHYS(pgt_buf_end));
 
-	if (!after_bootmem)
-		early_memtest(start, end);
+	/* stop the wrong using */
+	pgt_buf_top = 0;
 
-	return ret >> PAGE_SHIFT;
-}
-
-void __init init_mem_mapping(void)
-{
-	probe_page_size_mask();
-
-	/* max_pfn_mapped is updated here */
-	max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
-	max_pfn_mapped = max_low_pfn_mapped;
-
-#ifdef CONFIG_X86_64
-	if (max_pfn > max_low_pfn) {
-		max_pfn_mapped = init_memory_mapping(1UL<<32,
-					 max_pfn<<PAGE_SHIFT);
-		/* can we preseve max_low_pfn ?*/
-		max_low_pfn = max_pfn;
-	}
-#endif
-}
+	early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
 }
 
 /*