author		Yinghai Lu <yinghai@kernel.org>		2012-11-16 22:38:41 -0500
committer	H. Peter Anvin <hpa@linux.intel.com>	2012-11-17 14:59:03 -0500
commit		22ddfcaa0dbae992332381d41b8a1fbc72269a13
tree		01791d16254b54f6aad4b5381675b4269bda4d45
parent		2086fe1159a9a75233b533986ccfcbd192bd9372
x86, mm: Move init_memory_mapping calling out of setup.c
init_memory_mapping() is currently called twice, and it will later be called for every RAM range. Gather all of the related init_memory_mapping() calls together and move them out of setup.c.

Note that this effectively reverts commit 1bbbbe7 ("x86: Exclude E820_RESERVED regions and memory holes above 4 GB from direct mapping"); that will be addressed later with a complete solution that also handles holes below 4 GB.

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1353123563-3103-5-git-send-email-yinghai@kernel.org
Reviewed-by: Pekka Enberg <penberg@kernel.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
-rw-r--r--  arch/x86/include/asm/init.h     |  1 -
-rw-r--r--  arch/x86/include/asm/pgtable.h  |  2 +-
-rw-r--r--  arch/x86/kernel/setup.c         | 27 +--------------------------
-rw-r--r--  arch/x86/mm/init.c              | 19 ++++++++++++++++++-
4 files changed, 20 insertions(+), 29 deletions(-)
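In outline, the patch collapses the open-coded mapping sequence in setup_arch() into a single helper in arch/x86/mm/init.c. A condensed sketch of the resulting call structure, simplified from the hunks below (not the verbatim source; surrounding code and comments trimmed):

	/* arch/x86/kernel/setup.c: setup_arch() now only does */
	init_gbpages();
	init_mem_mapping();

	/* arch/x86/mm/init.c: the new single entry point */
	void __init init_mem_mapping(void)
	{
		probe_page_size_mask();	/* now static to this file */

		/* map [0, max_low_pfn); max_pfn_mapped is updated here */
		max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn << PAGE_SHIFT);
		max_pfn_mapped = max_low_pfn_mapped;
	#ifdef CONFIG_X86_64
		if (max_pfn > max_low_pfn) {	/* memory above 4 GiB */
			max_pfn_mapped = init_memory_mapping(1UL << 32,
							     max_pfn << PAGE_SHIFT);
			max_low_pfn = max_pfn;
		}
	#endif
	}

The only interface change visible outside arch/x86/mm/init.c is that init_mem_mapping() replaces probe_page_size_mask() in the pgtable.h declaration.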
diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h
index adcc0ae73d09..4f13998be59a 100644
--- a/arch/x86/include/asm/init.h
+++ b/arch/x86/include/asm/init.h
@@ -12,7 +12,6 @@ kernel_physical_mapping_init(unsigned long start,
 					     unsigned long end,
 					     unsigned long page_size_mask);
 
-
 extern unsigned long __initdata pgt_buf_start;
 extern unsigned long __meminitdata pgt_buf_end;
 extern unsigned long __meminitdata pgt_buf_top;
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 98ac76dc4eae..dd1a88832d25 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -602,7 +602,7 @@ static inline int pgd_none(pgd_t pgd)
 #ifndef __ASSEMBLY__
 
 extern int direct_gbpages;
-void probe_page_size_mask(void);
+void init_mem_mapping(void);
 
 /* local pte updates need not use xchg for locking */
 static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 01fb5f9baf90..23b079fb93fc 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -913,34 +913,9 @@ void __init setup_arch(char **cmdline_p)
 	setup_real_mode();
 
 	init_gbpages();
-	probe_page_size_mask();
 
-	/* max_pfn_mapped is updated here */
-	max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
-	max_pfn_mapped = max_low_pfn_mapped;
+	init_mem_mapping();
 
-#ifdef CONFIG_X86_64
-	if (max_pfn > max_low_pfn) {
-		int i;
-		unsigned long start, end;
-		unsigned long start_pfn, end_pfn;
-
-		for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn,
-				       NULL) {
-
-			end = PFN_PHYS(end_pfn);
-			if (end <= (1UL<<32))
-				continue;
-
-			start = PFN_PHYS(start_pfn);
-			max_pfn_mapped = init_memory_mapping(
-						max((1UL<<32), start), end);
-		}
-
-		/* can we preseve max_low_pfn ?*/
-		max_low_pfn = max_pfn;
-	}
-#endif
 	memblock.current_limit = get_max_mapped();
 	dma_contiguous_reserve(0);
 
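The loop removed above walked each memblock range and clamped its start to the 4 GiB boundary, so only the portion of each range above 4 GiB was mapped; the replacement (shown in the next hunk) maps [4 GiB, max_pfn) as one range, which is the revert noted in the commit message. A self-contained sketch of that clamping arithmetic, with hypothetical PFN values and PFN_PHYS() reimplemented here purely for illustration (assumes 4 KiB pages and a 64-bit build, so 1UL << 32 does not overflow):

	#include <stdio.h>

	#define PAGE_SHIFT	12	/* 4 KiB pages, as on x86 */
	#define PFN_PHYS(pfn)	((unsigned long)(pfn) << PAGE_SHIFT)

	int main(void)
	{
		/* Hypothetical RAM range straddling the 4 GiB boundary:
		 * PFNs 0xf0000..0x140000, i.e. 0xf0000000..0x140000000 physical.
		 */
		unsigned long start   = PFN_PHYS(0xf0000UL);
		unsigned long end     = PFN_PHYS(0x140000UL);
		unsigned long four_gb = 1UL << 32;

		if (end > four_gb) {	/* the loop skipped ranges entirely below 4 GiB */
			/* Same clamp the removed loop applied via max():
			 * map only the part of the range above 4 GiB.
			 */
			unsigned long mapped = start > four_gb ? start : four_gb;
			printf("would map %#lx-%#lx\n", mapped, end);
		}
		return 0;
	}

On this input the old loop would map 0x100000000-0x140000000, leaving everything below 4 GiB to the low-range init_memory_mapping() call.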
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 701abbc24735..9e17f9e18a21 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -37,7 +37,7 @@ struct map_range {
 
 static int page_size_mask;
 
-void probe_page_size_mask(void)
+static void __init probe_page_size_mask(void)
 {
 #if !defined(CONFIG_DEBUG_PAGEALLOC) && !defined(CONFIG_KMEMCHECK)
 	/*
@@ -315,6 +315,23 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	return ret >> PAGE_SHIFT;
 }
 
+void __init init_mem_mapping(void)
+{
+	probe_page_size_mask();
+
+	/* max_pfn_mapped is updated here */
+	max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
+	max_pfn_mapped = max_low_pfn_mapped;
+
+#ifdef CONFIG_X86_64
+	if (max_pfn > max_low_pfn) {
+		max_pfn_mapped = init_memory_mapping(1UL<<32,
+						     max_pfn<<PAGE_SHIFT);
+		/* can we preseve max_low_pfn ?*/
+		max_low_pfn = max_pfn;
+	}
+#endif
+}
 
 /*
  * devmem_is_allowed() checks to see if /dev/mem access to a certain address