about | summary | refs | log | tree | commit | diff | stats
path: root/arch/x86/kernel/setup.c
diff options
context:
space:
mode:
author    Yinghai Lu <yhlu.kernel@gmail.com>  2008-07-10 23:38:26 -0400
committer Ingo Molnar <mingo@elte.hu>  2008-07-11 04:24:04 -0400
commitf361a450bf1ad14e2b003217dbf3958638631265 (patch)
tree10c1e4dcc0047f6c37387cada6a0bceba088d2d2 /arch/x86/kernel/setup.c
parentf302a5bbe5eb95f3d4227d5bd0e9b92b1b125f4f (diff)
x86: introduce max_low_pfn_mapped for 64-bit
when more than 4g memory is installed, don't map the big hole below 4g.

Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/setup.c')
-rw-r--r--  arch/x86/kernel/setup.c | 22
1 file changed, 18 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index a7c3471ea17c..86fc2d624270 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -713,14 +713,14 @@ void __init setup_arch(char **cmdline_p)
 	 * partially used pages are not usable - thus
 	 * we are rounding upwards:
 	 */
-	max_pfn = e820_end();
+	max_pfn = e820_end_of_ram_pfn();
 
 	/* preallocate 4k for mptable mpc */
 	early_reserve_e820_mpc_new();
 	/* update e820 for memory not covered by WB MTRRs */
 	mtrr_bp_init();
 	if (mtrr_trim_uncached_memory(max_pfn))
-		max_pfn = e820_end();
+		max_pfn = e820_end_of_ram_pfn();
 
 #ifdef CONFIG_X86_32
 	/* max_low_pfn get updated here */
@@ -732,12 +732,26 @@ void __init setup_arch(char **cmdline_p)
 
 	/* How many end-of-memory variables you have, grandma! */
 	/* need this before calling reserve_initrd */
-	max_low_pfn = max_pfn;
+	if (max_pfn > (1UL<<(32 - PAGE_SHIFT)))
+		max_low_pfn = e820_end_of_low_ram_pfn();
+	else
+		max_low_pfn = max_pfn;
+
 	high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
 #endif
 
 	/* max_pfn_mapped is updated here */
-	max_pfn_mapped = init_memory_mapping(0, (max_low_pfn << PAGE_SHIFT));
+	max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
+	max_pfn_mapped = max_low_pfn_mapped;
+
+#ifdef CONFIG_X86_64
+	if (max_pfn > max_low_pfn) {
+		max_pfn_mapped = init_memory_mapping(1UL<<32,
+				max_pfn<<PAGE_SHIFT);
+		/* can we preseve max_low_pfn ?*/
+		max_low_pfn = max_pfn;
+	}
+#endif
 
 	/*
 	 * NOTE: On x86-32, only from this point on, fixmaps are ready for use.