about summary refs log tree commit diff stats
path: root/arch/x86/mm/init_64.c
diff options
context:
space:
mode:
authorYinghai Lu <yhlu.kernel@gmail.com>2008-07-12 17:32:45 -0400
committerIngo Molnar <mingo@elte.hu>2008-07-13 02:19:16 -0400
commit9958e810f8ac92f8a447035ee6555420ba27b847 (patch)
tree1761a7d04aebeb9b345ea8e2b74abd555a9c3926 /arch/x86/mm/init_64.c
parent965194c15dc9e4f3bc44432b39c441c86af7f11d (diff)
x86: max_low_pfn_mapped fix, #3
optimization: try to merge adjacent ranges with the same page size in init_memory_mapping(), to get the best possible linear mappings set up. Thus when GB pages are not available, we can still use 2M pages. Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com> Cc: Suresh Siddha <suresh.b.siddha@intel.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/mm/init_64.c')
-rw-r--r-- arch/x86/mm/init_64.c | 14
1 file changed, 14 insertions(+), 0 deletions(-)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 122bcef222fc..a25cc6fa2207 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -763,6 +763,20 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
763 end_pfn = end>>PAGE_SHIFT; 763 end_pfn = end>>PAGE_SHIFT;
764 nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0); 764 nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
765 765
766 /* try to merge same page size and continuous */
767 for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
768 unsigned long old_start;
769 if (mr[i].end != mr[i+1].start ||
770 mr[i].page_size_mask != mr[i+1].page_size_mask)
771 continue;
772 /* move it */
773 old_start = mr[i].start;
774 memmove(&mr[i], &mr[i+1],
775 (nr_range - 1 - i) * sizeof (struct map_range));
776 mr[i].start = old_start;
777 nr_range--;
778 }
779
766 for (i = 0; i < nr_range; i++) 780 for (i = 0; i < nr_range; i++)
767 printk(KERN_DEBUG " %010lx - %010lx page %s\n", 781 printk(KERN_DEBUG " %010lx - %010lx page %s\n",
768 mr[i].start, mr[i].end, 782 mr[i].start, mr[i].end,