Diffstat (limited to 'arch/x86/mm/init.c')
-rw-r--r--  arch/x86/mm/init.c | 58 +++++++++++++++++++++++++++++++---------------------------
 1 file changed, 31 insertions(+), 27 deletions(-)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index ab1f6a93b527..d7aea41563b3 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -35,40 +35,44 @@ struct map_range {
         unsigned page_size_mask;
 };
 
-static void __init find_early_table_space(struct map_range *mr, unsigned long end,
-                                          int use_pse, int use_gbpages)
+/*
+ * First calculate space needed for kernel direct mapping page tables to cover
+ * mr[0].start to mr[nr_range - 1].end, while accounting for possible 2M and 1GB
+ * pages. Then find enough contiguous space for those page tables.
+ */
+static void __init find_early_table_space(struct map_range *mr, int nr_range)
 {
-        unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
+        int i;
+        unsigned long puds = 0, pmds = 0, ptes = 0, tables;
+        unsigned long start = 0, good_end;
         phys_addr_t base;
 
-        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
-        tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
-
-        if (use_gbpages) {
-                unsigned long extra;
-
-                extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
-                pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
-        } else
-                pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
+        for (i = 0; i < nr_range; i++) {
+                unsigned long range, extra;
 
-        tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
+                range = mr[i].end - mr[i].start;
+                puds += (range + PUD_SIZE - 1) >> PUD_SHIFT;
 
-        if (use_pse) {
-                unsigned long extra;
+                if (mr[i].page_size_mask & (1 << PG_LEVEL_1G)) {
+                        extra = range - ((range >> PUD_SHIFT) << PUD_SHIFT);
+                        pmds += (extra + PMD_SIZE - 1) >> PMD_SHIFT;
+                } else {
+                        pmds += (range + PMD_SIZE - 1) >> PMD_SHIFT;
+                }
 
-                extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
+                if (mr[i].page_size_mask & (1 << PG_LEVEL_2M)) {
+                        extra = range - ((range >> PMD_SHIFT) << PMD_SHIFT);
 #ifdef CONFIG_X86_32
-                extra += PMD_SIZE;
+                        extra += PMD_SIZE;
 #endif
-                /* The first 2/4M doesn't use large pages. */
-                if (mr->start < PMD_SIZE)
-                        extra += mr->end - mr->start;
-
-                ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
-        } else
-                ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
+                        ptes += (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
+                } else {
+                        ptes += (range + PAGE_SIZE - 1) >> PAGE_SHIFT;
+                }
+        }
 
+        tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
+        tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
         tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
 
 #ifdef CONFIG_X86_32
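
For a concrete feel for the new accounting, below is a minimal user-space sketch of the loop introduced above, with the same arithmetic lifted out so it can be compiled and run. The paging constants, the PG_LEVEL_* numbering, the pud_t/pmd_t/pte_t typedefs, the roundup() helper and the single 4 GiB example range are all illustrative assumptions (the real definitions live in <asm/pgtable_types.h>, and the mr[] array is built by init_memory_mapping()); this is not the kernel code itself and assumes a 64-bit host.

#include <stdio.h>

/* Assumed x86-64 4-level paging constants; not part of this patch. */
#define PAGE_SHIFT      12
#define PMD_SHIFT       21
#define PUD_SHIFT       30
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PUD_SIZE        (1UL << PUD_SHIFT)

/* PG_LEVEL_* numbering assumed to follow <asm/pgtable_types.h>. */
enum pg_level { PG_LEVEL_NONE, PG_LEVEL_4K, PG_LEVEL_2M, PG_LEVEL_1G };

/* Stand-ins for the kernel types: each table entry is 8 bytes. */
typedef unsigned long pud_t;
typedef unsigned long pmd_t;
typedef unsigned long pte_t;

struct map_range {
        unsigned long start;
        unsigned long end;
        unsigned page_size_mask;
};

/* Stand-in for the kernel's roundup() macro. */
static unsigned long roundup(unsigned long x, unsigned long to)
{
        return (x + to - 1) / to * to;
}

int main(void)
{
        /* Hypothetical layout: a single 4 GiB range mapped with 2M pages. */
        struct map_range mr[] = {
                { .start = 0, .end = 4UL << 30, .page_size_mask = 1 << PG_LEVEL_2M },
        };
        int nr_range = sizeof(mr) / sizeof(mr[0]), i;
        unsigned long puds = 0, pmds = 0, ptes = 0, tables;

        for (i = 0; i < nr_range; i++) {
                unsigned long range = mr[i].end - mr[i].start, extra;

                /* One PUD entry covers 1 GiB of address space. */
                puds += (range + PUD_SIZE - 1) >> PUD_SHIFT;

                if (mr[i].page_size_mask & (1 << PG_LEVEL_1G)) {
                        /* With 1G pages, only the unaligned tail needs PMDs. */
                        extra = range - ((range >> PUD_SHIFT) << PUD_SHIFT);
                        pmds += (extra + PMD_SIZE - 1) >> PMD_SHIFT;
                } else {
                        pmds += (range + PMD_SIZE - 1) >> PMD_SHIFT;
                }

                if (mr[i].page_size_mask & (1 << PG_LEVEL_2M)) {
                        /* With 2M pages, only the unaligned tail needs PTEs. */
                        extra = range - ((range >> PMD_SHIFT) << PMD_SHIFT);
                        ptes += (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
                } else {
                        ptes += (range + PAGE_SIZE - 1) >> PAGE_SHIFT;
                }
        }

        tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
        tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
        tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);

        printf("puds=%lu pmds=%lu ptes=%lu -> tables=%lu bytes (%lu pages)\n",
               puds, pmds, ptes, tables, tables >> PAGE_SHIFT);
        return 0;
}

On a 64-bit host this prints puds=4 pmds=2048 ptes=0 -> tables=20480 bytes (5 pages): one page of PUD entries plus four pages of PMD entries, and no PTE pages because the range is 2 MiB aligned.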
@@ -86,7 +90,7 @@ static void __init find_early_table_space(struct map_range *mr, unsigned long en
         pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
 
         printk(KERN_DEBUG "kernel direct mapping tables up to %#lx @ [mem %#010lx-%#010lx]\n",
-                end - 1, pgt_buf_start << PAGE_SHIFT,
+                mr[nr_range - 1].end - 1, pgt_buf_start << PAGE_SHIFT,
                 (pgt_buf_top << PAGE_SHIFT) - 1);
 }
 
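
Tying the reworked message to the same example: with the sketch's assumed 4 GiB / 2M-page layout, tables works out to 20480 bytes, so pgt_buf_top sits 5 pages above pgt_buf_start and the debug line would report that 20 KiB window. The upper bound it prints is now mr[nr_range - 1].end - 1, since the function no longer receives a separate end argument after the signature change.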
@@ -267,7 +271,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
          * nodes are discovered.
          */
         if (!after_bootmem)
-                find_early_table_space(&mr[0], end, use_pse, use_gbpages);
+                find_early_table_space(mr, nr_range);
 
         for (i = 0; i < nr_range; i++)
                 ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
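
A note on the design choice visible at this call site: the old interface reduced everything to a single end address plus global use_pse/use_gbpages flags, so the estimate could not reflect that different ranges are mapped with different page sizes. The new interface walks the same mr[] array that kernel_physical_mapping_init() consumes here, letting each range's page_size_mask determine where 2M and 1G coverage is assumed. That is also what makes the deleted "The first 2/4M doesn't use large pages" special case unnecessary: a low range mapped only with 4K pages has neither PG_LEVEL bit set, so the loop already charges it full PTE coverage.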