Diffstat (limited to 'arch/x86/mm/init.c')
-rw-r--r--  arch/x86/mm/init.c  117
1 file changed, 59 insertions, 58 deletions
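The two hunks below are a pure code move: find_early_table_space() is deleted from its old position above probe_page_size_mask() and re-added, body unchanged, just after split_mem_range(). The function computes a worst-case upper bound on the memory the direct-mapping page tables may need: one PUD entry per 1G of each range, PMD entries only for the tail that 1G pages cannot cover (when 1G pages are usable), and PTE entries only for the tail that 2M pages cannot cover. As a rough illustration of that arithmetic outside the kernel, here is a minimal userspace sketch for a single range; the shift constants are the standard x86-64 values (4K pages, 2M PMDs, 1G PUDs) and the 8-byte entry size stands in for sizeof(pud_t)/sizeof(pmd_t)/sizeof(pte_t) — it is not kernel code.

#include <stdio.h>

#define PAGE_SHIFT 12                      /* 4K pages */
#define PMD_SHIFT  21                      /* 2M large pages */
#define PUD_SHIFT  30                      /* 1G large pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PMD_SIZE   (1UL << PMD_SHIFT)
#define PUD_SIZE   (1UL << PUD_SHIFT)

/* Round x up to a multiple of the power-of-two a. */
#define roundup(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
        /* One range: map 0..4G, with 2M pages allowed but 1G pages not. */
        unsigned long range = 4UL << 30;
        int use_1g = 0, use_2m = 1;
        unsigned long puds, pmds, ptes, extra, tables;

        /* One PUD entry per 1G of range, rounded up. */
        puds = (range + PUD_SIZE - 1) >> PUD_SHIFT;

        if (use_1g) {
                /* Only the tail 1G pages cannot cover needs PMD entries. */
                extra = range - ((range >> PUD_SHIFT) << PUD_SHIFT);
                pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
        } else {
                pmds = (range + PMD_SIZE - 1) >> PMD_SHIFT;
        }

        if (use_2m) {
                /* Only the tail 2M pages cannot cover needs PTE entries. */
                extra = range - ((range >> PMD_SHIFT) << PMD_SHIFT);
                ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
        } else {
                ptes = (range + PAGE_SIZE - 1) >> PAGE_SHIFT;
        }

        /* 8 bytes per entry at each level, each level page-aligned. */
        tables  = roundup(puds * 8, PAGE_SIZE);
        tables += roundup(pmds * 8, PAGE_SIZE);
        tables += roundup(ptes * 8, PAGE_SIZE);

        printf("puds=%lu pmds=%lu ptes=%lu -> %lu KiB of page tables\n",
               puds, pmds, ptes, tables >> 10);
        return 0;
}

For a single 4G range with only 2M pages allowed, this prints 4 PUD entries, 2048 PMD entries and no PTEs, i.e. 20 KiB of tables after rounding each level up to whole pages — the same figure the kernel's estimate would reserve via memblock.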
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 6368b86b84e2..701abbc24735 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -36,64 +36,6 @@ struct map_range {
 };
 
 static int page_size_mask;
-/*
- * First calculate space needed for kernel direct mapping page tables to cover
- * mr[0].start to mr[nr_range - 1].end, while accounting for possible 2M and 1GB
- * pages. Then find enough contiguous space for those page tables.
- */
-static void __init find_early_table_space(struct map_range *mr, int nr_range)
-{
-        int i;
-        unsigned long puds = 0, pmds = 0, ptes = 0, tables;
-        unsigned long start = 0, good_end;
-        phys_addr_t base;
-
-        for (i = 0; i < nr_range; i++) {
-                unsigned long range, extra;
-
-                range = mr[i].end - mr[i].start;
-                puds += (range + PUD_SIZE - 1) >> PUD_SHIFT;
-
-                if (mr[i].page_size_mask & (1 << PG_LEVEL_1G)) {
-                        extra = range - ((range >> PUD_SHIFT) << PUD_SHIFT);
-                        pmds += (extra + PMD_SIZE - 1) >> PMD_SHIFT;
-                } else {
-                        pmds += (range + PMD_SIZE - 1) >> PMD_SHIFT;
-                }
-
-                if (mr[i].page_size_mask & (1 << PG_LEVEL_2M)) {
-                        extra = range - ((range >> PMD_SHIFT) << PMD_SHIFT);
-#ifdef CONFIG_X86_32
-                        extra += PMD_SIZE;
-#endif
-                        ptes += (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
-                } else {
-                        ptes += (range + PAGE_SIZE - 1) >> PAGE_SHIFT;
-                }
-        }
-
-        tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
-        tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
-        tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
-
-#ifdef CONFIG_X86_32
-        /* for fixmap */
-        tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
-#endif
-        good_end = max_pfn_mapped << PAGE_SHIFT;
-
-        base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE);
-        if (!base)
-                panic("Cannot find space for the kernel page tables");
-
-        pgt_buf_start = base >> PAGE_SHIFT;
-        pgt_buf_end = pgt_buf_start;
-        pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
-
-        printk(KERN_DEBUG "kernel direct mapping tables up to %#lx @ [mem %#010lx-%#010lx]\n",
-                mr[nr_range - 1].end - 1, pgt_buf_start << PAGE_SHIFT,
-                (pgt_buf_top << PAGE_SHIFT) - 1);
-}
 
 void probe_page_size_mask(void)
 {
@@ -250,6 +192,65 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
 }
 
 /*
+ * First calculate space needed for kernel direct mapping page tables to cover
+ * mr[0].start to mr[nr_range - 1].end, while accounting for possible 2M and 1GB
+ * pages. Then find enough contiguous space for those page tables.
+ */
+static void __init find_early_table_space(struct map_range *mr, int nr_range)
+{
+        int i;
+        unsigned long puds = 0, pmds = 0, ptes = 0, tables;
+        unsigned long start = 0, good_end;
+        phys_addr_t base;
+
+        for (i = 0; i < nr_range; i++) {
+                unsigned long range, extra;
+
+                range = mr[i].end - mr[i].start;
+                puds += (range + PUD_SIZE - 1) >> PUD_SHIFT;
+
+                if (mr[i].page_size_mask & (1 << PG_LEVEL_1G)) {
+                        extra = range - ((range >> PUD_SHIFT) << PUD_SHIFT);
+                        pmds += (extra + PMD_SIZE - 1) >> PMD_SHIFT;
+                } else {
+                        pmds += (range + PMD_SIZE - 1) >> PMD_SHIFT;
+                }
+
+                if (mr[i].page_size_mask & (1 << PG_LEVEL_2M)) {
+                        extra = range - ((range >> PMD_SHIFT) << PMD_SHIFT);
+#ifdef CONFIG_X86_32
+                        extra += PMD_SIZE;
+#endif
+                        ptes += (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
+                } else {
+                        ptes += (range + PAGE_SIZE - 1) >> PAGE_SHIFT;
+                }
+        }
+
+        tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
+        tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
+        tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
+
+#ifdef CONFIG_X86_32
+        /* for fixmap */
+        tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
+#endif
+        good_end = max_pfn_mapped << PAGE_SHIFT;
+
+        base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE);
+        if (!base)
+                panic("Cannot find space for the kernel page tables");
+
+        pgt_buf_start = base >> PAGE_SHIFT;
+        pgt_buf_end = pgt_buf_start;
+        pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
+
+        printk(KERN_DEBUG "kernel direct mapping tables up to %#lx @ [mem %#010lx-%#010lx]\n",
+                mr[nr_range - 1].end - 1, pgt_buf_start << PAGE_SHIFT,
+                (pgt_buf_top << PAGE_SHIFT) - 1);
+}
+
+/*
  * Setup the direct mapping of the physical memory at PAGE_OFFSET.
  * This runs before bootmem is initialized and gets pages directly from
  * the physical memory. To access them they are temporarily mapped.