diff options
author | Yinghai Lu <yinghai@kernel.org> | 2012-11-16 22:38:45 -0500 |
---|---|---|
committer | H. Peter Anvin <hpa@linux.intel.com> | 2012-11-17 14:59:07 -0500 |
commit | ab9519376e86fbbf3c64e5a2b8b005958ea3e9cc (patch) | |
tree | fd740f3abd4a8c2114a68039c0601e3a7784a4bd /arch/x86/mm/init.c | |
parent | c14fa0b63b5b4234667c03fdc3314c0881caa514 (diff) |
x86, mm: Separate out calculate_table_space_size()
calculate_table_space_size() should take the physical address range that will need to be mapped.
find_early_table_space() should take the range that the pgt buffer should be placed in.
Separating the page table size calculation from the search for early page
table space reduces confusion.
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1353123563-3103-9-git-send-email-yinghai@kernel.org
Reviewed-by: Pekka Enberg <penberg@kernel.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86/mm/init.c')
-rw-r--r-- | arch/x86/mm/init.c | 38 |
1 file changed, 27 insertions, 11 deletions
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 1ce0d033fafc..7b961d0b1389 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c | |||
@@ -196,12 +196,10 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range, | |||
196 | * mr[0].start to mr[nr_range - 1].end, while accounting for possible 2M and 1GB | 196 | * mr[0].start to mr[nr_range - 1].end, while accounting for possible 2M and 1GB |
197 | * pages. Then find enough contiguous space for those page tables. | 197 | * pages. Then find enough contiguous space for those page tables. |
198 | */ | 198 | */ |
199 | static void __init find_early_table_space(unsigned long start, unsigned long end) | 199 | static unsigned long __init calculate_table_space_size(unsigned long start, unsigned long end) |
200 | { | 200 | { |
201 | int i; | 201 | int i; |
202 | unsigned long puds = 0, pmds = 0, ptes = 0, tables; | 202 | unsigned long puds = 0, pmds = 0, ptes = 0, tables; |
203 | unsigned long good_end; | ||
204 | phys_addr_t base; | ||
205 | struct map_range mr[NR_RANGE_MR]; | 203 | struct map_range mr[NR_RANGE_MR]; |
206 | int nr_range; | 204 | int nr_range; |
207 | 205 | ||
@@ -240,9 +238,17 @@ static void __init find_early_table_space(unsigned long start, unsigned long end | |||
240 | #ifdef CONFIG_X86_32 | 238 | #ifdef CONFIG_X86_32 |
241 | /* for fixmap */ | 239 | /* for fixmap */ |
242 | tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE); | 240 | tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE); |
243 | good_end = max_pfn_mapped << PAGE_SHIFT; | ||
244 | #endif | 241 | #endif |
245 | 242 | ||
243 | return tables; | ||
244 | } | ||
245 | |||
246 | static void __init find_early_table_space(unsigned long start, | ||
247 | unsigned long good_end, | ||
248 | unsigned long tables) | ||
249 | { | ||
250 | phys_addr_t base; | ||
251 | |||
246 | base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE); | 252 | base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE); |
247 | if (!base) | 253 | if (!base) |
248 | panic("Cannot find space for the kernel page tables"); | 254 | panic("Cannot find space for the kernel page tables"); |
@@ -250,10 +256,6 @@ static void __init find_early_table_space(unsigned long start, unsigned long end | |||
250 | pgt_buf_start = base >> PAGE_SHIFT; | 256 | pgt_buf_start = base >> PAGE_SHIFT; |
251 | pgt_buf_end = pgt_buf_start; | 257 | pgt_buf_end = pgt_buf_start; |
252 | pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT); | 258 | pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT); |
253 | |||
254 | printk(KERN_DEBUG "kernel direct mapping tables up to %#lx @ [mem %#010lx-%#010lx]\n", | ||
255 | mr[nr_range - 1].end - 1, pgt_buf_start << PAGE_SHIFT, | ||
256 | (pgt_buf_top << PAGE_SHIFT) - 1); | ||
257 | } | 259 | } |
258 | 260 | ||
259 | /* | 261 | /* |
@@ -291,6 +293,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, | |||
291 | 293 | ||
292 | void __init init_mem_mapping(void) | 294 | void __init init_mem_mapping(void) |
293 | { | 295 | { |
296 | unsigned long tables, good_end, end; | ||
297 | |||
294 | probe_page_size_mask(); | 298 | probe_page_size_mask(); |
295 | 299 | ||
296 | /* | 300 | /* |
@@ -301,10 +305,18 @@ void __init init_mem_mapping(void) | |||
301 | * nodes are discovered. | 305 | * nodes are discovered. |
302 | */ | 306 | */ |
303 | #ifdef CONFIG_X86_64 | 307 | #ifdef CONFIG_X86_64 |
304 | find_early_table_space(0, max_pfn<<PAGE_SHIFT); | 308 | end = max_pfn << PAGE_SHIFT; |
309 | good_end = end; | ||
305 | #else | 310 | #else |
306 | find_early_table_space(0, max_low_pfn<<PAGE_SHIFT); | 311 | end = max_low_pfn << PAGE_SHIFT; |
312 | good_end = max_pfn_mapped << PAGE_SHIFT; | ||
307 | #endif | 313 | #endif |
314 | tables = calculate_table_space_size(0, end); | ||
315 | find_early_table_space(0, good_end, tables); | ||
316 | printk(KERN_DEBUG "kernel direct mapping tables up to %#lx @ [mem %#010lx-%#010lx] prealloc\n", | ||
317 | end - 1, pgt_buf_start << PAGE_SHIFT, | ||
318 | (pgt_buf_top << PAGE_SHIFT) - 1); | ||
319 | |||
308 | max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT); | 320 | max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT); |
309 | max_pfn_mapped = max_low_pfn_mapped; | 321 | max_pfn_mapped = max_low_pfn_mapped; |
310 | 322 | ||
@@ -331,9 +343,13 @@ void __init init_mem_mapping(void) | |||
331 | * RO all the pagetable pages, including the ones that are beyond | 343 | * RO all the pagetable pages, including the ones that are beyond |
332 | * pgt_buf_end at that time. | 344 | * pgt_buf_end at that time. |
333 | */ | 345 | */ |
334 | if (pgt_buf_end > pgt_buf_start) | 346 | if (pgt_buf_end > pgt_buf_start) { |
347 | printk(KERN_DEBUG "kernel direct mapping tables up to %#lx @ [mem %#010lx-%#010lx] final\n", | ||
348 | end - 1, pgt_buf_start << PAGE_SHIFT, | ||
349 | (pgt_buf_end << PAGE_SHIFT) - 1); | ||
335 | x86_init.mapping.pagetable_reserve(PFN_PHYS(pgt_buf_start), | 350 | x86_init.mapping.pagetable_reserve(PFN_PHYS(pgt_buf_start), |
336 | PFN_PHYS(pgt_buf_end)); | 351 | PFN_PHYS(pgt_buf_end)); |
352 | } | ||
337 | 353 | ||
338 | /* stop the wrong using */ | 354 | /* stop the wrong using */ |
339 | pgt_buf_top = 0; | 355 | pgt_buf_top = 0; |