Diffstat (limited to 'arch/x86/mm/init.c')
 arch/x86/mm/init.c | 59 +++++++++++++++++++++++++++++++++++++++--------------------
 1 file changed, 39 insertions(+), 20 deletions(-)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index ce32017c5e38..742d6d4ad9eb 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -418,27 +418,27 @@ static unsigned long __init get_new_step_size(unsigned long step_size)
 	return step_size << 5;
 }
 
-void __init init_mem_mapping(void)
+/**
+ * memory_map_top_down - Map [map_start, map_end) top down
+ * @map_start: start address of the target memory range
+ * @map_end: end address of the target memory range
+ *
+ * This function will set up the direct mapping for the memory range
+ * [map_start, map_end) top-down. That is, the page tables will be
+ * allocated at the end of the memory, and the memory will be mapped
+ * from the top down.
+ */
+static void __init memory_map_top_down(unsigned long map_start,
+				       unsigned long map_end)
 {
-	unsigned long end, real_end, start, last_start;
+	unsigned long real_end, start, last_start;
 	unsigned long step_size;
 	unsigned long addr;
 	unsigned long mapped_ram_size = 0;
 	unsigned long new_mapped_ram_size;
 
-	probe_page_size_mask();
-
-#ifdef CONFIG_X86_64
-	end = max_pfn << PAGE_SHIFT;
-#else
-	end = max_low_pfn << PAGE_SHIFT;
-#endif
-
-	/* the ISA range is always mapped regardless of memory holes */
-	init_memory_mapping(0, ISA_END_ADDRESS);
-
 	/* xen has big range in reserved near end of ram, skip it at first.*/
-	addr = memblock_find_in_range(ISA_END_ADDRESS, end, PMD_SIZE, PMD_SIZE);
+	addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
 	real_end = addr + PMD_SIZE;
 
 	/* step_size need to be small so pgt_buf from BRK could cover it */
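
To make the control flow concrete, here is a minimal, self-contained toy model of the top-down walk being factored out above. It is a sketch, not kernel code: map_one_range() and round_down_to() are stand-ins for init_range_memory_mapping() and the kernel's round_down(), the addresses are invented, and the step is widened unconditionally each pass, whereas the kernel only widens it once a pass has mapped more than the running total.

#include <stdio.h>

#define PMD_SIZE	(2UL << 20)	/* 2 MiB, illustrative */

/* power-of-two round down, stand-in for the kernel's round_down() */
static unsigned long round_down_to(unsigned long x, unsigned long align)
{
	return x & ~(align - 1);
}

/* stand-in for init_range_memory_mapping(): just report the window */
static void map_one_range(unsigned long start, unsigned long end)
{
	printf("map [%#lx, %#lx)\n", start, end);
}

int main(void)
{
	unsigned long map_start = 0x100000;	/* ISA_END_ADDRESS: 1 MiB */
	unsigned long real_end = 0x40000000;	/* pretend RAM ends at 1 GiB */
	unsigned long step_size = PMD_SIZE;
	unsigned long last_start = real_end;
	unsigned long start;

	while (last_start > map_start) {
		if (last_start > step_size) {
			start = round_down_to(last_start - 1, step_size);
			if (start < map_start)
				start = map_start;
		} else
			start = map_start;
		map_one_range(start, last_start);
		last_start = start;
		step_size <<= 5;	/* get_new_step_size(), simplified */
	}
	return 0;
}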
@@ -453,13 +453,13 @@ void __init init_mem_mapping(void)
 	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
 	 * for page table.
 	 */
-	while (last_start > ISA_END_ADDRESS) {
+	while (last_start > map_start) {
 		if (last_start > step_size) {
 			start = round_down(last_start - 1, step_size);
-			if (start < ISA_END_ADDRESS)
-				start = ISA_END_ADDRESS;
+			if (start < map_start)
+				start = map_start;
 		} else
-			start = ISA_END_ADDRESS;
+			start = map_start;
 		new_mapped_ram_size = init_range_memory_mapping(start,
 							last_start);
 		last_start = start;
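
Plugging in numbers: get_new_step_size(), shown in the context at the top of this diff, returns step_size << 5, so each pass widens the window 32-fold: 2 MiB, 64 MiB, 2 GiB, and so on. With the toy values from the sketch above (map_start = 1 MiB, real_end = 1 GiB), the loop runs exactly three passes:

map [0x3fe00000, 0x40000000)
map [0x3c000000, 0x3fe00000)
map [0x100000, 0x3c000000)

The first, smallest window is the one whose page tables must fit in the BRK-backed pgt_buf (per the step_size comment above); each later, lower window can draw its page-table pages from memory mapped by the earlier, higher passes, which is the point of the [min_pfn_mapped, max_pfn_mapped) remark in this hunk.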
@@ -470,8 +470,27 @@ void __init init_mem_mapping(void)
 		mapped_ram_size += new_mapped_ram_size;
 	}
 
-	if (real_end < end)
-		init_range_memory_mapping(real_end, end);
+	if (real_end < map_end)
+		init_range_memory_mapping(real_end, map_end);
+}
+
+void __init init_mem_mapping(void)
+{
+	unsigned long end;
+
+	probe_page_size_mask();
+
+#ifdef CONFIG_X86_64
+	end = max_pfn << PAGE_SHIFT;
+#else
+	end = max_low_pfn << PAGE_SHIFT;
+#endif
+
+	/* the ISA range is always mapped regardless of memory holes */
+	init_memory_mapping(0, ISA_END_ADDRESS);
+
+	/* set up the direct mapping for [ISA_END_ADDRESS, end), top-down */
+	memory_map_top_down(ISA_END_ADDRESS, end);
 
 #ifdef CONFIG_X86_64
 	if (max_pfn > max_low_pfn) {
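
The resulting split is the design point of the patch: init_mem_mapping() keeps only the range-independent steps (probe the page-size mask, always map the ISA range, pick the end address per CONFIG_X86_64), while the top-down policy sits behind a plain [map_start, map_end) interface in memory_map_top_down(). Because the helper takes its bounds as parameters instead of hard-coding ISA_END_ADDRESS and the global end, a different mapping order could later be added as a sibling helper without disturbing init_mem_mapping()'s callers.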