author    Pekka Enberg <penberg@cs.helsinki.fi>    2009-03-05 07:55:05 -0500
committer Ingo Molnar <mingo@elte.hu>    2009-03-05 08:17:17 -0500
commit    f765090a2617b8d9cb73b71e0aa850c29460d8be (patch)
tree      b23dff6b8639a0f74f99a5206e6b7c9def588f6f /arch/x86/mm/init_64.c
parent    0c0f756fd679d9747d52dad51fce3a5bb362eec3 (diff)
x86: move init_memory_mapping() to common mm/init.c
Impact: cleanup

This patch moves the init_memory_mapping() function to common mm/init.c.

Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Yinghai Lu <yinghai@kernel.org>
LKML-Reference: <1236257708-27269-14-git-send-email-penberg@cs.helsinki.fi>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/mm/init_64.c')
-rw-r--r--    arch/x86/mm/init_64.c    314
1 files changed, 7 insertions, 307 deletions
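
The function being moved, init_memory_mapping(), splits the physical range [start, end) into a handful of map_range entries (a 4k head up to the first 2M boundary, 2M pages up to the first 1G boundary, 1G pages in the middle, then 2M and 4k tails), merges adjacent entries that use the same page size, and hands each range to kernel_physical_mapping_init(). As a reading aid for the diff below, here is a minimal user-space C sketch of that splitting step. It assumes the simplified 64-bit path (no MTRR or DEBUG_PAGEALLOC special cases), and every name in it (demo_range, demo_split_ranges, align_up, align_down, min_ul) is invented for the illustration rather than taken from the kernel.

/*
 * Illustrative sketch only -- not part of the patch.  Mimics how
 * init_memory_mapping() picks a page size for each sub-range.
 */
#include <stdio.h>

#define PMD_SHIFT       21      /* 2M pages */
#define PUD_SHIFT       30      /* 1G pages */

struct demo_range {
        unsigned long start, end;       /* byte addresses */
        const char *pages;
};

static unsigned long align_up(unsigned long x, int shift)
{
        return ((x + (1UL << shift) - 1) >> shift) << shift;
}

static unsigned long align_down(unsigned long x, int shift)
{
        return (x >> shift) << shift;
}

static unsigned long min_ul(unsigned long a, unsigned long b)
{
        return a < b ? a : b;
}

static int demo_split_ranges(unsigned long start, unsigned long end,
                             struct demo_range *mr)
{
        unsigned long pos = start, next;
        int nr = 0;

        /* head: 4k pages up to the first 2M boundary */
        next = min_ul(align_up(pos, PMD_SHIFT), end);
        if (pos < next) {
                mr[nr++] = (struct demo_range){ pos, next, "4k" };
                pos = next;
        }
        /* 2M pages up to the first 1G boundary (or the last whole 2M page) */
        next = min_ul(align_up(pos, PUD_SHIFT), align_down(end, PMD_SHIFT));
        if (pos < next) {
                mr[nr++] = (struct demo_range){ pos, next, "2M" };
                pos = next;
        }
        /* 1G pages in the middle */
        next = align_down(end, PUD_SHIFT);
        if (pos < next) {
                mr[nr++] = (struct demo_range){ pos, next, "1G" };
                pos = next;
        }
        /* tail: whole 2M pages, then 4k pages to the very end */
        next = align_down(end, PMD_SHIFT);
        if (pos < next) {
                mr[nr++] = (struct demo_range){ pos, next, "2M" };
                pos = next;
        }
        if (pos < end)
                mr[nr++] = (struct demo_range){ pos, end, "4k" };

        return nr;
}

int main(void)
{
        struct demo_range mr[5];
        int i, nr = demo_split_ranges(0x123000UL, 0x87654000UL, mr);

        for (i = 0; i < nr; i++)
                printf(" %010lx - %010lx page %s\n",
                       mr[i].start, mr[i].end, mr[i].pages);
        return 0;
}

For the example range above this prints five entries (4k head, 2M, 1G, 2M tail, 4k tail), which is why NR_RANGE_MR is 5 on 64-bit in the removed code.
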
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index d101990e4635..a32fe0756088 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -61,12 +61,6 @@ static unsigned long dma_reserve __initdata;
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
-int direct_gbpages
-#ifdef CONFIG_DIRECT_GBPAGES
-        = 1
-#endif
-;
-
 static int __init parse_direct_gbpages_off(char *arg)
 {
         direct_gbpages = 0;
@@ -87,8 +81,6 @@ early_param("gbpages", parse_direct_gbpages_on);
  * around without checking the pgd every time.
  */
 
-int after_bootmem;
-
 pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
 EXPORT_SYMBOL_GPL(__supported_pte_mask);
 
@@ -291,9 +283,9 @@ void __init cleanup_highmap(void)
         }
 }
 
-static unsigned long __initdata table_start;
-static unsigned long __meminitdata table_end;
-static unsigned long __meminitdata table_top;
+extern unsigned long __initdata table_start;
+extern unsigned long __meminitdata table_end;
+extern unsigned long __meminitdata table_top;
 
 static __ref void *alloc_low_page(unsigned long *phys)
 {
@@ -547,77 +539,10 @@ phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end,
         return phys_pud_init(pud, addr, end, page_size_mask);
 }
 
-static void __init find_early_table_space(unsigned long end, int use_pse,
-                                          int use_gbpages)
-{
-        unsigned long puds, pmds, ptes, tables, start;
-
-        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
-        tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
-
-        if (use_gbpages) {
-                unsigned long extra;
-
-                extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
-                pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
-        } else
-                pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
-
-        tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
-
-        if (use_pse) {
-                unsigned long extra;
-
-                extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
-#ifdef CONFIG_X86_32
-                extra += PMD_SIZE;
-#endif
-                ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
-        } else
-                ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
-
-        tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
-
-#ifdef CONFIG_X86_32
-        /* for fixmap */
-        tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
-#endif
-
-        /*
-         * RED-PEN putting page tables only on node 0 could
-         * cause a hotspot and fill up ZONE_DMA. The page tables
-         * need roughly 0.5KB per GB.
-         */
-#ifdef CONFIG_X86_32
-        start = 0x7000;
-        table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
-                                        tables, PAGE_SIZE);
-#else /* CONFIG_X86_64 */
-        start = 0x8000;
-        table_start = find_e820_area(start, end, tables, PAGE_SIZE);
-#endif
-        if (table_start == -1UL)
-                panic("Cannot find space for the kernel page tables");
-
-        table_start >>= PAGE_SHIFT;
-        table_end = table_start;
-        table_top = table_start + (tables >> PAGE_SHIFT);
-
-        printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
-                end, table_start << PAGE_SHIFT, table_top << PAGE_SHIFT);
-}
-
-static void __init init_gbpages(void)
-{
-        if (direct_gbpages && cpu_has_gbpages)
-                printk(KERN_INFO "Using GB pages for direct mapping\n");
-        else
-                direct_gbpages = 0;
-}
-
-static unsigned long __meminit kernel_physical_mapping_init(unsigned long start,
-                                                unsigned long end,
-                                                unsigned long page_size_mask)
+unsigned long __meminit
+kernel_physical_mapping_init(unsigned long start,
+                             unsigned long end,
+                             unsigned long page_size_mask)
 {
 
         unsigned long next, last_map_addr = end;
@@ -654,231 +579,6 @@ static unsigned long __meminit kernel_physical_mapping_init(unsigned long start,
         return last_map_addr;
 }
 
-struct map_range {
-        unsigned long start;
-        unsigned long end;
-        unsigned page_size_mask;
-};
-
-#ifdef CONFIG_X86_32
-#define NR_RANGE_MR 3
-#else /* CONFIG_X86_64 */
-#define NR_RANGE_MR 5
-#endif
-
-static int save_mr(struct map_range *mr, int nr_range,
-                   unsigned long start_pfn, unsigned long end_pfn,
-                   unsigned long page_size_mask)
-{
-        if (start_pfn < end_pfn) {
-                if (nr_range >= NR_RANGE_MR)
-                        panic("run out of range for init_memory_mapping\n");
-                mr[nr_range].start = start_pfn<<PAGE_SHIFT;
-                mr[nr_range].end = end_pfn<<PAGE_SHIFT;
-                mr[nr_range].page_size_mask = page_size_mask;
-                nr_range++;
-        }
-
-        return nr_range;
-}
-
-/*
- * Setup the direct mapping of the physical memory at PAGE_OFFSET.
- * This runs before bootmem is initialized and gets pages directly from
- * the physical memory. To access them they are temporarily mapped.
- */
-unsigned long __init_refok init_memory_mapping(unsigned long start,
-                                               unsigned long end)
-{
-        unsigned long page_size_mask = 0;
-        unsigned long start_pfn, end_pfn;
-        unsigned long pos;
-        unsigned long ret;
-
-        struct map_range mr[NR_RANGE_MR];
-        int nr_range, i;
-        int use_pse, use_gbpages;
-
-        printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
-
-        if (!after_bootmem)
-                init_gbpages();
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
-        /*
-         * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
-         * This will simplify cpa(), which otherwise needs to support splitting
-         * large pages into small in interrupt context, etc.
-         */
-        use_pse = use_gbpages = 0;
-#else
-        use_pse = cpu_has_pse;
-        use_gbpages = direct_gbpages;
-#endif
-
-#ifdef CONFIG_X86_32
-#ifdef CONFIG_X86_PAE
-        set_nx();
-        if (nx_enabled)
-                printk(KERN_INFO "NX (Execute Disable) protection: active\n");
-#endif
-
-        /* Enable PSE if available */
-        if (cpu_has_pse)
-                set_in_cr4(X86_CR4_PSE);
-
-        /* Enable PGE if available */
-        if (cpu_has_pge) {
-                set_in_cr4(X86_CR4_PGE);
-                __supported_pte_mask |= _PAGE_GLOBAL;
-        }
-#endif
-
-        if (use_gbpages)
-                page_size_mask |= 1 << PG_LEVEL_1G;
-        if (use_pse)
-                page_size_mask |= 1 << PG_LEVEL_2M;
-
-        memset(mr, 0, sizeof(mr));
-        nr_range = 0;
-
-        /* head if not big page alignment ? */
-        start_pfn = start >> PAGE_SHIFT;
-        pos = start_pfn << PAGE_SHIFT;
-#ifdef CONFIG_X86_32
-        /*
-         * Don't use a large page for the first 2/4MB of memory
-         * because there are often fixed size MTRRs in there
-         * and overlapping MTRRs into large pages can cause
-         * slowdowns.
-         */
-        if (pos == 0)
-                end_pfn = 1<<(PMD_SHIFT - PAGE_SHIFT);
-        else
-                end_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
-                                 << (PMD_SHIFT - PAGE_SHIFT);
-#else /* CONFIG_X86_64 */
-        end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
-                        << (PMD_SHIFT - PAGE_SHIFT);
-#endif
-        if (end_pfn > (end >> PAGE_SHIFT))
-                end_pfn = end >> PAGE_SHIFT;
-        if (start_pfn < end_pfn) {
-                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
-                pos = end_pfn << PAGE_SHIFT;
-        }
-
-        /* big page (2M) range */
-        start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
-                         << (PMD_SHIFT - PAGE_SHIFT);
-#ifdef CONFIG_X86_32
-        end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
-#else /* CONFIG_X86_64 */
-        end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
-                         << (PUD_SHIFT - PAGE_SHIFT);
-        if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)))
-                end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT));
-#endif
-
-        if (start_pfn < end_pfn) {
-                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
-                                page_size_mask & (1<<PG_LEVEL_2M));
-                pos = end_pfn << PAGE_SHIFT;
-        }
-
-#ifdef CONFIG_X86_64
-        /* big page (1G) range */
-        start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
-                         << (PUD_SHIFT - PAGE_SHIFT);
-        end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
-        if (start_pfn < end_pfn) {
-                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
-                                page_size_mask &
-                                 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
-                pos = end_pfn << PAGE_SHIFT;
-        }
-
-        /* tail is not big page (1G) alignment */
-        start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
-                         << (PMD_SHIFT - PAGE_SHIFT);
-        end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
-        if (start_pfn < end_pfn) {
-                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
-                                page_size_mask & (1<<PG_LEVEL_2M));
-                pos = end_pfn << PAGE_SHIFT;
-        }
-#endif
-
-        /* tail is not big page (2M) alignment */
-        start_pfn = pos>>PAGE_SHIFT;
-        end_pfn = end>>PAGE_SHIFT;
-        nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
-
-        /* try to merge same page size and continuous */
-        for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
-                unsigned long old_start;
-                if (mr[i].end != mr[i+1].start ||
-                    mr[i].page_size_mask != mr[i+1].page_size_mask)
-                        continue;
-                /* move it */
-                old_start = mr[i].start;
-                memmove(&mr[i], &mr[i+1],
-                        (nr_range - 1 - i) * sizeof(struct map_range));
-                mr[i--].start = old_start;
-                nr_range--;
-        }
-
-        for (i = 0; i < nr_range; i++)
-                printk(KERN_DEBUG " %010lx - %010lx page %s\n",
-                                mr[i].start, mr[i].end,
-                        (mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
-                         (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));
-
-        /*
-         * Find space for the kernel direct mapping tables.
-         *
-         * Later we should allocate these tables in the local node of the
-         * memory mapped. Unfortunately this is done currently before the
-         * nodes are discovered.
-         */
-        if (!after_bootmem)
-                find_early_table_space(end, use_pse, use_gbpages);
-
-#ifdef CONFIG_X86_32
-        for (i = 0; i < nr_range; i++)
-                kernel_physical_mapping_init(
-                                mr[i].start >> PAGE_SHIFT,
-                                mr[i].end >> PAGE_SHIFT,
-                                mr[i].page_size_mask == (1<<PG_LEVEL_2M));
-        ret = end;
-#else /* CONFIG_X86_64 */
-        for (i = 0; i < nr_range; i++)
-                ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
-                                                   mr[i].page_size_mask);
-#endif
-
-#ifdef CONFIG_X86_32
-        early_ioremap_page_table_range_init();
-
-        load_cr3(swapper_pg_dir);
-#endif
-
-#ifdef CONFIG_X86_64
-        if (!after_bootmem)
-                mmu_cr4_features = read_cr4();
-#endif
-        __flush_tlb_all();
-
-        if (!after_bootmem && table_end > table_start)
-                reserve_early(table_start << PAGE_SHIFT,
-                                table_end << PAGE_SHIFT, "PGTABLE");
-
-        if (!after_bootmem)
-                early_memtest(start, end);
-
-        return ret >> PAGE_SHIFT;
-}
-
 #ifndef CONFIG_NUMA
 void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn)
 {