path: root/arch/x86/mm/init_64.c
Diffstat (limited to 'arch/x86/mm/init_64.c')
-rw-r--r--  arch/x86/mm/init_64.c  381
1 file changed, 60 insertions(+), 321 deletions(-)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index ea5ad1e3672d..66d6be85df82 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -48,6 +48,7 @@
 #include <asm/kdebug.h>
 #include <asm/numa.h>
 #include <asm/cacheflush.h>
+#include <asm/init.h>
 
 /*
  * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
@@ -61,12 +62,6 @@ static unsigned long dma_reserve __initdata;
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
-int direct_gbpages
-#ifdef CONFIG_DIRECT_GBPAGES
-        = 1
-#endif
-;
-
 static int __init parse_direct_gbpages_off(char *arg)
 {
         direct_gbpages = 0;
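
[note] The definition of direct_gbpages (default 1 under CONFIG_DIRECT_GBPAGES) leaves this file while its early_param() handlers (parse_direct_gbpages_on/off) stay behind, so the variable itself presumably moves to x86 mm code shared with the 32-bit build. Under that assumption, the relocated definition would be exactly the removed lines:

/* assumed new home in common x86 mm code (not shown in this diff): */
int direct_gbpages
#ifdef CONFIG_DIRECT_GBPAGES
        = 1
#endif
;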
@@ -87,8 +82,6 @@ early_param("gbpages", parse_direct_gbpages_on);
  * around without checking the pgd every time.
  */
 
-int after_bootmem;
-
 pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
 EXPORT_SYMBOL_GPL(__supported_pte_mask);
 
@@ -168,34 +161,51 @@ static __ref void *spp_getpage(void)
         return ptr;
 }
 
-void
-set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
-{
-        pud_t *pud;
-        pmd_t *pmd;
-        pte_t *pte;
-
-        pud = pud_page + pud_index(vaddr);
-        if (pud_none(*pud)) {
-                pmd = (pmd_t *) spp_getpage();
-                pud_populate(&init_mm, pud, pmd);
-                if (pmd != pmd_offset(pud, 0)) {
-                        printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
-                                pmd, pmd_offset(pud, 0));
-                        return;
-                }
-        }
-        pmd = pmd_offset(pud, vaddr);
-        if (pmd_none(*pmd)) {
-                pte = (pte_t *) spp_getpage();
-                pmd_populate_kernel(&init_mm, pmd, pte);
-                if (pte != pte_offset_kernel(pmd, 0)) {
-                        printk(KERN_ERR "PAGETABLE BUG #02!\n");
-                        return;
-                }
-        }
-
-        pte = pte_offset_kernel(pmd, vaddr);
+static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
+{
+        if (pgd_none(*pgd)) {
+                pud_t *pud = (pud_t *)spp_getpage();
+                pgd_populate(&init_mm, pgd, pud);
+                if (pud != pud_offset(pgd, 0))
+                        printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
+                               pud, pud_offset(pgd, 0));
+        }
+        return pud_offset(pgd, vaddr);
+}
+
+static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
+{
+        if (pud_none(*pud)) {
+                pmd_t *pmd = (pmd_t *) spp_getpage();
+                pud_populate(&init_mm, pud, pmd);
+                if (pmd != pmd_offset(pud, 0))
+                        printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
+                               pmd, pmd_offset(pud, 0));
+        }
+        return pmd_offset(pud, vaddr);
+}
+
+static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
+{
+        if (pmd_none(*pmd)) {
+                pte_t *pte = (pte_t *) spp_getpage();
+                pmd_populate_kernel(&init_mm, pmd, pte);
+                if (pte != pte_offset_kernel(pmd, 0))
+                        printk(KERN_ERR "PAGETABLE BUG #02!\n");
+        }
+        return pte_offset_kernel(pmd, vaddr);
+}
+
+void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
+{
+        pud_t *pud;
+        pmd_t *pmd;
+        pte_t *pte;
+
+        pud = pud_page + pud_index(vaddr);
+        pmd = fill_pmd(pud, vaddr);
+        pte = fill_pte(pmd, vaddr);
+
         set_pte(pte, new_pte);
 
         /*
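
[note] This hunk factors the open-coded walk in set_pte_vaddr_pud() into one helper per level. Each fill_*() follows the same pattern: if the upper-level entry is empty, allocate the next table with spp_getpage(), hook it up, sanity-check the offset, then return the entry for vaddr; the new fill_pud() ("PAGETABLE BUG #00") extends the pattern up to the pgd level. Note that the early `return` on the BUG #01/#02 paths is dropped, so a mismatch is now only logged. A minimal sketch of how the helpers compose (map_one_page() is a hypothetical caller, not part of this patch):

static void __init map_one_page(unsigned long vaddr, pte_t pteval)
{
        pgd_t *pgd = pgd_offset_k(vaddr);  /* kernel pgd entry for vaddr */
        pud_t *pud = fill_pud(pgd, vaddr); /* allocates a pud page if the pgd entry is empty */
        pmd_t *pmd = fill_pmd(pud, vaddr); /* allocates a pmd page if the pud entry is empty */
        pte_t *pte = fill_pte(pmd, vaddr); /* allocates a pte page if the pmd entry is empty */

        set_pte(pte, pteval);
        __flush_tlb_one(vaddr);
}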
@@ -205,8 +215,7 @@ set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
         __flush_tlb_one(vaddr);
 }
 
-void
-set_pte_vaddr(unsigned long vaddr, pte_t pteval)
+void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
 {
         pgd_t *pgd;
         pud_t *pud_page;
@@ -223,6 +232,24 @@ set_pte_vaddr(unsigned long vaddr, pte_t pteval)
         set_pte_vaddr_pud(pud_page, vaddr, pteval);
 }
 
+pmd_t * __init populate_extra_pmd(unsigned long vaddr)
+{
+        pgd_t *pgd;
+        pud_t *pud;
+
+        pgd = pgd_offset_k(vaddr);
+        pud = fill_pud(pgd, vaddr);
+        return fill_pmd(pud, vaddr);
+}
+
+pte_t * __init populate_extra_pte(unsigned long vaddr)
+{
+        pmd_t *pmd;
+
+        pmd = populate_extra_pmd(vaddr);
+        return fill_pte(pmd, vaddr);
+}
+
 /*
  * Create large page table mappings for a range of physical addresses.
  */
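
[note] populate_extra_pmd() and populate_extra_pte() export the new helpers so early-boot code outside this file can grow the kernel page tables on demand (their prototypes presumably live in a shared x86 header; the <asm/init.h> include added above is a plausible carrier). A sketch of the intended call pattern (early_map_one() and the PAGE_KERNEL mapping are illustrative, not from this patch):

void __init early_map_one(unsigned long vaddr, unsigned long phys)
{
        /* builds the pgd -> pud -> pmd -> pte chain as needed, returns the pte slot */
        pte_t *pte = populate_extra_pte(vaddr);

        set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, PAGE_KERNEL));
}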
@@ -291,13 +318,9 @@ void __init cleanup_highmap(void)
         }
 }
 
-static unsigned long __initdata table_start;
-static unsigned long __meminitdata table_end;
-static unsigned long __meminitdata table_top;
-
 static __ref void *alloc_low_page(unsigned long *phys)
 {
-        unsigned long pfn = table_end++;
+        unsigned long pfn = e820_table_end++;
         void *adr;
 
         if (after_bootmem) {
@@ -307,7 +330,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
                 return adr;
         }
 
-        if (pfn >= table_top)
+        if (pfn >= e820_table_top)
                 panic("alloc_low_page: ran out of memory");
 
         adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
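
[note] The file-local table_start/table_end/table_top bookkeeping is gone; alloc_low_page() now bumps e820_table_end instead, which (given the deletions below) presumably lives in x86 mm code shared with 32-bit. Under that assumption the shared declarations would look roughly like this (names inferred from the uses in this hunk; e820_table_start is an assumption by analogy):

/* assumed shared declarations, plausibly via the newly added <asm/init.h>: */
extern unsigned long e820_table_start;  /* first pfn of the early page-table window */
extern unsigned long e820_table_end;    /* next free pfn in that window */
extern unsigned long e820_table_top;    /* first pfn past the window */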
@@ -547,58 +570,10 @@ phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end,
         return phys_pud_init(pud, addr, end, page_size_mask);
 }
 
-static void __init find_early_table_space(unsigned long end, int use_pse,
-                                          int use_gbpages)
-{
-        unsigned long puds, pmds, ptes, tables, start;
-
-        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
-        tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
-        if (use_gbpages) {
-                unsigned long extra;
-                extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
-                pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
-        } else
-                pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
-        tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
-
-        if (use_pse) {
-                unsigned long extra;
-                extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
-                ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
-        } else
-                ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
-        tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
-
-        /*
-         * RED-PEN putting page tables only on node 0 could
-         * cause a hotspot and fill up ZONE_DMA. The page tables
-         * need roughly 0.5KB per GB.
-         */
-        start = 0x8000;
-        table_start = find_e820_area(start, end, tables, PAGE_SIZE);
-        if (table_start == -1UL)
-                panic("Cannot find space for the kernel page tables");
-
-        table_start >>= PAGE_SHIFT;
-        table_end = table_start;
-        table_top = table_start + (tables >> PAGE_SHIFT);
-
-        printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
-                end, table_start << PAGE_SHIFT, table_top << PAGE_SHIFT);
-}
-
-static void __init init_gbpages(void)
-{
-        if (direct_gbpages && cpu_has_gbpages)
-                printk(KERN_INFO "Using GB pages for direct mapping\n");
-        else
-                direct_gbpages = 0;
-}
-
-static unsigned long __meminit kernel_physical_mapping_init(unsigned long start,
-                                                unsigned long end,
-                                                unsigned long page_size_mask)
+unsigned long __init
+kernel_physical_mapping_init(unsigned long start,
+                             unsigned long end,
+                             unsigned long page_size_mask)
 {
 
         unsigned long next, last_map_addr = end;
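
[note] kernel_physical_mapping_init() drops `static __meminit` and becomes an `__init` symbol with external linkage: it is now the 64-bit backend that a common init_memory_mapping() can call once per mapped range, which is also why find_early_table_space() and init_gbpages() can be deleted here rather than kept as dead code. The signature this hunk leaves behind (the comments are annotations, not part of the patch):

unsigned long __init
kernel_physical_mapping_init(unsigned long start,          /* physical start address */
                             unsigned long end,            /* physical end address */
                             unsigned long page_size_mask); /* PG_LEVEL_2M / PG_LEVEL_1G bits */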
@@ -635,176 +610,6 @@ static unsigned long __meminit kernel_physical_mapping_init(unsigned long start,
         return last_map_addr;
 }
 
-struct map_range {
-        unsigned long start;
-        unsigned long end;
-        unsigned page_size_mask;
-};
-
-#define NR_RANGE_MR 5
-
-static int save_mr(struct map_range *mr, int nr_range,
-                   unsigned long start_pfn, unsigned long end_pfn,
-                   unsigned long page_size_mask)
-{
-
-        if (start_pfn < end_pfn) {
-                if (nr_range >= NR_RANGE_MR)
-                        panic("run out of range for init_memory_mapping\n");
-                mr[nr_range].start = start_pfn<<PAGE_SHIFT;
-                mr[nr_range].end   = end_pfn<<PAGE_SHIFT;
-                mr[nr_range].page_size_mask = page_size_mask;
-                nr_range++;
-        }
-
-        return nr_range;
-}
-
-/*
- * Setup the direct mapping of the physical memory at PAGE_OFFSET.
- * This runs before bootmem is initialized and gets pages directly from
- * the physical memory. To access them they are temporarily mapped.
- */
-unsigned long __init_refok init_memory_mapping(unsigned long start,
-                                               unsigned long end)
-{
-        unsigned long last_map_addr = 0;
-        unsigned long page_size_mask = 0;
-        unsigned long start_pfn, end_pfn;
-        unsigned long pos;
-
-        struct map_range mr[NR_RANGE_MR];
-        int nr_range, i;
-        int use_pse, use_gbpages;
-
-        printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
-
-        /*
-         * Find space for the kernel direct mapping tables.
-         *
-         * Later we should allocate these tables in the local node of the
-         * memory mapped. Unfortunately this is done currently before the
-         * nodes are discovered.
-         */
-        if (!after_bootmem)
-                init_gbpages();
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
-        /*
-         * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
-         * This will simplify cpa(), which otherwise needs to support splitting
-         * large pages into small in interrupt context, etc.
-         */
-        use_pse = use_gbpages = 0;
-#else
-        use_pse = cpu_has_pse;
-        use_gbpages = direct_gbpages;
-#endif
-
-        if (use_gbpages)
-                page_size_mask |= 1 << PG_LEVEL_1G;
-        if (use_pse)
-                page_size_mask |= 1 << PG_LEVEL_2M;
-
-        memset(mr, 0, sizeof(mr));
-        nr_range = 0;
-
-        /* head if not big page alignment ? */
-        start_pfn = start >> PAGE_SHIFT;
-        pos = start_pfn << PAGE_SHIFT;
-        end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
-                        << (PMD_SHIFT - PAGE_SHIFT);
-        if (end_pfn > (end >> PAGE_SHIFT))
-                end_pfn = end >> PAGE_SHIFT;
-        if (start_pfn < end_pfn) {
-                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
-                pos = end_pfn << PAGE_SHIFT;
-        }
-
-        /* big page (2M) range */
-        start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
-                         << (PMD_SHIFT - PAGE_SHIFT);
-        end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
-                         << (PUD_SHIFT - PAGE_SHIFT);
-        if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)))
-                end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT));
-        if (start_pfn < end_pfn) {
-                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
-                                page_size_mask & (1<<PG_LEVEL_2M));
-                pos = end_pfn << PAGE_SHIFT;
-        }
-
-        /* big page (1G) range */
-        start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
-                         << (PUD_SHIFT - PAGE_SHIFT);
-        end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
-        if (start_pfn < end_pfn) {
-                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
-                                page_size_mask &
-                                 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
-                pos = end_pfn << PAGE_SHIFT;
-        }
-
-        /* tail is not big page (1G) alignment */
-        start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
-                         << (PMD_SHIFT - PAGE_SHIFT);
-        end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
-        if (start_pfn < end_pfn) {
-                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
-                                page_size_mask & (1<<PG_LEVEL_2M));
-                pos = end_pfn << PAGE_SHIFT;
-        }
-
-        /* tail is not big page (2M) alignment */
-        start_pfn = pos>>PAGE_SHIFT;
-        end_pfn = end>>PAGE_SHIFT;
-        nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
-
-        /* try to merge same page size and continuous */
-        for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
-                unsigned long old_start;
-                if (mr[i].end != mr[i+1].start ||
-                    mr[i].page_size_mask != mr[i+1].page_size_mask)
-                        continue;
-                /* move it */
-                old_start = mr[i].start;
-                memmove(&mr[i], &mr[i+1],
-                        (nr_range - 1 - i) * sizeof (struct map_range));
-                mr[i--].start = old_start;
-                nr_range--;
-        }
-
-        for (i = 0; i < nr_range; i++)
-                printk(KERN_DEBUG " %010lx - %010lx page %s\n",
-                                mr[i].start, mr[i].end,
-                        (mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
-                         (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));
-
-        if (!after_bootmem)
-                find_early_table_space(end, use_pse, use_gbpages);
-
-        for (i = 0; i < nr_range; i++)
-                last_map_addr = kernel_physical_mapping_init(
-                                        mr[i].start, mr[i].end,
-                                        mr[i].page_size_mask);
-
-        if (!after_bootmem)
-                mmu_cr4_features = read_cr4();
-        __flush_tlb_all();
-
-        if (!after_bootmem && table_end > table_start)
-                reserve_early(table_start << PAGE_SHIFT,
-                                 table_end << PAGE_SHIFT, "PGTABLE");
-
-        printk(KERN_INFO "last_map_addr: %lx end: %lx\n",
-                         last_map_addr, end);
-
-        if (!after_bootmem)
-                early_memtest(start, end);
-
-        return last_map_addr >> PAGE_SHIFT;
-}
-
 #ifndef CONFIG_NUMA
 void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn)
 {
@@ -876,28 +681,6 @@ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
 
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
-/*
- * devmem_is_allowed() checks to see if /dev/mem access to a certain address
- * is valid. The argument is a physical page number.
- *
- *
- * On x86, access has to be given to the first megabyte of ram because that area
- * contains bios code and data regions used by X and dosemu and similar apps.
- * Access has to be given to non-kernel-ram areas as well, these contain the PCI
- * mmio resources as well as potential bios/acpi data regions.
- */
-int devmem_is_allowed(unsigned long pagenr)
-{
-        if (pagenr <= 256)
-                return 1;
-        if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
-                return 0;
-        if (!page_is_ram(pagenr))
-                return 1;
-        return 0;
-}
-
-
 static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel,
                          kcore_modules, kcore_vsyscall;
 
@@ -947,43 +730,6 @@ void __init mem_init(void)
                         initsize >> 10);
 }
 
-void free_init_pages(char *what, unsigned long begin, unsigned long end)
-{
-        unsigned long addr = begin;
-
-        if (addr >= end)
-                return;
-
-        /*
-         * If debugging page accesses then do not free this memory but
-         * mark them not present - any buggy init-section access will
-         * create a kernel page fault:
-         */
-#ifdef CONFIG_DEBUG_PAGEALLOC
-        printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
-                begin, PAGE_ALIGN(end));
-        set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
-#else
-        printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
-
-        for (; addr < end; addr += PAGE_SIZE) {
-                ClearPageReserved(virt_to_page(addr));
-                init_page_count(virt_to_page(addr));
-                memset((void *)(addr & ~(PAGE_SIZE-1)),
-                        POISON_FREE_INITMEM, PAGE_SIZE);
-                free_page(addr);
-                totalram_pages++;
-        }
-#endif
-}
-
-void free_initmem(void)
-{
-        free_init_pages("unused kernel memory",
-                        (unsigned long)(&__init_begin),
-                        (unsigned long)(&__init_end));
-}
-
 #ifdef CONFIG_DEBUG_RODATA
 const int rodata_test_data = 0xC3;
 EXPORT_SYMBOL_GPL(rodata_test_data);
@@ -1049,13 +795,6 @@ void mark_rodata_ro(void)
 
 #endif
 
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-        free_init_pages("initrd memory", start, end);
-}
-#endif
-
 int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
                                    int flags)
 {