author	Yinghai Lu <yhlu.kernel@gmail.com>	2008-06-24 15:18:14 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-08 07:10:32 -0400
commit	4e29684c40f2a332ba4d05f6482d5807725d5624 (patch)
tree	297bc3f125ce07a915f7b2cf42c1f32a82453b42
parent	c3c2fee38462fa34b90e0a5427c7fc564bb5c96c (diff)
x86: introduce init_memory_mapping for 32bit #1
... so we can use memory below max_low_pfn earlier. This allows us to move
several functions earlier in the boot process, instead of waiting until after
paging_init. That includes moving relocate_initrd() earlier in the bootup, and
the kva-related early setup done in initmem_init. (in follow-up patches)

Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
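As a rough sketch of the boot-order change this makes, the sequence in
setup_arch() becomes: find the lowmem range, map it, then place the initrd.
The stub program below only models that ordering; the function bodies are
standalone placeholders, not the kernel's implementations.

/* Standalone model of the new 32-bit boot order (placeholder stubs). */
#include <stdio.h>

#define PAGE_SHIFT 12

static unsigned long max_low_pfn;     /* set by find_low_pfn_range() */
static unsigned long max_pfn_mapped;  /* set by init_memory_mapping() */

static void find_low_pfn_range(void)
{
	max_low_pfn = 0x38000;        /* e.g. 896 MB of lowmem */
}

static void init_memory_mapping(unsigned long start, unsigned long end)
{
	/* the real code builds page tables for [start, end) */
	max_pfn_mapped = end >> PAGE_SHIFT;
}

static void reserve_initrd(void)
{
	/* may now use any memory below max_low_pfn, since it is mapped */
	printf("initrd can go anywhere below pfn 0x%lx\n", max_pfn_mapped);
}

int main(void)
{
	find_low_pfn_range();                               /* max_low_pfn updated */
	init_memory_mapping(0, max_low_pfn << PAGE_SHIFT);  /* lowmem mapped early */
	reserve_initrd();                                   /* lowmem already usable */
	return 0;
}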
-rw-r--r--	arch/x86/kernel/setup_32.c	10
-rw-r--r--	arch/x86/mm/init_32.c	141
-rw-r--r--	include/asm-x86/page_32.h	2
3 files changed, 120 insertions, 33 deletions
diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c
index bba8d57bd7d8..03007cada0d1 100644
--- a/arch/x86/kernel/setup_32.c
+++ b/arch/x86/kernel/setup_32.c
@@ -226,10 +226,8 @@ static void __init reserve_initrd(void)
 	}
 
 	/* We need to move the initrd down into lowmem */
-	ramdisk_target = max_pfn_mapped<<PAGE_SHIFT;
-	ramdisk_here = find_e820_area(min(ramdisk_target, end_of_lowmem>>1),
-			 end_of_lowmem, ramdisk_size,
-			 PAGE_SIZE);
+	ramdisk_here = find_e820_area(0, end_of_lowmem, ramdisk_size,
+			 PAGE_SIZE);
 
 	if (ramdisk_here == -1ULL)
 		panic("Cannot find place for new RAMDISK of size %lld\n",
@@ -433,8 +431,12 @@ void __init setup_arch(char **cmdline_p)
 		max_pfn = e820_end_of_ram();
 	}
 
+	/* max_low_pfn get updated here */
 	find_low_pfn_range();
 
+	/* max_pfn_mapped is updated here */
+	init_memory_mapping(0, (max_low_pfn << PAGE_SHIFT));
+
 	reserve_initrd();
 
 	dmi_scan_machine();
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 20ca29591abe..619058e6bff8 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -57,6 +57,27 @@ unsigned long highstart_pfn, highend_pfn;
 
 static noinline int do_test_wp_bit(void);
 
+
+static unsigned long __initdata table_start;
+static unsigned long __meminitdata table_end;
+static unsigned long __meminitdata table_top;
+
+static int __initdata after_init_bootmem;
+
+static __init void *alloc_low_page(unsigned long *phys)
+{
+	unsigned long pfn = table_end++;
+	void *adr;
+
+	if (pfn >= table_top)
+		panic("alloc_low_page: ran out of memory");
+
+	adr = __va(pfn * PAGE_SIZE);
+	memset(adr, 0, PAGE_SIZE);
+	*phys = pfn * PAGE_SIZE;
+	return adr;
+}
+
 /*
  * Creates a middle page table and puts a pointer to it in the
  * given global directory entry. This only returns the gd entry
@@ -68,9 +89,12 @@ static pmd_t * __init one_md_table_init(pgd_t *pgd)
 	pmd_t *pmd_table;
 
 #ifdef CONFIG_X86_PAE
+	unsigned long phys;
 	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
-		pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
-
+		if (after_init_bootmem)
+			pmd_table = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+		else
+			pmd_table = (pmd_t *)alloc_low_page(&phys);
 		paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
 		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
 		pud = pud_offset(pgd, 0);
@@ -92,12 +116,16 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
 	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
 		pte_t *page_table = NULL;
 
+		if (after_init_bootmem) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
-		page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
+			page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
 #endif
-		if (!page_table) {
-			page_table =
+			if (!page_table)
+				page_table =
 				(pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+		} else {
+			unsigned long phys;
+			page_table = (pte_t *)alloc_low_page(&phys);
 		}
 
 		paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
@@ -155,7 +183,9 @@ static inline int is_kernel_text(unsigned long addr)
  * of max_low_pfn pages, by creating page tables starting from address
  * PAGE_OFFSET:
  */
-static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
+static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
+						unsigned long start,
+						unsigned long end)
 {
 	int pgd_idx, pmd_idx, pte_ofs;
 	unsigned long pfn;
@@ -163,18 +193,19 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 	pmd_t *pmd;
 	pte_t *pte;
 	unsigned pages_2m = 0, pages_4k = 0;
+	unsigned limit_pfn = end >> PAGE_SHIFT;
 
 	pgd_idx = pgd_index(PAGE_OFFSET);
 	pgd = pgd_base + pgd_idx;
-	pfn = 0;
+	pfn = start >> PAGE_SHIFT;
 
 	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
 		pmd = one_md_table_init(pgd);
-		if (pfn >= max_low_pfn)
+		if (pfn >= limit_pfn)
 			continue;
 
 		for (pmd_idx = 0;
-		     pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn;
+		     pmd_idx < PTRS_PER_PMD && pfn < limit_pfn;
 		     pmd++, pmd_idx++) {
 			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
 
@@ -418,20 +449,7 @@ static void __init pagetable_init(void)
 
 	paravirt_pagetable_setup_start(pgd_base);
 
-	/* Enable PSE if available */
-	if (cpu_has_pse)
-		set_in_cr4(X86_CR4_PSE);
-
-	/* Enable PGE if available */
-	if (cpu_has_pge) {
-		set_in_cr4(X86_CR4_PGE);
-		__PAGE_KERNEL |= _PAGE_GLOBAL;
-		__PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
-	}
-
-	kernel_physical_mapping_init(pgd_base);
 	remap_numa_kva();
-
 	/*
 	 * Fixed mappings, only the page table structure has to be
 	 * created - mappings will be set by set_fixmap():
@@ -703,6 +721,7 @@ void __init setup_bootmem_allocator(void)
 		free_bootmem_with_active_regions(i, max_low_pfn);
 	early_res_to_bootmem(0, max_low_pfn<<PAGE_SHIFT);
 
+	after_init_bootmem = 1;
 }
 
 /*
@@ -723,6 +742,77 @@ static void __init remapped_pgdat_init(void)
 	}
 }
 
+static void __init find_early_table_space(unsigned long end)
+{
+	unsigned long puds, pmds, tables, start;
+
+	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
+	tables = PAGE_ALIGN(puds * sizeof(pud_t));
+
+	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
+	tables += PAGE_ALIGN(pmds * sizeof(pmd_t));
+
+	/*
+	 * RED-PEN putting page tables only on node 0 could
+	 * cause a hotspot and fill up ZONE_DMA. The page tables
+	 * need roughly 0.5KB per GB.
+	 */
+	start = 0x7000;
+	table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
+					tables, PAGE_SIZE);
+	if (table_start == -1UL)
+		panic("Cannot find space for the kernel page tables");
+
+	table_start >>= PAGE_SHIFT;
+	table_end = table_start;
+	table_top = table_start + (tables>>PAGE_SHIFT);
+
+	printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
+		end, table_start << PAGE_SHIFT,
+		(table_start << PAGE_SHIFT) + tables);
+}
+
+unsigned long __init_refok init_memory_mapping(unsigned long start,
+						unsigned long end)
+{
+	pgd_t *pgd_base = swapper_pg_dir;
+
+	/*
+	 * Find space for the kernel direct mapping tables.
+	 */
+	if (!after_init_bootmem)
+		find_early_table_space(end);
+
+#ifdef CONFIG_X86_PAE
+	set_nx();
+	if (nx_enabled)
+		printk(KERN_INFO "NX (Execute Disable) protection: active\n");
+#endif
+
+	/* Enable PSE if available */
+	if (cpu_has_pse)
+		set_in_cr4(X86_CR4_PSE);
+
+	/* Enable PGE if available */
+	if (cpu_has_pge) {
+		set_in_cr4(X86_CR4_PGE);
+		__PAGE_KERNEL |= _PAGE_GLOBAL;
+		__PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
+	}
+
+	kernel_physical_mapping_init(pgd_base, start, end);
+
+	load_cr3(swapper_pg_dir);
+
+	__flush_tlb_all();
+
+	if (!after_init_bootmem)
+		reserve_early(table_start << PAGE_SHIFT,
+				table_end << PAGE_SHIFT, "PGTABLE");
+
+	return end >> PAGE_SHIFT;
+}
+
 /*
  * paging_init() sets up the page tables - note that the first 8MB are
  * already mapped by head.S.
@@ -732,15 +822,8 @@ static void __init remapped_pgdat_init(void)
  */
 void __init paging_init(void)
 {
-#ifdef CONFIG_X86_PAE
-	set_nx();
-	if (nx_enabled)
-		printk(KERN_INFO "NX (Execute Disable) protection: active\n");
-#endif
 	pagetable_init();
 
-	load_cr3(swapper_pg_dir);
-
 	__flush_tlb_all();
 
 	kmap_init();
diff --git a/include/asm-x86/page_32.h b/include/asm-x86/page_32.h
index 3810d14051e8..4ae1daba129b 100644
--- a/include/asm-x86/page_32.h
+++ b/include/asm-x86/page_32.h
@@ -93,6 +93,8 @@ extern int sysctl_legacy_va_layout;
 #define MAXMEM (-__PAGE_OFFSET - __VMALLOC_RESERVE)
 
 extern void find_low_pfn_range(void);
+extern unsigned long init_memory_mapping(unsigned long start,
+					unsigned long end);
 extern void initmem_init(unsigned long, unsigned long);
 extern void zone_sizes_init(void);
 extern void setup_bootmem_allocator(void);
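To make the early-allocation path above easier to follow: before bootmem is
available, alloc_low_page() hands out zeroed pages from a contiguous block of
page frames that find_early_table_space() reserved out of e820, advancing a
simple bump pointer (table_end) with no free list. Below is a minimal
userspace model of that pattern; the pool, sizes, and pfn values are
illustrative only, not kernel API.

/* Minimal userspace model of the alloc_low_page() bump allocator. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096UL

static unsigned long table_start, table_end, table_top;
static void *pool;                        /* stands in for the e820 range */

static void *alloc_low_page(unsigned long *phys)
{
	unsigned long pfn = table_end++;  /* bump pointer, never freed */
	void *adr;

	if (pfn >= table_top) {
		fprintf(stderr, "alloc_low_page: ran out of memory\n");
		exit(1);
	}
	adr = (char *)pool + (pfn - table_start) * PAGE_SIZE;
	memset(adr, 0, PAGE_SIZE);        /* page tables must start zeroed */
	*phys = pfn * PAGE_SIZE;
	return adr;
}

int main(void)
{
	unsigned long phys;

	/* "find_early_table_space": reserve 4 frames starting at pfn 7 */
	table_start = table_end = 7;
	table_top = table_start + 4;
	pool = malloc(4 * PAGE_SIZE);

	void *pt = alloc_low_page(&phys);
	printf("page table at %p, phys 0x%lx\n", pt, phys);
	free(pool);
	return 0;
}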