author	Jan Beulich <jbeulich@novell.com>	2009-01-16 06:59:33 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-01-16 07:47:04 -0500
commit	a3c6018e565dc07cf3738ace6bbe412f97b1bba8
tree	1c88b94917a479f4a0d0674168ef7aef98f6e555	/arch/x86/mm/init_32.c
parent	18c07cf530cf4aa8b7551801f68ed40db5ee4e45
x86: fix assumed to be contiguous leaf page tables for kmap_atomic region (take 2)
Debugging and original patch from Nick Piggin <npiggin@suse.de>

The early fixmap pmd entry inserted at the very top of the KVA is causing the subsequent fixmap mapping code to not provide physically linear pte pages over the kmap atomic portion of the fixmap (which relies on said property to calculate pte addresses).

This has caused weird boot failures in kmap_atomic much later in the boot process (initial userspace faults) on a 32-bit PAE system with a larger number of CPUs (smaller CPU counts tend not to run over into the next page so don't show up the problem).

Solve this by attempting to clear out the page table, and copy any of its entries to the new one. Also, add a bug if a nonlinear condition is encountered and can't be resolved, which might save some hours of debugging if this fragile scheme ever breaks again...

Once we have such logic, we can also use it to eliminate the early ioremap trickery around the page table setup for the fixmap area. This also fixes potential issues with FIX_* entries sharing the leaf page table with the early ioremap ones getting discarded by early_ioremap_clear() and not restored by early_ioremap_reset(). It at once eliminates the temporary (and configuration, namely NR_CPUS, dependent) unavailability of early fixed mappings during the time the fixmap area page tables get constructed.

Finally, also replace the hard coded calculation of the initial table space needed for the fixmap area with a proper one, allowing kernels configured for large CPU counts to actually boot.

Based-on: Nick Piggin <npiggin@suse.de>
Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
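For reference, the "physically linear pte pages" property the message refers to is what lets kmap_atomic() compute a pte address by plain pointer arithmetic from one cached base pte. Below is a simplified, illustrative sketch of that calculation, modelled on arch/x86/mm/highmem_32.c of this era (not the verbatim source; preemption/pagefault bookkeeping and highmem checks are omitted):

/*
 * Sketch only: kmap_atomic() indexes the kmap pte straight off a single
 * cached base pointer.  That is only correct when the leaf page tables
 * backing FIX_KMAP_BEGIN..FIX_KMAP_END are physically contiguous - the
 * property page_table_kmap_check() below enforces.
 */
static pte_t *kmap_pte;		/* pte for __fix_to_virt(FIX_KMAP_BEGIN) */

static void *kmap_atomic_sketch(struct page *page, enum km_type type)
{
	enum fixed_addresses idx = type + KM_TYPE_NR * smp_processor_id();
	unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

	/*
	 * Fixmap addresses grow downwards, so the pte for slot
	 * FIX_KMAP_BEGIN + idx is assumed to sit at kmap_pte - idx.
	 * A stray pte page left behind by the early fixmap breaks
	 * exactly this assumption.
	 */
	set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
	__flush_tlb_one(vaddr);

	return (void *)vaddr;
}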
Diffstat (limited to 'arch/x86/mm/init_32.c')
-rw-r--r--	arch/x86/mm/init_32.c	| 48
1 file changed, 45 insertions(+), 3 deletions(-)
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 88f1b10de3b..2cef0507441 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -138,6 +138,47 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
 	return pte_offset_kernel(pmd, 0);
 }
 
+static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
+					   unsigned long vaddr, pte_t *lastpte)
+{
+#ifdef CONFIG_HIGHMEM
+	/*
+	 * Something (early fixmap) may already have put a pte
+	 * page here, which causes the page table allocation
+	 * to become nonlinear. Attempt to fix it, and if it
+	 * is still nonlinear then we have to bug.
+	 */
+	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
+	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;
+
+	if (pmd_idx_kmap_begin != pmd_idx_kmap_end
+	    && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
+	    && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end
+	    && ((__pa(pte) >> PAGE_SHIFT) < table_start
+		|| (__pa(pte) >> PAGE_SHIFT) >= table_end)) {
+		pte_t *newpte;
+		int i;
+
+		BUG_ON(after_init_bootmem);
+		newpte = alloc_low_page();
+		for (i = 0; i < PTRS_PER_PTE; i++)
+			set_pte(newpte + i, pte[i]);
+
+		paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
+		set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
+		BUG_ON(newpte != pte_offset_kernel(pmd, 0));
+		__flush_tlb_all();
+
+		paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
+		pte = newpte;
+	}
+	BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
+	       && vaddr > fix_to_virt(FIX_KMAP_END)
+	       && lastpte && lastpte + PTRS_PER_PTE != pte);
+#endif
+	return pte;
+}
+
 /*
  * This function initializes a certain range of kernel virtual memory
  * with new bootmem page tables, everywhere page tables are missing in
@@ -154,6 +195,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
 	unsigned long vaddr;
 	pgd_t *pgd;
 	pmd_t *pmd;
+	pte_t *pte = NULL;
 
 	vaddr = start;
 	pgd_idx = pgd_index(vaddr);
@@ -165,7 +207,8 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
 		pmd = pmd + pmd_index(vaddr);
 		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
 							pmd++, pmd_idx++) {
-			one_page_table_init(pmd);
+			pte = page_table_kmap_check(one_page_table_init(pmd),
+							pmd, vaddr, pte);
 
 			vaddr += PMD_SIZE;
 		}
@@ -508,7 +551,6 @@ static void __init early_ioremap_page_table_range_init(pgd_t *pgd_base)
 	 * Fixed mappings, only the page table structure has to be
 	 * created - mappings will be set by set_fixmap():
 	 */
-	early_ioremap_clear();
 	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
 	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
 	page_table_range_init(vaddr, end, pgd_base);
@@ -801,7 +843,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse)
 	tables += PAGE_ALIGN(ptes * sizeof(pte_t));
 
 	/* for fixmap */
-	tables += PAGE_SIZE * 2;
+	tables += PAGE_ALIGN(__end_of_fixed_addresses * sizeof(pte_t));
 
 	/*
 	 * RED-PEN putting page tables only on node 0 could