author:    Yinghai Lu <yinghai@kernel.org>    2012-11-16 22:39:06 -0500
committer: H. Peter Anvin <hpa@linux.intel.com>    2012-11-17 14:59:29 -0500
commit:    719272c45b821d38608fc333700bde1a89c56c59
tree:      cf215602048fcb36c8fc50c731db986d5cea51aa
parent:    ddd3509df8f8d4f1cf4784f559d702ce00dc8846
x86, mm: only call early_ioremap_page_table_range_init() once
On 32-bit, before the patchset that sets up page tables only for RAM, we called early_ioremap_page_table_range_init() only once. Now it is called during every init_memory_mapping() invocation when there are holes under max_low_pfn. It should be called only once, after all ranges under max_low_pfn have been mapped, just as before. That also avoids the risk of running out of pgt_buf space in the BRK area.

page_table_range_init() therefore needs to first count the pages required for the kmap page tables, and then use the newly added alloc_low_pages() to obtain them in sequence. That satisfies the requirement that the pages be handed out in low-to-high order.

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1353123563-3103-30-git-send-email-yinghai@kernel.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
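The shape of the change is a two-pass pattern: walk the range once to count how many page-table pages will be needed, grab them in a single bulk allocation, then hand them out one by one as the real walk runs. A minimal user-space sketch of that count-then-allocate idea, with all names being hypothetical stand-ins rather than kernel APIs:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

/* Hypothetical stand-in for the kmap-range test that decides
 * whether a given PMD slot needs a fresh page table. */
static int needs_table(unsigned long idx, unsigned long lo, unsigned long hi)
{
	return idx >= lo && idx <= hi;
}

int main(void)
{
	unsigned long start = 0, end = 16, kmap_lo = 4, kmap_hi = 7;
	unsigned long idx, count = 0;
	char *pool, *adr;

	/* Pass 1: count, mirroring page_table_range_init_count(). */
	for (idx = start; idx != end; idx++)
		if (needs_table(idx, kmap_lo, kmap_hi))
			count++;

	/* One bulk allocation stands in for alloc_low_pages(count);
	 * the pages will then be consumed strictly low-to-high. */
	pool = count ? malloc(count * PAGE_SIZE) : NULL;
	adr = pool;

	/* Pass 2: the real walk takes pages in sequence by bumping a
	 * cursor, like the *adr threaded into page_table_kmap_check(). */
	for (idx = start; idx != end; idx++) {
		if (needs_table(idx, kmap_lo, kmap_hi)) {
			printf("slot %lu -> page at offset %lu\n",
			       idx, (unsigned long)(adr - pool));
			adr += PAGE_SIZE;
		}
	}

	free(pool);
	return 0;
}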
Diffstat (limited to 'arch/x86')
 arch/x86/mm/init.c    | 13 +++++--------
 arch/x86/mm/init_32.c | 47 +++++++++++++++++++++++++++++++++++++++++++++------
 2 files changed, 46 insertions(+), 14 deletions(-)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index cb4f8ba70ecc..bed4888c6f4f 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -343,14 +343,6 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
 						   mr[i].page_size_mask);
 
-#ifdef CONFIG_X86_32
-	early_ioremap_page_table_range_init();
-
-	load_cr3(swapper_pg_dir);
-#endif
-
-	__flush_tlb_all();
-
 	add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT);
 
 	return ret >> PAGE_SHIFT;
@@ -447,7 +439,12 @@ void __init init_mem_mapping(void)
 		/* can we preseve max_low_pfn ?*/
 		max_low_pfn = max_pfn;
 	}
+#else
+	early_ioremap_page_table_range_init();
+	load_cr3(swapper_pg_dir);
+	__flush_tlb_all();
 #endif
+
 	early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
 }
 
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index a7f2df1cdcfd..0ae1ba8bc1b9 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -135,8 +135,40 @@ pte_t * __init populate_extra_pte(unsigned long vaddr)
 	return one_page_table_init(pmd) + pte_idx;
 }
 
+static unsigned long __init
+page_table_range_init_count(unsigned long start, unsigned long end)
+{
+	unsigned long count = 0;
+#ifdef CONFIG_HIGHMEM
+	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
+	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;
+	int pgd_idx, pmd_idx;
+	unsigned long vaddr;
+
+	if (pmd_idx_kmap_begin == pmd_idx_kmap_end)
+		return 0;
+
+	vaddr = start;
+	pgd_idx = pgd_index(vaddr);
+	pmd_idx = pmd_index(vaddr);
+
+	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd_idx++) {
+		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
+							pmd_idx++) {
+			if ((vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin &&
+			    (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end)
+				count++;
+			vaddr += PMD_SIZE;
+		}
+		pmd_idx = 0;
+	}
+#endif
+	return count;
+}
+
 static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
-					   unsigned long vaddr, pte_t *lastpte)
+					   unsigned long vaddr, pte_t *lastpte,
+					   void **adr)
 {
 #ifdef CONFIG_HIGHMEM
 	/*
@@ -150,16 +182,15 @@ static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
 
 	if (pmd_idx_kmap_begin != pmd_idx_kmap_end
 	    && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
-	    && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end
-	    && ((__pa(pte) >> PAGE_SHIFT) < pgt_buf_start
-	    || (__pa(pte) >> PAGE_SHIFT) >= pgt_buf_end)) {
+	    && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end) {
 		pte_t *newpte;
 		int i;
 
 		BUG_ON(after_bootmem);
-		newpte = alloc_low_page();
+		newpte = *adr;
 		for (i = 0; i < PTRS_PER_PTE; i++)
 			set_pte(newpte + i, pte[i]);
+		*adr = (void *)(((unsigned long)(*adr)) + PAGE_SIZE);
 
 		paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
 		set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
@@ -193,6 +224,11 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
 	pgd_t *pgd;
 	pmd_t *pmd;
 	pte_t *pte = NULL;
+	unsigned long count = page_table_range_init_count(start, end);
+	void *adr = NULL;
+
+	if (count)
+		adr = alloc_low_pages(count);
 
 	vaddr = start;
 	pgd_idx = pgd_index(vaddr);
@@ -205,7 +241,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
 		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
 							pmd++, pmd_idx++) {
 			pte = page_table_kmap_check(one_page_table_init(pmd),
-					pmd, vaddr, pte);
+					pmd, vaddr, pte, &adr);
 
 			vaddr += PMD_SIZE;
 		}
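The new void **adr parameter acts as a shared cursor: page_table_range_init() owns the pre-allocated block, and each page_table_kmap_check() call that needs a page takes the next one and advances the cursor, so consumption stays in low-to-high order. A standalone sketch of that mechanism, with illustrative names rather than kernel code:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* The callee takes the next page from the caller's cursor and
 * bumps it, just as page_table_kmap_check() does with *adr. */
static void *take_page(void **adr)
{
	void *page = *adr;

	*adr = (void *)((unsigned long)*adr + PAGE_SIZE);
	return page;
}

int main(void)
{
	static char pool[3 * PAGE_SIZE];	/* stands in for alloc_low_pages(3) */
	void *adr = pool;
	int i;

	for (i = 0; i < 3; i++)
		printf("page %d at offset %lu\n",
		       i, (unsigned long)((char *)take_page(&adr) - pool));
	return 0;
}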