about summary refs log tree commit diff stats
path: root/arch/x86/mm/init_32.c
diff options
context:
space:
mode:
author: Yinghai Lu <yhlu.kernel@gmail.com> 2008-06-28 06:30:39 -0400
committer: Ingo Molnar <mingo@elte.hu> 2008-07-08 07:16:06 -0400
commit7482b0e962e128c5b574aa29761f97164189ef14 (patch)
tree11f99e89b1957c8b47fc1a17cfafc1899471b112 /arch/x86/mm/init_32.c
parentdf366e9822beca97115ba9745cbe1ea1f26fb111 (diff)
x86: fix init_memory_mapping over boundary v3
some ram-end boundary only has page alignment, instead of 2M alignment. v2: make init_memory_mapping more solid: start could be any value other than 0 v3: fix NON PAE by handling left over in kernel_physical_mapping Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/mm/init_32.c')
-rw-r--r--arch/x86/mm/init_32.c24
1 file changed, 14 insertions, 10 deletions
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index b9cf7f705302..90ca67be965b 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -195,7 +195,7 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
195 unsigned pages_2m = 0, pages_4k = 0; 195 unsigned pages_2m = 0, pages_4k = 0;
196 unsigned limit_pfn = end >> PAGE_SHIFT; 196 unsigned limit_pfn = end >> PAGE_SHIFT;
197 197
198 pgd_idx = pgd_index(PAGE_OFFSET); 198 pgd_idx = pgd_index(start + PAGE_OFFSET);
199 pgd = pgd_base + pgd_idx; 199 pgd = pgd_base + pgd_idx;
200 pfn = start >> PAGE_SHIFT; 200 pfn = start >> PAGE_SHIFT;
201 201
@@ -218,7 +218,8 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
218 * and overlapping MTRRs into large pages can cause 218 * and overlapping MTRRs into large pages can cause
219 * slowdowns. 219 * slowdowns.
220 */ 220 */
221 if (cpu_has_pse && !(pgd_idx == 0 && pmd_idx == 0)) { 221 if (cpu_has_pse && !(pgd_idx == 0 && pmd_idx == 0) &&
222 (pfn + PTRS_PER_PTE) <= limit_pfn) {
222 unsigned int addr2; 223 unsigned int addr2;
223 pgprot_t prot = PAGE_KERNEL_LARGE; 224 pgprot_t prot = PAGE_KERNEL_LARGE;
224 225
@@ -233,13 +234,12 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
233 set_pmd(pmd, pfn_pmd(pfn, prot)); 234 set_pmd(pmd, pfn_pmd(pfn, prot));
234 235
235 pfn += PTRS_PER_PTE; 236 pfn += PTRS_PER_PTE;
236 max_pfn_mapped = pfn;
237 continue; 237 continue;
238 } 238 }
239 pte = one_page_table_init(pmd); 239 pte = one_page_table_init(pmd);
240 240
241 for (pte_ofs = 0; 241 for (pte_ofs = 0;
242 pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn; 242 pte_ofs < PTRS_PER_PTE && pfn < limit_pfn;
243 pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) { 243 pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
244 pgprot_t prot = PAGE_KERNEL; 244 pgprot_t prot = PAGE_KERNEL;
245 245
@@ -249,7 +249,6 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
249 pages_4k++; 249 pages_4k++;
250 set_pte(pte, pfn_pte(pfn, prot)); 250 set_pte(pte, pfn_pte(pfn, prot));
251 } 251 }
252 max_pfn_mapped = pfn;
253 } 252 }
254 } 253 }
255 update_page_count(PG_LEVEL_2M, pages_2m); 254 update_page_count(PG_LEVEL_2M, pages_2m);
@@ -729,7 +728,7 @@ void __init setup_bootmem_allocator(void)
729 728
730static void __init find_early_table_space(unsigned long end) 729static void __init find_early_table_space(unsigned long end)
731{ 730{
732 unsigned long puds, pmds, tables, start; 731 unsigned long puds, pmds, ptes, tables, start;
733 732
734 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; 733 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
735 tables = PAGE_ALIGN(puds * sizeof(pud_t)); 734 tables = PAGE_ALIGN(puds * sizeof(pud_t));
@@ -737,10 +736,15 @@ static void __init find_early_table_space(unsigned long end)
737 pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT; 736 pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
738 tables += PAGE_ALIGN(pmds * sizeof(pmd_t)); 737 tables += PAGE_ALIGN(pmds * sizeof(pmd_t));
739 738
740 if (!cpu_has_pse) { 739 if (cpu_has_pse) {
741 int ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT; 740 unsigned long extra;
742 tables += PAGE_ALIGN(ptes * sizeof(pte_t)); 741 extra = end - ((end>>21) << 21);
743 } 742 extra += (2UL<<20);
743 ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
744 } else
745 ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
746
747 tables += PAGE_ALIGN(ptes * sizeof(pte_t));
744 748
745 /* 749 /*
746 * RED-PEN putting page tables only on node 0 could 750 * RED-PEN putting page tables only on node 0 could