author		Yinghai Lu <yhlu.kernel@gmail.com>	2008-06-29 03:39:06 -0400
committer	Ingo Molnar <mingo@elte.hu>		2008-07-08 07:16:07 -0400
commit		a04ad82d0bff4bb564f290eb50982e02458592d9 (patch)
tree		7b5166e5df65a47d88cab708d246f8c9948ac286 /arch
parent		b4df32f4aeef8794d0135fc8dc250acb44cfee60 (diff)
x86: fix init_memory_mapping over boundary, v4
Use PMD_SHIFT to calculate the boundary, and adjust the size of the
pre-allocated page tables to match.

Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
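As a worked example of that boundary arithmetic (the addresses are made up
for illustration; PAE is assumed, so PMD_SHIFT = 21 and large pages are
2 MB), a request to map 0x00300000-0x12345000 is now split into three
passes:

    head: 0x00300000-0x00400000   4k pages, up to the next 2 MB boundary
    big:  0x00400000-0x12200000   2 MB pages
    tail: 0x12200000-0x12345000   4k pages for the unaligned remainder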
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/mm/init_32.c	89
1 file changed, 67 insertions(+), 22 deletions(-)
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 90ca67be965b..aa5e37c9f4b4 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -184,8 +184,9 @@ static inline int is_kernel_text(unsigned long addr)
  * PAGE_OFFSET:
  */
 static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
-						unsigned long start,
-						unsigned long end)
+						unsigned long start_pfn,
+						unsigned long end_pfn,
+						int use_pse)
 {
 	int pgd_idx, pmd_idx, pte_ofs;
 	unsigned long pfn;
@@ -193,33 +194,33 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
 	pmd_t *pmd;
 	pte_t *pte;
 	unsigned pages_2m = 0, pages_4k = 0;
-	unsigned limit_pfn = end >> PAGE_SHIFT;
 
-	pgd_idx = pgd_index(start + PAGE_OFFSET);
-	pgd = pgd_base + pgd_idx;
-	pfn = start >> PAGE_SHIFT;
+	if (!cpu_has_pse)
+		use_pse = 0;
 
+	pfn = start_pfn;
+	pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
+	pgd = pgd_base + pgd_idx;
 	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
 		pmd = one_md_table_init(pgd);
-		if (pfn >= limit_pfn)
-			continue;
 
-		for (pmd_idx = 0;
-		     pmd_idx < PTRS_PER_PMD && pfn < limit_pfn;
+		if (pfn >= end_pfn)
+			continue;
+#ifdef CONFIG_X86_PAE
+		pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
+		pmd += pmd_idx;
+#else
+		pmd_idx = 0;
+#endif
+		for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
 		     pmd++, pmd_idx++) {
 			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
 
 			/*
 			 * Map with big pages if possible, otherwise
 			 * create normal page tables:
-			 *
-			 * Don't use a large page for the first 2/4MB of memory
-			 * because there are often fixed size MTRRs in there
-			 * and overlapping MTRRs into large pages can cause
-			 * slowdowns.
 			 */
-			if (cpu_has_pse && !(pgd_idx == 0 && pmd_idx == 0) &&
-			    (pfn + PTRS_PER_PTE) <= limit_pfn) {
+			if (use_pse) {
 				unsigned int addr2;
 				pgprot_t prot = PAGE_KERNEL_LARGE;
 
@@ -238,8 +239,9 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
 			}
 			pte = one_page_table_init(pmd);
 
-			for (pte_ofs = 0;
-			     pte_ofs < PTRS_PER_PTE && pfn < limit_pfn;
+			pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
+			pte += pte_ofs;
+			for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
 			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
 				pgprot_t prot = PAGE_KERNEL;
 
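The pmd_index()/pte_index() changes above are what let the mapping loop
resume in the middle of a page table instead of insisting on a large-page
aligned start. A minimal standalone sketch of the index math (illustrative
only; the constants assume PAE, and pte_index() here mirrors the kernel
helper of the same name; PAGE_OFFSET is omitted because it is PMD-aligned
and does not change the slot):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PTRS_PER_PTE	512	/* PAE: 512 eight-byte entries per table */

/* slot of an address within its page table, like the kernel's pte_index() */
static unsigned long pte_index(unsigned long addr)
{
	return (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

int main(void)
{
	unsigned long pfn = 0x300;	/* 3 MB: halfway through a 2 MB pmd */

	/* the pte loop now starts at slot 256 rather than slot 0 */
	printf("pte_ofs = %lu\n", pte_index(pfn << PAGE_SHIFT));
	return 0;
}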
@@ -738,14 +740,18 @@ static void __init find_early_table_space(unsigned long end)
 
 	if (cpu_has_pse) {
 		unsigned long extra;
-		extra = end - ((end>>21) << 21);
-		extra += (2UL<<20);
+
+		extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
+		extra += PMD_SIZE;
 		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	} else
 		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
 	tables += PAGE_ALIGN(ptes * sizeof(pte_t));
 
+	/* for fixmap */
+	tables += PAGE_SIZE * 2;
+
 	/*
 	 * RED-PEN putting page tables only on node 0 could
 	 * cause a hotspot and fill up ZONE_DMA. The page tables
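To see what the PMD_SHIFT/PMD_SIZE version of the estimate works out to, a
back-of-the-envelope rendering follows (standalone and illustrative; PAE
with 8-byte ptes is assumed, and the end address is made up). Only the
non-PMD-aligned tail needs 4k ptes once large pages cover the middle, plus
one PMD of slack for an unaligned head:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PMD_SHIFT	21			/* PAE: 2 MB per pmd */
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long end = 0x12345000;		/* not 2 MB aligned */
	unsigned long extra, ptes, tables;

	/* the remainder below the last PMD boundary needs 4k ptes... */
	extra = end - ((end >> PMD_SHIFT) << PMD_SHIFT);
	/* ...plus one PMD of slack in case the head is unaligned too */
	extra += PMD_SIZE;
	ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* 8 bytes per pte under PAE, rounded up to whole pages */
	tables = PAGE_ALIGN(ptes * 8);
	tables += PAGE_SIZE * 2;		/* for fixmap */

	printf("ptes = %lu, tables = %lu bytes\n", ptes, tables);
	return 0;
}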
@@ -770,6 +776,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 						unsigned long end)
 {
 	pgd_t *pgd_base = swapper_pg_dir;
+	unsigned long start_pfn, end_pfn;
+	unsigned long big_page_start;
 
 	/*
 	 * Find space for the kernel direct mapping tables.
@@ -794,7 +802,44 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 		__PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
 	}
 
-	kernel_physical_mapping_init(pgd_base, start, end);
+	/*
+	 * Don't use a large page for the first 2/4MB of memory
+	 * because there are often fixed size MTRRs in there
+	 * and overlapping MTRRs into large pages can cause
+	 * slowdowns.
+	 */
+	big_page_start = PMD_SIZE;
+
+	if (start < big_page_start) {
+		start_pfn = start >> PAGE_SHIFT;
+		end_pfn = min(big_page_start>>PAGE_SHIFT, end>>PAGE_SHIFT);
+	} else {
+		/* head is not big page alignment ? */
+		start_pfn = start >> PAGE_SHIFT;
+		end_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
+				 << (PMD_SHIFT - PAGE_SHIFT);
+	}
+	if (start_pfn < end_pfn)
+		kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn, 0);
+
+	/* big page range */
+	start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
+			 << (PMD_SHIFT - PAGE_SHIFT);
+	if (start_pfn < (big_page_start >> PAGE_SHIFT))
+		start_pfn = big_page_start >> PAGE_SHIFT;
+	end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
+	if (start_pfn < end_pfn)
+		kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn,
+					     cpu_has_pse);
+
+	/* tail is not big page alignment ? */
+	start_pfn = end_pfn;
+	if (start_pfn > (big_page_start>>PAGE_SHIFT)) {
+		end_pfn = end >> PAGE_SHIFT;
+		if (start_pfn < end_pfn)
+			kernel_physical_mapping_init(pgd_base, start_pfn,
+						     end_pfn, 0);
+	}
 
 	early_ioremap_page_table_range_init(pgd_base);
 
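Finally, a standalone sketch of the three-way split added to
init_memory_mapping() above. The pfn arithmetic and the start_pfn < end_pfn
guards are taken from the hunk, but the split() helper and the test values
are made up, and PAE (PMD_SHIFT = 21) is assumed. Running it shows why the
guards matter: a range that ends below 2 MB is mapped entirely by the head
pass, and the big and tail passes fall out as empty:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PMD_SIZE	(1UL << PMD_SHIFT)

static unsigned long min(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

static void split(unsigned long start, unsigned long end)
{
	unsigned long big_page_start = PMD_SIZE;
	unsigned long start_pfn, end_pfn;

	/* head: below 2 MB, or up to the first 2 MB boundary */
	if (start < big_page_start) {
		start_pfn = start >> PAGE_SHIFT;
		end_pfn = min(big_page_start >> PAGE_SHIFT,
			      end >> PAGE_SHIFT);
	} else {
		start_pfn = start >> PAGE_SHIFT;
		end_pfn = ((start + (PMD_SIZE - 1)) >> PMD_SHIFT)
				<< (PMD_SHIFT - PAGE_SHIFT);
	}
	if (start_pfn < end_pfn)
		printf("head: pfn %#lx-%#lx (4k)\n", start_pfn, end_pfn);

	/* big page range: the PMD-aligned middle */
	start_pfn = ((start + (PMD_SIZE - 1)) >> PMD_SHIFT)
			<< (PMD_SHIFT - PAGE_SHIFT);
	if (start_pfn < (big_page_start >> PAGE_SHIFT))
		start_pfn = big_page_start >> PAGE_SHIFT;
	end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	if (start_pfn < end_pfn)
		printf("big : pfn %#lx-%#lx (2M)\n", start_pfn, end_pfn);

	/* tail: whatever is left above the last PMD boundary */
	start_pfn = end_pfn;
	if (start_pfn > (big_page_start >> PAGE_SHIFT)) {
		end_pfn = end >> PAGE_SHIFT;
		if (start_pfn < end_pfn)
			printf("tail: pfn %#lx-%#lx (4k)\n",
			       start_pfn, end_pfn);
	}
}

int main(void)
{
	split(0x00000000, 0x00100000);	/* entirely below 2 MB: head only */
	split(0x00300000, 0x12345000);	/* unaligned head and tail */
	return 0;
}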