Diffstat (limited to 'arch/x86')
 arch/x86/include/asm/io.h |  1 -
 arch/x86/mm/init_32.c     | 48 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---
 arch/x86/mm/ioremap.c     | 25 -------------------------
 3 files changed, 45 insertions(+), 29 deletions(-)
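
In short, as this diff reads: instead of tearing down the boot-time fixmap pte page in early_ioremap_clear() and re-establishing its mappings in early_ioremap_reset(), the early pte page is left in place and page_table_range_init() learns to detect it, copying it into the linear bootmem table area when it would otherwise make the kmap-region page tables nonlinear (see page_table_kmap_check() below). The fixmap page-table reservation is likewise sized from __end_of_fixed_addresses rather than hard-coded at two pages.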
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 05cfed4485fa..1dbbdf4be9b4 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -99,7 +99,6 @@ extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size);
  * A boot-time mapping is currently limited to at most 16 pages.
  */
 extern void early_ioremap_init(void);
-extern void early_ioremap_clear(void);
 extern void early_ioremap_reset(void);
 extern void __iomem *early_ioremap(unsigned long offset, unsigned long size);
 extern void __iomem *early_memremap(unsigned long offset, unsigned long size);
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 88f1b10de3be..2cef05074413 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -138,6 +138,47 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
 	return pte_offset_kernel(pmd, 0);
 }
 
+static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
+					   unsigned long vaddr, pte_t *lastpte)
+{
+#ifdef CONFIG_HIGHMEM
+	/*
+	 * Something (early fixmap) may already have put a pte
+	 * page here, which causes the page table allocation
+	 * to become nonlinear. Attempt to fix it, and if it
+	 * is still nonlinear then we have to bug.
+	 */
+	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
+	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;
+
+	if (pmd_idx_kmap_begin != pmd_idx_kmap_end
+	    && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
+	    && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end
+	    && ((__pa(pte) >> PAGE_SHIFT) < table_start
+		|| (__pa(pte) >> PAGE_SHIFT) >= table_end)) {
+		pte_t *newpte;
+		int i;
+
+		BUG_ON(after_init_bootmem);
+		newpte = alloc_low_page();
+		for (i = 0; i < PTRS_PER_PTE; i++)
+			set_pte(newpte + i, pte[i]);
+
+		paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
+		set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
+		BUG_ON(newpte != pte_offset_kernel(pmd, 0));
+		__flush_tlb_all();
+
+		paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
+		pte = newpte;
+	}
+	BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
+	       && vaddr > fix_to_virt(FIX_KMAP_END)
+	       && lastpte && lastpte + PTRS_PER_PTE != pte);
+#endif
+	return pte;
+}
+
 /*
  * This function initializes a certain range of kernel virtual memory
  * with new bootmem page tables, everywhere page tables are missing in
@@ -154,6 +195,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
 	unsigned long vaddr;
 	pgd_t *pgd;
 	pmd_t *pmd;
+	pte_t *pte = NULL;
 
 	vaddr = start;
 	pgd_idx = pgd_index(vaddr);
@@ -165,7 +207,8 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
 		pmd = pmd + pmd_index(vaddr);
 		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
 		     pmd++, pmd_idx++) {
-			one_page_table_init(pmd);
+			pte = page_table_kmap_check(one_page_table_init(pmd),
+						    pmd, vaddr, pte);
 
 			vaddr += PMD_SIZE;
 		}
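
A note on the guard in page_table_kmap_check() above: it is plain integer arithmetic on pmd indices. The standalone userspace sketch below mimics its range test; PMD_SHIFT, the addresses, and the helper name are all assumptions invented for illustration, not kernel API.

#include <stdio.h>

#define PMD_SHIFT 22	/* assumed: 32-bit x86 without PAE, 4 MiB per pmd */

/* illustrative stand-ins for fix_to_virt(FIX_KMAP_END/FIX_KMAP_BEGIN) */
static const unsigned long kmap_low  = 0xffa00000UL;
static const unsigned long kmap_high = 0xffdfe000UL;

/* same shape as the range test in page_table_kmap_check() */
static int vaddr_in_kmap_pmds(unsigned long vaddr)
{
	unsigned long pmd_idx_kmap_begin = kmap_low >> PMD_SHIFT;
	unsigned long pmd_idx_kmap_end = kmap_high >> PMD_SHIFT;
	unsigned long pmd_idx = vaddr >> PMD_SHIFT;

	return pmd_idx_kmap_begin != pmd_idx_kmap_end
	       && pmd_idx >= pmd_idx_kmap_begin
	       && pmd_idx <= pmd_idx_kmap_end;
}

int main(void)
{
	printf("%d\n", vaddr_in_kmap_pmds(0xffc00000UL));	/* 1: shares a pmd with kmap */
	printf("%d\n", vaddr_in_kmap_pmds(0xc0000000UL));	/* 0: ordinary lowmem */
	return 0;
}

Only when this test fires and the existing pte page additionally lies outside [table_start, table_end) does the kernel bother copying it into a freshly allocated low page.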
@@ -508,7 +551,6 @@ static void __init early_ioremap_page_table_range_init(pgd_t *pgd_base)
 	 * Fixed mappings, only the page table structure has to be
 	 * created - mappings will be set by set_fixmap():
 	 */
-	early_ioremap_clear();
 	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
 	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
 	page_table_range_init(vaddr, end, pgd_base);
@@ -801,7 +843,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse)
 	tables += PAGE_ALIGN(ptes * sizeof(pte_t));
 
 	/* for fixmap */
-	tables += PAGE_SIZE * 2;
+	tables += PAGE_ALIGN(__end_of_fixed_addresses * sizeof(pte_t));
 
 	/*
 	 * RED-PEN putting page tables only on node 0 could
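
The sizing change in the hunk above is easy to sanity-check with ordinary arithmetic. In the sketch below the slot count and pte size are invented for the example (they are not this kernel's values); the point is simply that __end_of_fixed_addresses entries need not fit in two pages.

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long nr_fixmap_slots = 1156;	/* assumed stand-in for __end_of_fixed_addresses */
	unsigned long pte_size = 8;		/* assumed PAE-sized pte_t */

	unsigned long old_bytes = PAGE_SIZE * 2;
	unsigned long new_bytes = PAGE_ALIGN(nr_fixmap_slots * pte_size);

	/* 1156 * 8 = 9248 bytes, which rounds up to 12288 (three pages) */
	printf("old: %lu bytes, new: %lu bytes\n", old_bytes, new_bytes);
	return 0;
}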
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index bd85d42819e1..af750ab973b6 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -557,34 +557,9 @@ void __init early_ioremap_init(void)
 	}
 }
 
-void __init early_ioremap_clear(void)
-{
-	pmd_t *pmd;
-
-	if (early_ioremap_debug)
-		printk(KERN_INFO "early_ioremap_clear()\n");
-
-	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
-	pmd_clear(pmd);
-	paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
-	__flush_tlb_all();
-}
-
 void __init early_ioremap_reset(void)
 {
-	enum fixed_addresses idx;
-	unsigned long addr, phys;
-	pte_t *pte;
-
 	after_paging_init = 1;
-	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
-		addr = fix_to_virt(idx);
-		pte = early_ioremap_pte(addr);
-		if (pte_present(*pte)) {
-			phys = pte_val(*pte) & PAGE_MASK;
-			set_fixmap(idx, phys);
-		}
-	}
 }
 
 static void __init __early_set_fixmap(enum fixed_addresses idx,
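
With the teardown above removed, the boot-time pte page simply stays put, and page_table_range_init() handles any pte page the early fixmap already installed, via page_table_kmap_check(). The final BUG_ON() in that helper asserts that successive kmap-area pte pages come out physically consecutive. Here is a userspace model of that invariant, with an illustrative pool standing in for the bootmem table area and a made-up PTRS_PER_PTE; none of these names are kernel API.

#include <assert.h>
#include <stdlib.h>

#define PTRS_PER_PTE 1024	/* assumed: 32-bit x86 without PAE */

typedef unsigned long pte_t;

int main(void)
{
	/* one linear allocation standing in for the early page-table pool */
	pte_t *pool = calloc(4 * PTRS_PER_PTE, sizeof(pte_t));
	pte_t *lastpte = NULL;
	int tbl;

	if (!pool)
		return 1;

	for (tbl = 0; tbl < 4; tbl++) {
		pte_t *pte = pool + tbl * PTRS_PER_PTE;

		/* the linearity the kernel's BUG_ON() insists on */
		if (lastpte)
			assert(lastpte + PTRS_PER_PTE == pte);
		lastpte = pte;
	}
	free(pool);
	return 0;
}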