author	Vivek Goyal <vgoyal@in.ibm.com>	2007-05-02 13:27:06 -0400
committer	Andi Kleen <andi@basil.nowhere.org>	2007-05-02 13:27:06 -0400
commit	dafe41ee3a9389c08c91cdfd8670295f20f89e04 (patch)
tree	c4e8feff6c01728465e670cc87295444d996f318
parent	9d291e787b2b71d1b57e5fbb24ba9c70e748ed84 (diff)
[PATCH] x86-64: Kill temp boot pmds
Early in the boot process we need the ability to set
up temporary mappings, before our normal mechanisms are
initialized. Currently this is used to map pages that
are part of the page tables we are building and pages
accessed during the DMI scan.
The core problem is that we are using the user portion of
the page tables to implement this, which means that while
this mechanism is active we cannot catch NULL pointer
dereferences and we deviate from the normal ways of
handling things.
In this patch I modify early_ioremap to map pages into
the kernel portion of the address space, roughly where
we will later put modules, and I make the discovery of
which addresses we can use dynamic, which removes all
kinds of static limits and removes the dependencies
on implementation details between different parts of the code.

Now alloc_low_page() and unmap_low_page() use
early_ioremap() and early_iounmap() to allocate/map and
unmap a page.
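For illustration only (not part of the patch), the following minimal
userspace sketch mimics the idea behind the new early_ioremap(): round
the request up to whole 2MB PMD entries, scan a 512-entry table for
that many consecutive free slots, and claim them. The table, constants,
and helper names here are simplified stand-ins for level2_kernel_pgt
and the real pmd helpers.

/*
 * Userspace sketch of the free-slot scan done by the new early_ioremap().
 * Everything here is a simplified stand-in; only the arithmetic and the
 * scan/claim structure follow the patch.
 */
#include <stdio.h>

#define PMD_SIZE	(1UL << 21)		/* 2MB mapped per PMD entry */
#define PMD_MASK	(~(PMD_SIZE - 1))
#define PTRS_PER_PMD	512

static unsigned long pmd_table[PTRS_PER_PMD];	/* 0 means "not present" */

/* How many PMD entries does [addr, addr+size) need once addr is aligned down? */
static int pmds_needed(unsigned long addr, unsigned long size)
{
	return ((addr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
}

/* Find and claim `pmds` consecutive free slots; return the first index or -1. */
static int claim_slots(int pmds, unsigned long phys)
{
	int slot, i;

	for (slot = 0; slot + pmds <= PTRS_PER_PMD; slot++) {
		for (i = 0; i < pmds; i++)
			if (pmd_table[slot + i])	/* like pmd_present() */
				goto next;
		for (i = 0; i < pmds; i++)		/* like set_pmd() */
			pmd_table[slot + i] = (phys & PMD_MASK) + i * PMD_SIZE;
		return slot;
next:		;
	}
	return -1;					/* no free run large enough */
}

int main(void)
{
	unsigned long addr = 0x3ff000;	/* request crossing a 2MB boundary */
	unsigned long size = 0x2000;
	int pmds = pmds_needed(addr, size);

	pmd_table[0] = 1;		/* pretend slot 0 is already in use */
	printf("need %d pmds, claimed at slot %d\n", pmds, claim_slots(pmds, addr));
	return 0;
}

Because the scan finds any free run of entries at runtime, the old fixed
temp_boot_pmds windows at 40MB/42MB are no longer needed.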
Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: Vivek Goyal <vgoyal@in.ibm.com>
Signed-off-by: Andi Kleen <ak@suse.de>
-rw-r--r--	arch/x86_64/kernel/head.S	  3
-rw-r--r--	arch/x86_64/mm/init.c	100
2 files changed, 45 insertions, 58 deletions
diff --git a/arch/x86_64/kernel/head.S b/arch/x86_64/kernel/head.S
index 598a4d0351fc..118c6088198a 100644
--- a/arch/x86_64/kernel/head.S
+++ b/arch/x86_64/kernel/head.S
@@ -288,9 +288,6 @@ NEXT_PAGE(level2_ident_pgt)
 	.quad	i << 21 | 0x083
 	i = i + 1
 	.endr
-	/* Temporary mappings for the super early allocator in arch/x86_64/mm/init.c */
-	.globl temp_boot_pmds
-temp_boot_pmds:
 	.fill	492,8,0
 
 NEXT_PAGE(level2_kernel_pgt)
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 5ca61731f550..4ab3d40aac90 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -167,23 +167,9 @@ __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
 
 unsigned long __initdata table_start, table_end;
 
-extern pmd_t temp_boot_pmds[];
-
-static struct temp_map {
-	pmd_t *pmd;
-	void *address;
-	int allocated;
-} temp_mappings[] __initdata = {
-	{ &temp_boot_pmds[0], (void *)(40UL * 1024 * 1024) },
-	{ &temp_boot_pmds[1], (void *)(42UL * 1024 * 1024) },
-	{}
-};
-
-static __meminit void *alloc_low_page(int *index, unsigned long *phys)
+static __meminit void *alloc_low_page(unsigned long *phys)
 {
-	struct temp_map *ti;
-	int i;
-	unsigned long pfn = table_end++, paddr;
+	unsigned long pfn = table_end++;
 	void *adr;
 
 	if (after_bootmem) {
@@ -194,57 +180,63 @@ static __meminit void *alloc_low_page(int *index, unsigned long *phys)
 
 	if (pfn >= end_pfn)
 		panic("alloc_low_page: ran out of memory");
-	for (i = 0; temp_mappings[i].allocated; i++) {
-		if (!temp_mappings[i].pmd)
-			panic("alloc_low_page: ran out of temp mappings");
-	}
-	ti = &temp_mappings[i];
-	paddr = (pfn << PAGE_SHIFT) & PMD_MASK;
-	set_pmd(ti->pmd, __pmd(paddr | _KERNPG_TABLE | _PAGE_PSE));
-	ti->allocated = 1;
-	__flush_tlb();
-	adr = ti->address + ((pfn << PAGE_SHIFT) & ~PMD_MASK);
+
+	adr = early_ioremap(pfn * PAGE_SIZE, PAGE_SIZE);
 	memset(adr, 0, PAGE_SIZE);
-	*index = i;
-	*phys = pfn * PAGE_SIZE;
-	return adr;
-}
+	*phys = pfn * PAGE_SIZE;
+	return adr;
+}
 
-static __meminit void unmap_low_page(int i)
+static __meminit void unmap_low_page(void *adr)
 {
-	struct temp_map *ti;
 
 	if (after_bootmem)
 		return;
 
-	ti = &temp_mappings[i];
-	set_pmd(ti->pmd, __pmd(0));
-	ti->allocated = 0;
+	early_iounmap(adr, PAGE_SIZE);
 }
 
 /* Must run before zap_low_mappings */
 __init void *early_ioremap(unsigned long addr, unsigned long size)
 {
-	unsigned long map = round_down(addr, LARGE_PAGE_SIZE);
-
-	/* actually usually some more */
-	if (size >= LARGE_PAGE_SIZE) {
-		return NULL;
+	unsigned long vaddr;
+	pmd_t *pmd, *last_pmd;
+	int i, pmds;
+
+	pmds = ((addr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
+	vaddr = __START_KERNEL_map;
+	pmd = level2_kernel_pgt;
+	last_pmd = level2_kernel_pgt + PTRS_PER_PMD - 1;
+	for (; pmd <= last_pmd; pmd++, vaddr += PMD_SIZE) {
+		for (i = 0; i < pmds; i++) {
+			if (pmd_present(pmd[i]))
+				goto next;
+		}
+		vaddr += addr & ~PMD_MASK;
+		addr &= PMD_MASK;
+		for (i = 0; i < pmds; i++, addr += PMD_SIZE)
+			set_pmd(pmd + i,__pmd(addr | _KERNPG_TABLE | _PAGE_PSE));
+		__flush_tlb();
+		return (void *)vaddr;
+	next:
+		;
 	}
-	set_pmd(temp_mappings[0].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
-	map += LARGE_PAGE_SIZE;
-	set_pmd(temp_mappings[1].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
-	__flush_tlb();
-	return temp_mappings[0].address + (addr & (LARGE_PAGE_SIZE-1));
+	printk("early_ioremap(0x%lx, %lu) failed\n", addr, size);
+	return NULL;
 }
 
 /* To avoid virtual aliases later */
 __init void early_iounmap(void *addr, unsigned long size)
 {
-	if ((void *)round_down((unsigned long)addr, LARGE_PAGE_SIZE) != temp_mappings[0].address)
-		printk("early_iounmap: bad address %p\n", addr);
-	set_pmd(temp_mappings[0].pmd, __pmd(0));
-	set_pmd(temp_mappings[1].pmd, __pmd(0));
+	unsigned long vaddr;
+	pmd_t *pmd;
+	int i, pmds;
+
+	vaddr = (unsigned long)addr;
+	pmds = ((vaddr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
+	pmd = level2_kernel_pgt + pmd_index(vaddr);
+	for (i = 0; i < pmds; i++)
+		pmd_clear(pmd + i);
 	__flush_tlb();
 }
 
@@ -289,7 +281,6 @@ static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigne
 
 
 	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE ) {
-		int map;
 		unsigned long pmd_phys;
 		pud_t *pud = pud_page + pud_index(addr);
 		pmd_t *pmd;
@@ -307,12 +298,12 @@ static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigne
 			continue;
 		}
 
-		pmd = alloc_low_page(&map, &pmd_phys);
+		pmd = alloc_low_page(&pmd_phys);
 		spin_lock(&init_mm.page_table_lock);
 		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
 		phys_pmd_init(pmd, addr, end);
 		spin_unlock(&init_mm.page_table_lock);
-		unmap_low_page(map);
+		unmap_low_page(pmd);
 	}
 	__flush_tlb();
 }
@@ -364,7 +355,6 @@ void __meminit init_memory_mapping(unsigned long start, unsigned long end)
 	end = (unsigned long)__va(end);
 
 	for (; start < end; start = next) {
-		int map;
 		unsigned long pud_phys;
 		pgd_t *pgd = pgd_offset_k(start);
 		pud_t *pud;
@@ -372,7 +362,7 @@ void __meminit init_memory_mapping(unsigned long start, unsigned long end)
 		if (after_bootmem)
 			pud = pud_offset(pgd, start & PGDIR_MASK);
 		else
-			pud = alloc_low_page(&map, &pud_phys);
+			pud = alloc_low_page(&pud_phys);
 
 		next = start + PGDIR_SIZE;
 		if (next > end)
@@ -380,7 +370,7 @@ void __meminit init_memory_mapping(unsigned long start, unsigned long end)
 		phys_pud_init(pud, __pa(start), __pa(next));
 		if (!after_bootmem)
 			set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
-		unmap_low_page(map);
+		unmap_low_page(pud);
 	}
 
 	if (!after_bootmem)