author      Ingo Molnar <mingo@elte.hu>    2008-08-25 04:54:07 -0400
committer   Ingo Molnar <mingo@elte.hu>    2008-08-25 04:54:07 -0400
commit      e4f807c2b4d81636fc63993368646c5bfd42b22f (patch)
tree        2ff100911b1ba4e26e3d9aad41edb9b48405f01e /arch/x86/mm/init_64.c
parent      25258ef762bc4a05fa9c4523f7dae56e3fd01864 (diff)
parent      83097aca8567a0bd593534853b71fe0fa9a75d69 (diff)
Merge branch 'linus' into x86/xen
Conflicts:
arch/x86/kernel/paravirt.c
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/mm/init_64.c')
-rw-r--r--  arch/x86/mm/init_64.c | 36
1 file changed, 20 insertions(+), 16 deletions(-)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index a87ea0e4b3dc..d3746efb060d 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -241,7 +241,7 @@ static unsigned long __initdata table_start;
 static unsigned long __meminitdata table_end;
 static unsigned long __meminitdata table_top;
 
-static __meminit void *alloc_low_page(unsigned long *phys)
+static __ref void *alloc_low_page(unsigned long *phys)
 {
         unsigned long pfn = table_end++;
         void *adr;
@@ -262,7 +262,7 @@ static __meminit void *alloc_low_page(unsigned long *phys)
         return adr;
 }
 
-static __meminit void unmap_low_page(void *adr)
+static __ref void unmap_low_page(void *adr)
 {
         if (after_bootmem)
                 return;
@@ -336,9 +336,12 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
                 }
 
                 if (pmd_val(*pmd)) {
-                        if (!pmd_large(*pmd))
+                        if (!pmd_large(*pmd)) {
+                                spin_lock(&init_mm.page_table_lock);
                                 last_map_addr = phys_pte_update(pmd, address,
                                                                 end);
+                                spin_unlock(&init_mm.page_table_lock);
+                        }
                         /* Count entries we're using from level2_ident_pgt */
                         if (start == 0)
                                 pages++;
@@ -347,8 +350,10 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 
                 if (page_size_mask & (1<<PG_LEVEL_2M)) {
                         pages++;
+                        spin_lock(&init_mm.page_table_lock);
                         set_pte((pte_t *)pmd,
                                 pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
+                        spin_unlock(&init_mm.page_table_lock);
                         last_map_addr = (address & PMD_MASK) + PMD_SIZE;
                         continue;
                 }
@@ -357,7 +362,9 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
                 last_map_addr = phys_pte_init(pte, address, end);
                 unmap_low_page(pte);
 
+                spin_lock(&init_mm.page_table_lock);
                 pmd_populate_kernel(&init_mm, pmd, __va(pte_phys));
+                spin_unlock(&init_mm.page_table_lock);
         }
         update_page_count(PG_LEVEL_2M, pages);
         return last_map_addr;
@@ -370,9 +377,7 @@ phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end,
         pmd_t *pmd = pmd_offset(pud, 0);
         unsigned long last_map_addr;
 
-        spin_lock(&init_mm.page_table_lock);
         last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask);
-        spin_unlock(&init_mm.page_table_lock);
         __flush_tlb_all();
         return last_map_addr;
 }
@@ -408,20 +413,21 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 
                 if (page_size_mask & (1<<PG_LEVEL_1G)) {
                         pages++;
+                        spin_lock(&init_mm.page_table_lock);
                         set_pte((pte_t *)pud,
                                 pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
+                        spin_unlock(&init_mm.page_table_lock);
                         last_map_addr = (addr & PUD_MASK) + PUD_SIZE;
                         continue;
                 }
 
                 pmd = alloc_low_page(&pmd_phys);
-
-                spin_lock(&init_mm.page_table_lock);
                 last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask);
                 unmap_low_page(pmd);
+
+                spin_lock(&init_mm.page_table_lock);
                 pud_populate(&init_mm, pud, __va(pmd_phys));
                 spin_unlock(&init_mm.page_table_lock);
-
         }
         __flush_tlb_all();
         update_page_count(PG_LEVEL_1G, pages);
@@ -513,16 +519,14 @@ static unsigned long __init kernel_physical_mapping_init(unsigned long start,
                         continue;
                 }
 
-                if (after_bootmem)
-                        pud = pud_offset(pgd, start & PGDIR_MASK);
-                else
-                        pud = alloc_low_page(&pud_phys);
-
+                pud = alloc_low_page(&pud_phys);
                 last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
                                                  page_size_mask);
                 unmap_low_page(pud);
-                pgd_populate(&init_mm, pgd_offset_k(start),
-                             __va(pud_phys));
+
+                spin_lock(&init_mm.page_table_lock);
+                pgd_populate(&init_mm, pgd, __va(pud_phys));
+                spin_unlock(&init_mm.page_table_lock);
         }
 
         return last_map_addr;
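
The recurring change in these hunks is that init_mm.page_table_lock is no longer held across whole phys_pmd_init()/phys_pud_init() calls: each level now allocates and fills its new table with the lock dropped, then takes the lock only around the set_pte()/pmd_populate_kernel()/pud_populate()/pgd_populate() call that publishes the table into its parent. The following is a minimal userspace sketch of that pattern, not kernel code; the mutex and the names table_t, alloc_and_fill_table and publish_entry are hypothetical stand-ins used purely for illustration.

/*
 * Illustrative userspace sketch only -- not kernel code. Models the
 * locking pattern in the hunks above: build the new lower-level table
 * with the lock dropped, then take the lock just long enough to publish
 * it into the parent table.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define ENTRIES 512

typedef struct {
        void *entry[ENTRIES];           /* stands in for a page-table page */
} table_t;

/* stands in for init_mm.page_table_lock */
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static table_t top_level;               /* parent table being populated */

/* Allocation and filling run without the lock held, the way the patch
 * lets alloc_low_page() and the phys_*_init() body run outside it. */
static table_t *alloc_and_fill_table(void)
{
        table_t *t = calloc(1, sizeof(*t));
        if (!t)
                abort();
        for (int i = 0; i < ENTRIES; i++)
                t->entry[i] = (void *)(long)(i + 1);    /* fake mappings */
        return t;
}

/* Only the publication step is serialized, mirroring the tight
 * spin_lock()/spin_unlock() now wrapped around the p*_populate() calls. */
static void publish_entry(int slot, table_t *child)
{
        pthread_mutex_lock(&table_lock);
        top_level.entry[slot] = child;
        pthread_mutex_unlock(&table_lock);
}

int main(void)
{
        for (int slot = 0; slot < 4; slot++) {
                table_t *child = alloc_and_fill_table();  /* lock not held */
                publish_entry(slot, child);               /* lock held briefly */
        }
        printf("published 4 tables\n");
        return 0;
}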