-rw-r--r--	arch/x86/mm/init.c	9
-rw-r--r--	arch/x86/mm/init_64.c	63
2 files changed, 30 insertions, 42 deletions
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index c0e28a13de7..5863950ebe0 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -33,7 +33,7 @@ int direct_gbpages
 static void __init find_early_table_space(unsigned long end, int use_pse,
 					  int use_gbpages)
 {
-	unsigned long puds, pmds, ptes, tables, start;
+	unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
 	phys_addr_t base;
 
 	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
@@ -73,12 +73,9 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
 	 * need roughly 0.5KB per GB.
 	 */
 #ifdef CONFIG_X86_32
-	start = 0x7000;
-#else
-	start = 0x8000;
+	good_end = max_pfn_mapped << PAGE_SHIFT;
 #endif
-	base = memblock_find_in_range(start, max_pfn_mapped<<PAGE_SHIFT,
-					tables, PAGE_SIZE);
+	base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE);
 	if (base == MEMBLOCK_ERROR)
 		panic("Cannot find space for the kernel page tables");
 
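Note on the init.c hunk above: the fixed 0x7000/0x8000 start addresses are gone, and the memblock search now runs from 0 up to good_end, which is the end of the range being mapped on 64-bit and stays capped at max_pfn_mapped << PAGE_SHIFT on 32-bit. A minimal userspace sketch of that window selection follows; PAGE_SHIFT, the is_x86_32 flag and the sample values are illustrative stand-ins for the kernel symbols, not kernel code.

#include <stdio.h>

#define PAGE_SHIFT 12	/* stand-in for the kernel's PAGE_SHIFT */

/* is_x86_32 and max_pfn_mapped are plain parameters here, purely for illustration. */
static void pick_window(unsigned long end, int is_x86_32,
			unsigned long max_pfn_mapped)
{
	unsigned long start = 0, good_end = end;

	if (is_x86_32)			/* CONFIG_X86_32 in the real code */
		good_end = max_pfn_mapped << PAGE_SHIFT;

	printf("search window: [%#lx, %#lx)\n", start, good_end);
}

int main(void)
{
	pick_window(0x40000000UL, 0, 0x20000);	/* 64-bit: whole range up to end */
	pick_window(0x40000000UL, 1, 0x20000);	/* 32-bit: capped at mapped memory */
	return 0;
}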
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 71a59296af8..024847dc81a 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -333,12 +333,28 @@ static __ref void *alloc_low_page(unsigned long *phys)
 	return adr;
 }
 
+static __ref void *map_low_page(void *virt)
+{
+	void *adr;
+	unsigned long phys, left;
+
+	if (after_bootmem)
+		return virt;
+
+	phys = __pa(virt);
+	left = phys & (PAGE_SIZE - 1);
+	adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
+	adr = (void *)(((unsigned long)adr) | left);
+
+	return adr;
+}
+
 static __ref void unmap_low_page(void *adr)
 {
 	if (after_bootmem)
 		return;
 
-	early_iounmap(adr, PAGE_SIZE);
+	early_iounmap((void *)((unsigned long)adr & PAGE_MASK), PAGE_SIZE);
 }
 
 static unsigned long __meminit
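The new map_low_page() above accepts a pointer that need not be page-aligned: before bootmem is up it maps the page containing the target via early_memremap() and ORs the sub-page offset back into the returned pointer, which is why unmap_low_page() now masks that offset off again before calling early_iounmap(). A standalone userspace sketch of just that pointer arithmetic follows; the 'mapping' value is a made-up stand-in for what early_memremap() might return.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	uintptr_t phys    = 0x12345678UL;		/* arbitrary, not page aligned */
	uintptr_t left    = phys & (PAGE_SIZE - 1);	/* sub-page offset: 0x678 */
	uintptr_t page    = phys & PAGE_MASK;		/* the page that actually gets mapped */
	uintptr_t mapping = 0x7f0000000000UL;		/* pretend early_memremap() result (page aligned) */
	uintptr_t adr     = mapping | left;		/* pointer handed back to the caller */

	printf("page=%#lx left=%#lx adr=%#lx\n",
	       (unsigned long)page, (unsigned long)left, (unsigned long)adr);

	/* unmap_low_page() must mask the offset off again before unmapping: */
	printf("unmap target=%#lx\n", (unsigned long)(adr & PAGE_MASK));
	return 0;
}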
@@ -386,15 +402,6 @@ phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
 }
 
 static unsigned long __meminit
-phys_pte_update(pmd_t *pmd, unsigned long address, unsigned long end,
-		pgprot_t prot)
-{
-	pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd);
-
-	return phys_pte_init(pte, address, end, prot);
-}
-
-static unsigned long __meminit
 phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 	      unsigned long page_size_mask, pgprot_t prot)
 {
@@ -420,8 +427,10 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 		if (pmd_val(*pmd)) {
 			if (!pmd_large(*pmd)) {
 				spin_lock(&init_mm.page_table_lock);
-				last_map_addr = phys_pte_update(pmd, address,
+				pte = map_low_page((pte_t *)pmd_page_vaddr(*pmd));
+				last_map_addr = phys_pte_init(pte, address,
 								end, prot);
+				unmap_low_page(pte);
 				spin_unlock(&init_mm.page_table_lock);
 				continue;
 			}
@@ -468,18 +477,6 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 }
 
 static unsigned long __meminit
-phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end,
-		unsigned long page_size_mask, pgprot_t prot)
-{
-	pmd_t *pmd = pmd_offset(pud, 0);
-	unsigned long last_map_addr;
-
-	last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask, prot);
-	__flush_tlb_all();
-	return last_map_addr;
-}
-
-static unsigned long __meminit
 phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 	      unsigned long page_size_mask)
 {
@@ -504,8 +501,11 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 
 		if (pud_val(*pud)) {
 			if (!pud_large(*pud)) {
-				last_map_addr = phys_pmd_update(pud, addr, end,
+				pmd = map_low_page(pmd_offset(pud, 0));
+				last_map_addr = phys_pmd_init(pmd, addr, end,
 							 page_size_mask, prot);
+				unmap_low_page(pmd);
+				__flush_tlb_all();
 				continue;
 			}
 			/*
@@ -553,17 +553,6 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 	return last_map_addr;
 }
 
-static unsigned long __meminit
-phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end,
-		unsigned long page_size_mask)
-{
-	pud_t *pud;
-
-	pud = (pud_t *)pgd_page_vaddr(*pgd);
-
-	return phys_pud_init(pud, addr, end, page_size_mask);
-}
-
 unsigned long __meminit
 kernel_physical_mapping_init(unsigned long start,
 			     unsigned long end,
@@ -587,8 +576,10 @@ kernel_physical_mapping_init(unsigned long start,
 		next = end;
 
 		if (pgd_val(*pgd)) {
-			last_map_addr = phys_pud_update(pgd, __pa(start),
+			pud = map_low_page((pud_t *)pgd_page_vaddr(*pgd));
+			last_map_addr = phys_pud_init(pud, __pa(start),
 						 __pa(end), page_size_mask);
+			unmap_low_page(pud);
 			continue;
 		}
 
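Taken together, the init_64.c hunks drop the phys_pte_update()/phys_pmd_update()/phys_pud_update() wrappers: each caller now maps the next-level table with map_low_page(), calls the corresponding phys_*_init() directly, and unmaps it afterwards (phys_pud_init() also takes over the __flush_tlb_all() that phys_pmd_update() used to do). Below is a userspace sketch of that calling pattern; the helpers are stub stand-ins that only log what the real kernel functions would do.

#include <stdio.h>

/* Stand-ins for the real helpers in arch/x86/mm/init_64.c. */
static void *map_low_page(void *virt)  { printf("map   %p\n", virt); return virt; }
static void  unmap_low_page(void *adr) { printf("unmap %p\n", adr); }

static unsigned long phys_pte_init_stub(void *pte, unsigned long addr,
					unsigned long end)
{
	printf("init  ptes for [%#lx, %#lx)\n", addr, end);
	return end;	/* the real phys_pte_init() returns the last mapped address */
}

int main(void)
{
	static unsigned long fake_pte_page[512];	/* stands in for the pte page */
	unsigned long last_map_addr;

	/*
	 * Old flow:  last_map_addr = phys_pte_update(pmd, addr, end, prot);
	 * New flow:  map the lower-level table, init it directly, unmap it.
	 */
	void *pte = map_low_page(fake_pte_page);
	last_map_addr = phys_pte_init_stub(pte, 0x200000UL, 0x400000UL);
	unmap_low_page(pte);

	printf("last_map_addr=%#lx\n", last_map_addr);
	return 0;
}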
