author		Yinghai Lu <yhlu.kernel@gmail.com>	2008-07-08 04:41:05 -0400
committer	Ingo Molnar <mingo@elte.hu>		2008-07-09 03:37:45 -0400
commit		b50efd2a55fc1344654875369d458bb6838bd37a (patch)
tree		1fa6e771e65efa05d6228d2852d8f9b60265edd5 /arch/x86/mm
parent		26e9e57b106445bbd8c965985e4e8af5293ae005 (diff)
x86: introduce page_size_mask for 64bit
Prepare for the overmapping fix patch;
also print out last_map_addr together with end.
Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
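
The new page_size_mask argument is a bit mask indexed by page-table level,
reusing the kernel's enum pg_level values: bit PG_LEVEL_2M requests 2MB
mappings at the PMD level and bit PG_LEVEL_1G requests 1GB mappings at the
PUD level. A minimal standalone sketch of the decision the mask encodes
(pick_map_page_size() is an illustrative helper, not part of this patch):

	/* Values mirror the kernel's enum pg_level ordering. */
	enum pg_level { PG_LEVEL_NONE, PG_LEVEL_4K, PG_LEVEL_2M, PG_LEVEL_1G };

	/* Return the largest page size the caller's mask allows. */
	static unsigned long pick_map_page_size(unsigned long page_size_mask)
	{
		if (page_size_mask & (1 << PG_LEVEL_1G))
			return 1UL << 30;	/* 1GB page, one PUD entry */
		if (page_size_mask & (1 << PG_LEVEL_2M))
			return 1UL << 21;	/* 2MB page, one PMD entry */
		return 1UL << 12;		/* 4KB page, one PTE entry */
	}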
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--	arch/x86/mm/init_64.c	98
1 file changed, 63 insertions(+), 35 deletions(-)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 57d5eff754c9..7227a0a3f3a0 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -340,7 +340,8 @@ phys_pte_update(pmd_t *pmd, unsigned long address, unsigned long end)
 }
 
 static unsigned long __meminit
-phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
+phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
+	      unsigned long page_size_mask)
 {
 	unsigned long pages = 0;
 
@@ -365,7 +366,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
 			continue;
 		}
 
-		if (cpu_has_pse) {
+		if (page_size_mask & (1<<PG_LEVEL_2M)) {
 			pages++;
 			set_pte((pte_t *)pmd,
 				pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
@@ -383,20 +384,22 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
 }
 
 static unsigned long __meminit
-phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
+phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end,
+		unsigned long page_size_mask)
 {
 	pmd_t *pmd = pmd_offset(pud, 0);
 	unsigned long last_map_addr;
 
 	spin_lock(&init_mm.page_table_lock);
-	last_map_addr = phys_pmd_init(pmd, address, end);
+	last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask);
 	spin_unlock(&init_mm.page_table_lock);
 	__flush_tlb_all();
 	return last_map_addr;
 }
 
 static unsigned long __meminit
-phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
+phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
+	      unsigned long page_size_mask)
 {
 	unsigned long pages = 0;
 	unsigned long last_map_addr = end;
@@ -418,11 +421,12 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
 
 		if (pud_val(*pud)) {
 			if (!pud_large(*pud))
-				last_map_addr = phys_pmd_update(pud, addr, end);
+				last_map_addr = phys_pmd_update(pud, addr, end,
+							 page_size_mask);
 			continue;
 		}
 
-		if (direct_gbpages) {
+		if (page_size_mask & (1<<PG_LEVEL_1G)) {
 			pages++;
 			set_pte((pte_t *)pud,
 				pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
@@ -433,7 +437,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
 		pmd = alloc_low_page(&pmd_phys);
 
 		spin_lock(&init_mm.page_table_lock);
-		last_map_addr = phys_pmd_init(pmd, addr, end);
+		last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask);
 		unmap_low_page(pmd);
 		pud_populate(&init_mm, pud, __va(pmd_phys));
 		spin_unlock(&init_mm.page_table_lock);
@@ -446,13 +450,14 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
 }
 
 static unsigned long __meminit
-phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end)
+phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end,
+		unsigned long page_size_mask)
 {
 	pud_t *pud;
 
 	pud = (pud_t *)pgd_page_vaddr(*pgd);
 
-	return phys_pud_init(pud, addr, end);
+	return phys_pud_init(pud, addr, end, page_size_mask);
 }
 
 static void __init find_early_table_space(unsigned long end)
@@ -608,29 +613,12 @@ static void __init early_memtest(unsigned long start, unsigned long end)
 }
 #endif
 
-/*
- * Setup the direct mapping of the physical memory at PAGE_OFFSET.
- * This runs before bootmem is initialized and gets pages directly from
- * the physical memory. To access them they are temporarily mapped.
- */
-unsigned long __init_refok init_memory_mapping(unsigned long start, unsigned long end)
+static unsigned long __init kernel_physical_mapping_init(unsigned long start,
+						unsigned long end,
+						unsigned long page_size_mask)
 {
-	unsigned long next, last_map_addr = end;
-	unsigned long start_phys = start, end_phys = end;
 
-	printk(KERN_INFO "init_memory_mapping\n");
-
-	/*
-	 * Find space for the kernel direct mapping tables.
-	 *
-	 * Later we should allocate these tables in the local node of the
-	 * memory mapped. Unfortunately this is done currently before the
-	 * nodes are discovered.
-	 */
-	if (!after_bootmem) {
-		init_gbpages();
-		find_early_table_space(end);
-	}
+	unsigned long next, last_map_addr = end;
 
 	start = (unsigned long)__va(start);
 	end = (unsigned long)__va(end);
@@ -645,7 +633,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, unsigned lon
 			next = end;
 
 		if (pgd_val(*pgd)) {
-			last_map_addr = phys_pud_update(pgd, __pa(start), __pa(end));
+			last_map_addr = phys_pud_update(pgd, __pa(start),
+						 __pa(end), page_size_mask);
 			continue;
 		}
 
@@ -654,22 +643,61 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, unsigned lon
 		else
 			pud = alloc_low_page(&pud_phys);
 
-		last_map_addr = phys_pud_init(pud, __pa(start), __pa(next));
+		last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
+						 page_size_mask);
 		unmap_low_page(pud);
 		pgd_populate(&init_mm, pgd_offset_k(start),
 			     __va(pud_phys));
 	}
 
+	return last_map_addr;
+}
+/*
+ * Setup the direct mapping of the physical memory at PAGE_OFFSET.
+ * This runs before bootmem is initialized and gets pages directly from
+ * the physical memory. To access them they are temporarily mapped.
+ */
+unsigned long __init_refok init_memory_mapping(unsigned long start,
+					       unsigned long end)
+{
+	unsigned long last_map_addr;
+	unsigned long page_size_mask = 0;
+
+	printk(KERN_INFO "init_memory_mapping\n");
+
+	/*
+	 * Find space for the kernel direct mapping tables.
+	 *
+	 * Later we should allocate these tables in the local node of the
+	 * memory mapped. Unfortunately this is done currently before the
+	 * nodes are discovered.
+	 */
+	if (!after_bootmem) {
+		init_gbpages();
+		find_early_table_space(end);
+	}
+
+	if (direct_gbpages)
+		page_size_mask |= 1 << PG_LEVEL_1G;
+	if (cpu_has_pse)
+		page_size_mask |= 1 << PG_LEVEL_2M;
+
+	last_map_addr = kernel_physical_mapping_init(start, end,
+						page_size_mask);
+
 	if (!after_bootmem)
 		mmu_cr4_features = read_cr4();
 	__flush_tlb_all();
 
-	if (!after_bootmem)
+	if (!after_bootmem && table_end > table_start)
 		reserve_early(table_start << PAGE_SHIFT,
 			      table_end << PAGE_SHIFT, "PGTABLE");
 
+	printk(KERN_INFO "last_map_addr: %lx end: %lx\n",
+			 last_map_addr, end);
+
 	if (!after_bootmem)
-		early_memtest(start_phys, end_phys);
+		early_memtest(start, end);
 
 	return last_map_addr >> PAGE_SHIFT;
 }
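
With this split, kernel_physical_mapping_init() no longer consults
cpu_has_pse or direct_gbpages itself; the caller builds the mask once in
init_memory_mapping(). A hedged usage sketch, assuming the signature
introduced above: a caller could force base pages only by passing an
empty mask, since both large-page bits are then clear:

	/* Sketch: no 2MB/1GB bits set, so only 4KB mappings are created. */
	last_map_addr = kernel_physical_mapping_init(start, end, 0);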