author		Scott Wood <scottwood@freescale.com>	2014-08-08 19:44:01 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2014-08-13 01:13:25 -0400
commit		5d61a2172a0142c635ab6d7c3b1589af85a3603e
tree		586e945fe3bd477a0575f9492adad84966517e49 /arch/powerpc
parent		58d08e3b2c2033354b91467da33deffa06360c28
powerpc/nohash: Split __early_init_mmu() into boot and secondary
__early_init_mmu() does some things that are really only needed by the
boot cpu. On FSL booke, this includes calling
memblock_enforce_memory_limit(), which is labelled __init. Secondary
cpu init code can't be __init as that would break CPU hotplug.
While it's probably a bug that memblock_enforce_memory_limit() isn't
__init_memblock instead, there's no reason why we should be doing this
stuff for secondary cpus in the first place.
Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
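In outline, the patch replaces the single __early_init_mmu(boot_cpu) entry point with three helpers. The skeleton below is only a condensed sketch for orientation: the function names and call structure come from the diff further down, while the bodies are reduced to comments (see the diff for the real code).

/* Per-CPU setup: runs on the boot CPU and again on each secondary CPU,
 * so it cannot be __init (secondaries may be brought up after boot via
 * CPU hotplug).
 */
static void early_init_this_mmu(void)
{
	/* program MAS4, map the bolted linear range in TLBCAMs, mb() */
}

/* One-time, boot-only decisions: linear/vmemmap page sizes, page size
 * probing, HW tablewalk setup, bolted TLB miss handler patching, and
 * setting linear_map_top. Safe to mark __init.
 */
static void __init early_init_mmu_global(void)
{
	/* setup_page_sizes(); setup_mmu_htw(); linear_map_top = ...; */
}

/* Also boot-only: this is where the __init
 * memblock_enforce_memory_limit() call now lives.
 */
static void __init early_mmu_set_memory_limit(void)
{
	/* memblock_enforce_memory_limit(linear_map_top); */
	/* memblock_set_current_limit(linear_map_top); */
}

void __init early_init_mmu(void)		/* boot CPU */
{
	early_init_mmu_global();
	early_init_this_mmu();
	early_mmu_set_memory_limit();
}

void early_init_mmu_secondary(void)		/* secondary CPUs */
{
	early_init_this_mmu();
}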
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/mm/tlb_nohash.c	111
1 file changed, 66 insertions(+), 45 deletions(-)
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 92cb18d52ea8..f38ea4df6a85 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -581,42 +581,10 @@ static void setup_mmu_htw(void)
 /*
  * Early initialization of the MMU TLB code
  */
-static void __early_init_mmu(int boot_cpu)
+static void early_init_this_mmu(void)
 {
 	unsigned int mas4;
 
-	/* XXX This will have to be decided at runtime, but right
-	 * now our boot and TLB miss code hard wires it. Ideally
-	 * we should find out a suitable page size and patch the
-	 * TLB miss code (either that or use the PACA to store
-	 * the value we want)
-	 */
-	mmu_linear_psize = MMU_PAGE_1G;
-
-	/* XXX This should be decided at runtime based on supported
-	 * page sizes in the TLB, but for now let's assume 16M is
-	 * always there and a good fit (which it probably is)
-	 *
-	 * Freescale booke only supports 4K pages in TLB0, so use that.
-	 */
-	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
-		mmu_vmemmap_psize = MMU_PAGE_4K;
-	else
-		mmu_vmemmap_psize = MMU_PAGE_16M;
-
-	/* XXX This code only checks for TLB 0 capabilities and doesn't
-	 * check what page size combos are supported by the HW. It
-	 * also doesn't handle the case where a separate array holds
-	 * the IND entries from the array loaded by the PT.
-	 */
-	if (boot_cpu) {
-		/* Look for supported page sizes */
-		setup_page_sizes();
-
-		/* Look for HW tablewalk support */
-		setup_mmu_htw();
-	}
-
 	/* Set MAS4 based on page table setting */
 
 	mas4 = 0x4 << MAS4_WIMGED_SHIFT;
@@ -650,11 +618,6 @@ static void __early_init_mmu(int boot_cpu)
 	}
 	mtspr(SPRN_MAS4, mas4);
 
-	/* Set the global containing the top of the linear mapping
-	 * for use by the TLB miss code
-	 */
-	linear_map_top = memblock_end_of_DRAM();
-
 #ifdef CONFIG_PPC_FSL_BOOK3E
 	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
 		unsigned int num_cams;
@@ -662,10 +625,49 @@ static void __early_init_mmu(int boot_cpu)
 		/* use a quarter of the TLBCAM for bolted linear map */
 		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
 		linear_map_top = map_mem_in_cams(linear_map_top, num_cams);
+	}
+#endif
 
-	/* limit memory so we dont have linear faults */
-	memblock_enforce_memory_limit(linear_map_top);
+	/* A sync won't hurt us after mucking around with
+	 * the MMU configuration
+	 */
+	mb();
+}
 
+static void __init early_init_mmu_global(void)
+{
+	/* XXX This will have to be decided at runtime, but right
+	 * now our boot and TLB miss code hard wires it. Ideally
+	 * we should find out a suitable page size and patch the
+	 * TLB miss code (either that or use the PACA to store
+	 * the value we want)
+	 */
+	mmu_linear_psize = MMU_PAGE_1G;
+
+	/* XXX This should be decided at runtime based on supported
+	 * page sizes in the TLB, but for now let's assume 16M is
+	 * always there and a good fit (which it probably is)
+	 *
+	 * Freescale booke only supports 4K pages in TLB0, so use that.
+	 */
+	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
+		mmu_vmemmap_psize = MMU_PAGE_4K;
+	else
+		mmu_vmemmap_psize = MMU_PAGE_16M;
+
+	/* XXX This code only checks for TLB 0 capabilities and doesn't
+	 * check what page size combos are supported by the HW. It
+	 * also doesn't handle the case where a separate array holds
+	 * the IND entries from the array loaded by the PT.
+	 */
+	/* Look for supported page sizes */
+	setup_page_sizes();
+
+	/* Look for HW tablewalk support */
+	setup_mmu_htw();
+
+#ifdef CONFIG_PPC_FSL_BOOK3E
+	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
 		if (book3e_htw_mode == PPC_HTW_NONE) {
 			extlb_level_exc = EX_TLB_SIZE;
 			patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
@@ -675,22 +677,41 @@ static void __early_init_mmu(int boot_cpu)
 	}
 #endif
 
-	/* A sync won't hurt us after mucking around with
-	 * the MMU configuration
+	/* Set the global containing the top of the linear mapping
+	 * for use by the TLB miss code
 	 */
-	mb();
+	linear_map_top = memblock_end_of_DRAM();
+}
+
+static void __init early_mmu_set_memory_limit(void)
+{
+#ifdef CONFIG_PPC_FSL_BOOK3E
+	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
+		/*
+		 * Limit memory so we dont have linear faults.
+		 * Unlike memblock_set_current_limit, which limits
+		 * memory available during early boot, this permanently
+		 * reduces the memory available to Linux. We need to
+		 * do this because highmem is not supported on 64-bit.
+		 */
+		memblock_enforce_memory_limit(linear_map_top);
+	}
+#endif
 
 	memblock_set_current_limit(linear_map_top);
 }
 
+/* boot cpu only */
 void __init early_init_mmu(void)
 {
-	__early_init_mmu(1);
+	early_init_mmu_global();
+	early_init_this_mmu();
+	early_mmu_set_memory_limit();
 }
 
 void early_init_mmu_secondary(void)
 {
-	__early_init_mmu(0);
+	early_init_this_mmu();
 }
 
 void setup_initial_memory_limit(phys_addr_t first_memblock_base,