author     David Gibson <david@gibson.dropbear.id.au>          2009-10-26 15:24:31 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>   2009-10-30 02:20:58 -0400
commit     d1837cba5d5d5458c09f0a2849db2d3c203cb8e9 (patch)
tree       144a4eb43ed6b9909133dc1ac0619d813e4cb131 /arch/powerpc/mm/hugetlbpage.c
parent     a4fe3ce7699bfe1bd88f816b55d42d8fe1dac655 (diff)
powerpc/mm: Cleanup initialization of hugepages on powerpc
This patch simplifies the logic used to initialize hugepages on
powerpc. The somewhat oddly named set_huge_psize() is renamed to
add_huge_page_size() and now does all necessary verification of
whether it's given a valid hugepage size (instead of just some) and
instantiates the generic hstate structure (but no more).

hugetlbpage_init() now steps through the available page sizes, checks
if they're valid for hugepages by calling add_huge_page_size() and
initializes the kmem_caches for the hugepage pagetables. This means
we can now eliminate the mmu_huge_psizes array, since we no longer
need to pass the sizing information for the pagetable caches from
set_huge_psize() into hugetlbpage_init().
Determination of the default huge page size is also moved from the
hash code into the general hugepage code.
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
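
For readers who want to experiment with the new control flow outside
the kernel, here is a minimal userspace sketch of the validation that
add_huge_page_size() performs. The psize table contents and the
PAGE_SHIFT/SLICE_HIGH_SHIFT values below are simplified stand-ins
chosen only for illustration, and __builtin_ctzll() stands in for the
kernel's __ffs(); the real definitions live in arch/powerpc and
depend on the platform.

#include <stdio.h>

/* Simplified stand-ins for the kernel's constants and the
 * mmu_psize_defs[] table; the real values are configuration- and
 * platform-dependent. */
#define PAGE_SHIFT       12   /* 4K base pages, for illustration */
#define SLICE_HIGH_SHIFT 40
#define MMU_PAGE_COUNT   4

static const unsigned int psize_shifts[MMU_PAGE_COUNT] = {
	12,	/* 4K  */
	16,	/* 64K */
	24,	/* 16M */
	34,	/* 16G */
};

/* Same linear scan the patch introduces: find the psize index whose
 * recorded shift matches, or -1 if the size is unknown. */
static int shift_to_mmu_psize(unsigned int shift)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
		if (psize_shifts[psize] == shift)
			return psize;
	return -1;
}

/* Models add_huge_page_size(): reject sizes that are not a power of
 * two, fall outside the slice/base-page limits, or are unknown to
 * the (simplified) psize table. Returns 0 if the size would be
 * accepted, -1 otherwise. */
static int add_huge_page_size(unsigned long long size)
{
	int shift;

	/* the kernel's is_power_of_2() also rejects 0 */
	if (size == 0 || (size & (size - 1)) != 0)
		return -1;

	shift = __builtin_ctzll(size);	/* stands in for __ffs(size) */
	if (shift > SLICE_HIGH_SHIFT || shift <= PAGE_SHIFT)
		return -1;

	return shift_to_mmu_psize(shift) < 0 ? -1 : 0;
}

int main(void)
{
	printf("16M: %d\n", add_huge_page_size(1ULL << 24)); /* accepted */
	printf("1M : %d\n", add_huge_page_size(1ULL << 20)); /* not in table */
	printf("5M : %d\n", add_huge_page_size(5ULL << 20)); /* not a power of 2 */
	return 0;
}

Compiled with any C compiler, this prints 0 for 16M and -1 for the
two rejected sizes, mirroring the -EINVAL paths in the patch below.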
Diffstat (limited to 'arch/powerpc/mm/hugetlbpage.c')
-rw-r--r--	arch/powerpc/mm/hugetlbpage.c	128
1 file changed, 62 insertions(+), 66 deletions(-)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 95220a5dee58..a7161c07886d 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -37,27 +37,17 @@
 static unsigned long gpage_freearray[MAX_NUMBER_GPAGES];
 static unsigned nr_gpages;
 
-/* Array of valid huge page sizes - non-zero value(hugepte_shift) is
- * stored for the huge page sizes that are valid.
- */
-static unsigned int mmu_huge_psizes[MMU_PAGE_COUNT] = { }; /* initialize all to 0 */
-
 /* Flag to mark huge PD pointers.  This means pmd_bad() and pud_bad()
  * will choke on pointers to hugepte tables, which is handy for
  * catching screwups early. */
 
 static inline int shift_to_mmu_psize(unsigned int shift)
 {
-	switch (shift) {
-#ifndef CONFIG_PPC_64K_PAGES
-	case PAGE_SHIFT_64K:
-		return MMU_PAGE_64K;
-#endif
-	case PAGE_SHIFT_16M:
-		return MMU_PAGE_16M;
-	case PAGE_SHIFT_16G:
-		return MMU_PAGE_16G;
-	}
+	int psize;
+
+	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
+		if (mmu_psize_defs[psize].shift == shift)
+			return psize;
 	return -1;
 }
 
@@ -502,8 +492,6 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	struct hstate *hstate = hstate_file(file);
 	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));
 
-	if (!mmu_huge_psizes[mmu_psize])
-		return -EINVAL;
 	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0);
 }
 
@@ -666,47 +654,46 @@ repeat:
 	return err;
 }
 
-static void __init set_huge_psize(int psize)
+static int __init add_huge_page_size(unsigned long long size)
 {
-	unsigned pdshift;
+	int shift = __ffs(size);
+	int mmu_psize;
 
 	/* Check that it is a page size supported by the hardware and
-	 * that it fits within pagetable limits. */
-	if (mmu_psize_defs[psize].shift &&
-	    mmu_psize_defs[psize].shift < SID_SHIFT_1T &&
-	    (mmu_psize_defs[psize].shift > MIN_HUGEPTE_SHIFT ||
-	     mmu_psize_defs[psize].shift == PAGE_SHIFT_64K ||
-	     mmu_psize_defs[psize].shift == PAGE_SHIFT_16G)) {
-		/* Return if huge page size has already been setup or is the
-		 * same as the base page size. */
-		if (mmu_huge_psizes[psize] ||
-		    mmu_psize_defs[psize].shift == PAGE_SHIFT)
-			return;
-		hugetlb_add_hstate(mmu_psize_defs[psize].shift - PAGE_SHIFT);
+	 * that it fits within pagetable and slice limits. */
+	if (!is_power_of_2(size)
+	    || (shift > SLICE_HIGH_SHIFT) || (shift <= PAGE_SHIFT))
+		return -EINVAL;
 
-		if (mmu_psize_defs[psize].shift < PMD_SHIFT)
-			pdshift = PMD_SHIFT;
-		else if (mmu_psize_defs[psize].shift < PUD_SHIFT)
-			pdshift = PUD_SHIFT;
-		else
-			pdshift = PGDIR_SHIFT;
-		mmu_huge_psizes[psize] = pdshift - mmu_psize_defs[psize].shift;
-	}
+	if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
+		return -EINVAL;
+
+#ifdef CONFIG_SPU_FS_64K_LS
+	/* Disable support for 64K huge pages when 64K SPU local store
+	 * support is enabled as the current implementation conflicts.
+	 */
+	if (shift == PAGE_SHIFT_64K)
+		return -EINVAL;
+#endif /* CONFIG_SPU_FS_64K_LS */
+
+	BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);
+
+	/* Return if huge page size has already been setup */
+	if (size_to_hstate(size))
+		return 0;
+
+	hugetlb_add_hstate(shift - PAGE_SHIFT);
+
+	return 0;
 }
 
 static int __init hugepage_setup_sz(char *str)
 {
 	unsigned long long size;
-	int mmu_psize;
-	int shift;
 
 	size = memparse(str, &str);
 
-	shift = __ffs(size);
-	mmu_psize = shift_to_mmu_psize(shift);
-	if (mmu_psize >= 0 && mmu_psize_defs[mmu_psize].shift)
-		set_huge_psize(mmu_psize);
-	else
+	if (add_huge_page_size(size) != 0)
 		printk(KERN_WARNING "Invalid huge page size specified(%llu)\n", size);
 
 	return 1;
@@ -720,30 +707,39 @@ static int __init hugetlbpage_init(void)
 	if (!cpu_has_feature(CPU_FTR_16M_PAGE))
 		return -ENODEV;
 
-	/* Add supported huge page sizes.  Need to change
-	 * HUGE_MAX_HSTATE if the number of supported huge page sizes
-	 * changes.
-	 */
-	set_huge_psize(MMU_PAGE_16M);
-	set_huge_psize(MMU_PAGE_16G);
+	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
+		unsigned shift;
+		unsigned pdshift;
 
-	/* Temporarily disable support for 64K huge pages when 64K SPU local
-	 * store support is enabled as the current implementation conflicts.
-	 */
-#ifndef CONFIG_SPU_FS_64K_LS
-	set_huge_psize(MMU_PAGE_64K);
-#endif
+		if (!mmu_psize_defs[psize].shift)
+			continue;
 
-	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
-		if (mmu_huge_psizes[psize]) {
-			pgtable_cache_add(mmu_huge_psizes[psize], NULL);
-			if (!PGT_CACHE(mmu_huge_psizes[psize]))
-				panic("hugetlbpage_init(): could not create "
-				      "pgtable cache for %d bit pagesize\n",
-				      mmu_psize_to_shift(psize));
-		}
+		shift = mmu_psize_to_shift(psize);
+
+		if (add_huge_page_size(1ULL << shift) < 0)
+			continue;
+
+		if (shift < PMD_SHIFT)
+			pdshift = PMD_SHIFT;
+		else if (shift < PUD_SHIFT)
+			pdshift = PUD_SHIFT;
+		else
+			pdshift = PGDIR_SHIFT;
+
+		pgtable_cache_add(pdshift - shift, NULL);
+		if (!PGT_CACHE(pdshift - shift))
+			panic("hugetlbpage_init(): could not create "
+			      "pgtable cache for %d bit pagesize\n", shift);
 	}
 
+	/* Set default large page size. Currently, we pick 16M or 1M
+	 * depending on what is available
+	 */
+	if (mmu_psize_defs[MMU_PAGE_16M].shift)
+		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
+	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
+		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;
+
 	return 0;
 }
 
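
A footnote on the cache-sizing arithmetic in the new
hugetlbpage_init() loop: pdshift - shift is the number of bits needed
to index a hugepte table at the chosen pagetable level, which is the
size pgtable_cache_add() is asked to provide a kmem_cache for. The
following standalone sketch walks that calculation with illustrative
placeholder values for PMD_SHIFT/PUD_SHIFT/PGDIR_SHIFT; the real
shifts depend on the kernel configuration (e.g. 4K vs 64K base
pages).

#include <stdio.h>

/* Illustrative placeholders, not the configuration-dependent kernel
 * values. */
#define PMD_SHIFT   21
#define PUD_SHIFT   30
#define PGDIR_SHIFT 39

/* Mirrors the loop body: pick the smallest pagetable level whose
 * entries can map a huge page of the given shift, then return
 * pdshift - shift, the number of index bits in the hugepte table. */
static unsigned cache_index_bits(unsigned shift)
{
	unsigned pdshift;

	if (shift < PMD_SHIFT)
		pdshift = PMD_SHIFT;
	else if (shift < PUD_SHIFT)
		pdshift = PUD_SHIFT;
	else
		pdshift = PGDIR_SHIFT;

	return pdshift - shift;
}

int main(void)
{
	printf("16M -> %u index bits\n", cache_index_bits(24));
	printf("16G -> %u index bits\n", cache_index_bits(34));
	return 0;
}

With these placeholder shifts, a 16M page (shift 24) sits at the PUD
level and needs a 6-bit-indexed hugepte table, while a 16G page
(shift 34) sits at the PGD level with 5 index bits.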