author	David Gibson <david@gibson.dropbear.id.au>	2009-10-28 12:27:18 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-10-30 02:20:57 -0400
commit	a0668cdc154e54bf0c85182e0535eea237d53146 (patch)
tree	84efcadf011e16c240ac9b1c948141fc1cc7d324 /arch/powerpc/mm/hugetlbpage.c
parent	f71dc176aa06359681c30ba6877ffccab6fba3a6 (diff)
powerpc/mm: Cleanup management of kmem_caches for pagetables
Currently we have a fair bit of rather fiddly code to manage the
various kmem_caches used to store page tables of various levels.  We
generally have two caches holding some combination of PGD, PUD and PMD
tables, plus several more for the special hugepage pagetables.

This patch cleans this all up by taking a different approach.  Rather
than the caches being designated as for PUDs or for hugeptes for 16M
pages, the caches are simply allocated to be a specific size.  Thus
sharing of caches between different types/levels of pagetables happens
naturally.

The pagetable size, where needed, is passed around encoded in the same
way as {PGD,PUD,PMD}_INDEX_SIZE; that is n where the pagetable contains
2^n pointers.

Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
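To make the 2^n encoding concrete, here is a minimal userspace sketch of
the size-keyed cache idea, not the kernel code: the helper pgtable_bytes,
the MAX_PGTABLE_INDEX_SIZE bound, and the malloc-backed "cache" are
illustrative assumptions; the patch's real entry points are the
PGT_CACHE() / pgtable_cache_add() calls visible in the diff below.

#include <stdio.h>
#include <stdlib.h>

typedef unsigned long pte_t;       /* stand-in for the kernel's pte_t */

#define MAX_PGTABLE_INDEX_SIZE 16  /* illustrative bound on n */

/* One slot per encoded size n.  Any pagetable level whose index size
 * happens to equal n shares this slot, which is the point of the patch. */
static void *pgtable_cache[MAX_PGTABLE_INDEX_SIZE + 1];

/* A table of 2^n pointers occupies sizeof(pte_t) << n bytes. */
static size_t pgtable_bytes(unsigned int n)
{
	return sizeof(pte_t) << n;
}

/* Mimics pgtable_cache_add(): create the size-n cache only if it does
 * not exist yet.  Here a "cache" is just a preallocated buffer. */
static void pgtable_cache_add(unsigned int n)
{
	if (!pgtable_cache[n])
		pgtable_cache[n] = malloc(pgtable_bytes(n));
}

int main(void)
{
	pgtable_cache_add(9);   /* e.g. a table with 2^9 entries */
	pgtable_cache_add(9);   /* same-sized table of another level:
	                         * no-op, the cache is shared */
	pgtable_cache_add(12);

	printf("n=9  -> %zu-byte tables\n", pgtable_bytes(9));  /* 4096 on LP64 */
	printf("n=12 -> %zu-byte tables\n", pgtable_bytes(12)); /* 32768 on LP64 */
	return 0;
}

The design point is that two callers asking for the same n transparently
get the same cache, which is what lets different pagetable types and
levels of equal size share storage without any per-type bookkeeping.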
Diffstat (limited to 'arch/powerpc/mm/hugetlbpage.c')
-rw-r--r--	arch/powerpc/mm/hugetlbpage.c	51
1 file changed, 15 insertions(+), 36 deletions(-)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 3d542a9732ae..7230d7a4fbd9 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -43,26 +43,14 @@ static unsigned nr_gpages;
 unsigned int mmu_huge_psizes[MMU_PAGE_COUNT] = { }; /* initialize all to 0 */
 
 #define hugepte_shift			mmu_huge_psizes
-#define PTRS_PER_HUGEPTE(psize)	(1 << hugepte_shift[psize])
-#define HUGEPTE_TABLE_SIZE(psize)	(sizeof(pte_t) << hugepte_shift[psize])
+#define HUGEPTE_INDEX_SIZE(psize)	(mmu_huge_psizes[(psize)])
+#define PTRS_PER_HUGEPTE(psize)	(1 << mmu_huge_psizes[psize])
 
 #define HUGEPD_SHIFT(psize)		(mmu_psize_to_shift(psize) \
-						+ hugepte_shift[psize])
+					+ HUGEPTE_INDEX_SIZE(psize))
 #define HUGEPD_SIZE(psize)		(1UL << HUGEPD_SHIFT(psize))
 #define HUGEPD_MASK(psize)		(~(HUGEPD_SIZE(psize)-1))
 
-/* Subtract one from array size because we don't need a cache for 4K since
- * is not a huge page size */
-#define HUGE_PGTABLE_INDEX(psize)	(HUGEPTE_CACHE_NUM + psize - 1)
-#define HUGEPTE_CACHE_NAME(psize)	(huge_pgtable_cache_name[psize])
-
-static const char *huge_pgtable_cache_name[MMU_PAGE_COUNT] = {
-	[MMU_PAGE_64K]	= "hugepte_cache_64K",
-	[MMU_PAGE_1M]	= "hugepte_cache_1M",
-	[MMU_PAGE_16M]	= "hugepte_cache_16M",
-	[MMU_PAGE_16G]	= "hugepte_cache_16G",
-};
-
 /* Flag to mark huge PD pointers.  This means pmd_bad() and pud_bad()
  * will choke on pointers to hugepte tables, which is handy for
  * catching screwups early. */
@@ -114,15 +102,15 @@ static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
 static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 			   unsigned long address, unsigned int psize)
 {
-	pte_t *new = kmem_cache_zalloc(pgtable_cache[HUGE_PGTABLE_INDEX(psize)],
-				       GFP_KERNEL|__GFP_REPEAT);
+	pte_t *new = kmem_cache_zalloc(PGT_CACHE(hugepte_shift[psize]),
+				       GFP_KERNEL|__GFP_REPEAT);
 
 	if (! new)
 		return -ENOMEM;
 
 	spin_lock(&mm->page_table_lock);
 	if (!hugepd_none(*hpdp))
-		kmem_cache_free(pgtable_cache[HUGE_PGTABLE_INDEX(psize)], new);
+		kmem_cache_free(PGT_CACHE(hugepte_shift[psize]), new);
 	else
 		hpdp->pd = (unsigned long)new | HUGEPD_OK;
 	spin_unlock(&mm->page_table_lock);
@@ -271,9 +259,7 @@ static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp,
 
 	hpdp->pd = 0;
 	tlb->need_flush = 1;
-	pgtable_free_tlb(tlb, pgtable_free_cache(hugepte,
-						 HUGEPTE_CACHE_NUM+psize-1,
-						 PGF_CACHENUM_MASK));
+	pgtable_free_tlb(tlb, hugepte, hugepte_shift[psize]);
 }
 
 static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
@@ -698,8 +684,6 @@ static void __init set_huge_psize(int psize)
 	if (mmu_huge_psizes[psize] ||
 	    mmu_psize_defs[psize].shift == PAGE_SHIFT)
 		return;
-	if (WARN_ON(HUGEPTE_CACHE_NAME(psize) == NULL))
-		return;
 	hugetlb_add_hstate(mmu_psize_defs[psize].shift - PAGE_SHIFT);
 
 	switch (mmu_psize_defs[psize].shift) {
@@ -753,9 +737,9 @@ static int __init hugetlbpage_init(void)
 	if (!cpu_has_feature(CPU_FTR_16M_PAGE))
 		return -ENODEV;
 
-	/* Add supported huge page sizes.  Need to change HUGE_MAX_HSTATE
-	 * and adjust PTE_NONCACHE_NUM if the number of supported huge page
-	 * sizes changes.
+	/* Add supported huge page sizes.  Need to change
+	 * HUGE_MAX_HSTATE if the number of supported huge page sizes
+	 * changes.
 	 */
 	set_huge_psize(MMU_PAGE_16M);
 	set_huge_psize(MMU_PAGE_16G);
@@ -769,16 +753,11 @@ static int __init hugetlbpage_init(void)
 
 	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
 		if (mmu_huge_psizes[psize]) {
-			pgtable_cache[HUGE_PGTABLE_INDEX(psize)] =
-				kmem_cache_create(
-						HUGEPTE_CACHE_NAME(psize),
-						HUGEPTE_TABLE_SIZE(psize),
-						HUGEPTE_TABLE_SIZE(psize),
-						0,
-						NULL);
-			if (!pgtable_cache[HUGE_PGTABLE_INDEX(psize)])
-				panic("hugetlbpage_init(): could not create %s"\
-				      "\n", HUGEPTE_CACHE_NAME(psize));
+			pgtable_cache_add(hugepte_shift[psize], NULL);
+			if (!PGT_CACHE(hugepte_shift[psize]))
+				panic("hugetlbpage_init(): could not create "
+				      "pgtable cache for %d bit pagesize\n",
+				      mmu_psize_to_shift(psize));
 		}
 	}
 