about summary refs log tree commit diff stats
path: root/arch/powerpc/mm/init_64.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/powerpc/mm/init_64.c')
-rw-r--r--arch/powerpc/mm/init_64.c77
1 files changed, 56 insertions, 21 deletions
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 335c578b9cc3..d7fa50b09b4a 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -41,6 +41,8 @@
41#include <linux/module.h> 41#include <linux/module.h>
42#include <linux/poison.h> 42#include <linux/poison.h>
43#include <linux/lmb.h> 43#include <linux/lmb.h>
44#include <linux/hugetlb.h>
45#include <linux/slab.h>
44 46
45#include <asm/pgalloc.h> 47#include <asm/pgalloc.h>
46#include <asm/page.h> 48#include <asm/page.h>
@@ -119,30 +121,63 @@ static void pmd_ctor(void *addr)
119 memset(addr, 0, PMD_TABLE_SIZE); 121 memset(addr, 0, PMD_TABLE_SIZE);
120} 122}
121 123
122static const unsigned int pgtable_cache_size[2] = { 124struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];
123 PGD_TABLE_SIZE, PMD_TABLE_SIZE 125
124}; 126/*
125static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = { 127 * Create a kmem_cache() for pagetables. This is not used for PTE
126#ifdef CONFIG_PPC_64K_PAGES 128 * pages - they're linked to struct page, come from the normal free
127 "pgd_cache", "pmd_cache", 129 * pages pool and have a different entry size (see real_pte_t) to
128#else 130 * everything else. Caches created by this function are used for all
129 "pgd_cache", "pud_pmd_cache", 131 * the higher level pagetables, and for hugepage pagetables.
130#endif /* CONFIG_PPC_64K_PAGES */ 132 */
131}; 133void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
132 134{
133#ifdef CONFIG_HUGETLB_PAGE 135 char *name;
134/* Hugepages need an extra cache per hugepagesize, initialized in 136 unsigned long table_size = sizeof(void *) << shift;
135 * hugetlbpage.c. We can't put into the tables above, because HPAGE_SHIFT 137 unsigned long align = table_size;
136 * is not compile time constant. */ 138
137struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+MMU_PAGE_COUNT]; 139 /* When batching pgtable pointers for RCU freeing, we store
138#else 140 * the index size in the low bits. Table alignment must be
139struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)]; 141 * big enough to fit it.
140#endif 142 *
143 * Likewise, hugeapge pagetable pointers contain a (different)
144 * shift value in the low bits. All tables must be aligned so
145 * as to leave enough 0 bits in the address to contain it. */
146 unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
147 HUGEPD_SHIFT_MASK + 1);
148 struct kmem_cache *new;
149
150 /* It would be nice if this was a BUILD_BUG_ON(), but at the
151 * moment, gcc doesn't seem to recognize is_power_of_2 as a
152 * constant expression, so so much for that. */
153 BUG_ON(!is_power_of_2(minalign));
154 BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE));
155
156 if (PGT_CACHE(shift))
157 return; /* Already have a cache of this size */
158
159 align = max_t(unsigned long, align, minalign);
160 name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
161 new = kmem_cache_create(name, table_size, align, 0, ctor);
162 PGT_CACHE(shift) = new;
163
164 pr_debug("Allocated pgtable cache for order %d\n", shift);
165}
166
141 167
142void pgtable_cache_init(void) 168void pgtable_cache_init(void)
143{ 169{
144 pgtable_cache[0] = kmem_cache_create(pgtable_cache_name[0], PGD_TABLE_SIZE, PGD_TABLE_SIZE, SLAB_PANIC, pgd_ctor); 170 pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
145 pgtable_cache[1] = kmem_cache_create(pgtable_cache_name[1], PMD_TABLE_SIZE, PMD_TABLE_SIZE, SLAB_PANIC, pmd_ctor); 171 pgtable_cache_add(PMD_INDEX_SIZE, pmd_ctor);
172 if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_INDEX_SIZE))
173 panic("Couldn't allocate pgtable caches");
174
175 /* In all current configs, when the PUD index exists it's the
176 * same size as either the pgd or pmd index. Verify that the
177 * initialization above has also created a PUD cache. This
178 * will need re-examiniation if we add new possibilities for
179 * the pagetable layout. */
180 BUG_ON(PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE));
146} 181}
147 182
148#ifdef CONFIG_SPARSEMEM_VMEMMAP 183#ifdef CONFIG_SPARSEMEM_VMEMMAP