path: root/arch/powerpc/include/asm/pgalloc-64.h
author		David Gibson <david@gibson.dropbear.id.au>	2009-10-28 12:27:18 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-10-30 02:20:57 -0400
commit		a0668cdc154e54bf0c85182e0535eea237d53146 (patch)
tree		84efcadf011e16c240ac9b1c948141fc1cc7d324 /arch/powerpc/include/asm/pgalloc-64.h
parent		f71dc176aa06359681c30ba6877ffccab6fba3a6 (diff)
powerpc/mm: Cleanup management of kmem_caches for pagetables
Currently we have a fair bit of rather fiddly code to manage the various kmem_caches used to store page tables of various levels. We generally have two caches holding some combination of PGD, PUD and PMD tables, plus several more for the special hugepage pagetables.

This patch cleans this all up by taking a different approach. Rather than the caches being designated as for PUDs or for hugeptes for 16M pages, the caches are simply allocated to be a specific size. Thus sharing of caches between different types/levels of pagetables happens naturally.

The pagetable size, where needed, is passed around encoded in the same way as {PGD,PUD,PMD}_INDEX_SIZE; that is n where the pagetable contains 2^n pointers.

Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
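To make the encoding concrete: an index size of n means the table holds 2^n pointers, so its allocation size is 2^n * sizeof(void *), and PGT_CACHE(n) simply indexes the cache array by size. A minimal user-space sketch of that arithmetic, reusing the names introduced by the patch (the main() driver and printf loop are illustrative assumptions, not kernel code):

#include <stdio.h>
#include <stddef.h>

#define MAX_PGTABLE_INDEX_SIZE	0xf

/* One cache slot per possible index size, as in the patch:
 * PGT_CACHE(shift) picks the cache for tables of 2^shift pointers. */
static void *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];
#define PGT_CACHE(shift)	(pgtable_cache[(shift) - 1])

int main(void)
{
	/* A pagetable with index size n contains 2^n pointers. */
	for (unsigned int n = 1; n <= 4; n++)
		printf("index_size %u -> %zu-byte table (cache slot %u)\n",
		       n, (size_t)(1u << n) * sizeof(void *), n - 1);
	return 0;
}

Because two levels with the same index size map to the same slot, cache sharing between PGD/PUD/PMD and hugepage tables falls out of the indexing with no special cases.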
Diffstat (limited to 'arch/powerpc/include/asm/pgalloc-64.h')
-rw-r--r--	arch/powerpc/include/asm/pgalloc-64.h	60
1 file changed, 35 insertions, 25 deletions
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index e6f069c4f713..5c1cd73dafa8 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -11,27 +11,39 @@
 #include <linux/cpumask.h>
 #include <linux/percpu.h>
 
+/*
+ * Functions that deal with pagetables that could be at any level of
+ * the table need to be passed an "index_size" so they know how to
+ * handle allocation.  For PTE pages (which are linked to a struct
+ * page for now, and drawn from the main get_free_pages() pool), the
+ * allocation size will be (2^index_size * sizeof(pointer)) and
+ * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
+ *
+ * The maximum index size needs to be big enough to allow any
+ * pagetable sizes we need, but small enough to fit in the low bits of
+ * any page table pointer.  In other words all pagetables, even tiny
+ * ones, must be aligned to allow at least enough low 0 bits to
+ * contain this value.  This value is also used as a mask, so it must
+ * be one less than a power of two.
+ */
+#define MAX_PGTABLE_INDEX_SIZE	0xf
+
 #ifndef CONFIG_PPC_SUBPAGE_PROT
 static inline void subpage_prot_free(pgd_t *pgd) {}
 #endif
 
 extern struct kmem_cache *pgtable_cache[];
-
-#define PGD_CACHE_NUM		0
-#define PUD_CACHE_NUM		1
-#define PMD_CACHE_NUM		1
-#define HUGEPTE_CACHE_NUM	2
-#define PTE_NONCACHE_NUM	7  /* from GFP rather than kmem_cache */
+#define PGT_CACHE(shift) (pgtable_cache[(shift)-1])
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	return kmem_cache_alloc(pgtable_cache[PGD_CACHE_NUM], GFP_KERNEL);
+	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
 }
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
 	subpage_prot_free(pgd);
-	kmem_cache_free(pgtable_cache[PGD_CACHE_NUM], pgd);
+	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
 }
 
 #ifndef CONFIG_PPC_64K_PAGES
@@ -40,13 +52,13 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(pgtable_cache[PUD_CACHE_NUM],
+	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
 				GFP_KERNEL|__GFP_REPEAT);
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
 {
-	kmem_cache_free(pgtable_cache[PUD_CACHE_NUM], pud);
+	kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud);
 }
 
 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
@@ -78,13 +90,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(pgtable_cache[PMD_CACHE_NUM],
+	return kmem_cache_alloc(PGT_CACHE(PMD_INDEX_SIZE),
 				GFP_KERNEL|__GFP_REPEAT);
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 {
-	kmem_cache_free(pgtable_cache[PMD_CACHE_NUM], pmd);
+	kmem_cache_free(PGT_CACHE(PMD_INDEX_SIZE), pmd);
 }
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
@@ -107,24 +119,22 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 	return page;
 }
 
-static inline void pgtable_free(pgtable_free_t pgf)
+static inline void pgtable_free(void *table, unsigned index_size)
 {
-	void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
-	int cachenum = pgf.val & PGF_CACHENUM_MASK;
-
-	if (cachenum == PTE_NONCACHE_NUM)
-		free_page((unsigned long)p);
-	else
-		kmem_cache_free(pgtable_cache[cachenum], p);
+	if (!index_size)
+		free_page((unsigned long)table);
+	else {
+		BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
+		kmem_cache_free(PGT_CACHE(index_size), table);
+	}
 }
 
-#define __pmd_free_tlb(tlb, pmd,addr)		      \
-	pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
-		PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
+#define __pmd_free_tlb(tlb, pmd, addr) \
+	pgtable_free_tlb(tlb, pmd, PMD_INDEX_SIZE)
 #ifndef CONFIG_PPC_64K_PAGES
-#define __pud_free_tlb(tlb, pud, addr)		      \
-	pgtable_free_tlb(tlb, pgtable_free_cache(pud, \
-		PUD_CACHE_NUM, PUD_TABLE_SIZE-1))
+#define __pud_free_tlb(tlb, pud, addr) \
+	pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE)
+
 #endif /* CONFIG_PPC_64K_PAGES */
 
 #define check_pgt_cache()	do { } while (0)
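The new header comment requires MAX_PGTABLE_INDEX_SIZE to fit in the low zero bits of any aligned table pointer so the index size can ride in the pointer itself and be masked back out later. A user-space sketch of that pack/unpack trick; pack() and unpack() are hypothetical names for illustration only, not functions from this patch:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define MAX_PGTABLE_INDEX_SIZE	0xf

/* Stash index_size in the low bits of an aligned pointer. */
static uintptr_t pack(void *table, unsigned int index_size)
{
	assert(((uintptr_t)table & MAX_PGTABLE_INDEX_SIZE) == 0);
	assert(index_size <= MAX_PGTABLE_INDEX_SIZE);
	return (uintptr_t)table | index_size;
}

/* Recover both the pointer and the index size from the packed word. */
static void *unpack(uintptr_t v, unsigned int *index_size)
{
	*index_size = v & MAX_PGTABLE_INDEX_SIZE;
	return (void *)(v & ~(uintptr_t)MAX_PGTABLE_INDEX_SIZE);
}

int main(void)
{
	/* 64-byte alignment leaves the low 4 bits free for the mask. */
	void *table = aligned_alloc(64, 64);
	unsigned int n;

	assert(table);
	void *back = unpack(pack(table, 3), &n);
	assert(back == table && n == 3);
	free(table);
	return 0;
}

This is why the comment insists the value be one less than a power of two: it doubles as the mask that separates the index size from the pointer, with index size 0 left to mean a page-sized table freed back to get_free_pages() rather than a kmem_cache.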