Diffstat (limited to 'arch/powerpc/include/asm/pgalloc-64.h')
 arch/powerpc/include/asm/pgalloc-64.h | 63 ++++++++++++++++++++++++++++++++++-----------------------------
 1 file changed, 34 insertions(+), 29 deletions(-)
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index e6f069c4f71..605f5c5398d 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -11,27 +11,34 @@
 #include <linux/cpumask.h>
 #include <linux/percpu.h>
 
-#ifndef CONFIG_PPC_SUBPAGE_PROT
-static inline void subpage_prot_free(pgd_t *pgd) {}
-#endif
+/*
+ * Functions that deal with pagetables that could be at any level of
+ * the table need to be passed an "index_size" so they know how to
+ * handle allocation.  For PTE pages (which are linked to a struct
+ * page for now, and drawn from the main get_free_pages() pool), the
+ * allocation size will be (2^index_size * sizeof(pointer)) and
+ * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
+ *
+ * The maximum index size needs to be big enough to allow any
+ * pagetable sizes we need, but small enough to fit in the low bits of
+ * any page table pointer.  In other words all pagetables, even tiny
+ * ones, must be aligned to allow at least enough low 0 bits to
+ * contain this value.  This value is also used as a mask, so it must
+ * be one less than a power of two.
+ */
+#define MAX_PGTABLE_INDEX_SIZE	0xf
 
 extern struct kmem_cache *pgtable_cache[];
-
-#define PGD_CACHE_NUM		0
-#define PUD_CACHE_NUM		1
-#define PMD_CACHE_NUM		1
-#define HUGEPTE_CACHE_NUM	2
-#define PTE_NONCACHE_NUM	7  /* from GFP rather than kmem_cache */
+#define PGT_CACHE(shift) (pgtable_cache[(shift)-1])
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	return kmem_cache_alloc(pgtable_cache[PGD_CACHE_NUM], GFP_KERNEL);
+	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
 }
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-	subpage_prot_free(pgd);
-	kmem_cache_free(pgtable_cache[PGD_CACHE_NUM], pgd);
+	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
 }
 
 #ifndef CONFIG_PPC_64K_PAGES
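The new comment above is the crux of the change: every pagetable, even the smallest, must be aligned so that the low four bits of its pointer are zero, leaving room to fuse the table's index_size into the pointer itself, with MAX_PGTABLE_INDEX_SIZE doubling as the extraction mask. PGT_CACHE(shift) then simply indexes pgtable_cache[] by shift-1, one slot per distinct table size, instead of the old hand-numbered *_CACHE_NUM slots. A minimal standalone C sketch of that packing invariant (the pgf variable and the assertions are illustrative only, not part of the patch):

	#include <assert.h>
	#include <stdint.h>
	#include <stdlib.h>

	#define MAX_PGTABLE_INDEX_SIZE 0xf	/* one less than a power of two */

	int main(void)
	{
		unsigned index_size = 9;	/* hypothetical 512-entry table */

		/* Tables must be aligned to MAX_PGTABLE_INDEX_SIZE + 1 bytes
		 * so the low bits are guaranteed zero before packing. */
		void *table = aligned_alloc(MAX_PGTABLE_INDEX_SIZE + 1, 4096);
		assert(table && ((uintptr_t)table & MAX_PGTABLE_INDEX_SIZE) == 0);

		/* Pack: the size rides in the otherwise-unused low bits. */
		uintptr_t pgf = (uintptr_t)table | index_size;

		/* Unpack: the same constant serves as the mask, which is why
		 * it must be one less than a power of two. */
		assert((void *)(pgf & ~(uintptr_t)MAX_PGTABLE_INDEX_SIZE) == table);
		assert((pgf & MAX_PGTABLE_INDEX_SIZE) == index_size);

		free(table);
		return 0;
	}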
@@ -40,13 +47,13 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(pgtable_cache[PUD_CACHE_NUM],
+	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
 				GFP_KERNEL|__GFP_REPEAT);
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
 {
-	kmem_cache_free(pgtable_cache[PUD_CACHE_NUM], pud);
+	kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud);
 }
 
 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
@@ -78,13 +85,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(pgtable_cache[PMD_CACHE_NUM],
+	return kmem_cache_alloc(PGT_CACHE(PMD_INDEX_SIZE),
 				GFP_KERNEL|__GFP_REPEAT);
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 {
-	kmem_cache_free(pgtable_cache[PMD_CACHE_NUM], pmd);
+	kmem_cache_free(PGT_CACHE(PMD_INDEX_SIZE), pmd);
 }
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
@@ -107,24 +114,22 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 	return page;
 }
 
-static inline void pgtable_free(pgtable_free_t pgf)
+static inline void pgtable_free(void *table, unsigned index_size)
 {
-	void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
-	int cachenum = pgf.val & PGF_CACHENUM_MASK;
-
-	if (cachenum == PTE_NONCACHE_NUM)
-		free_page((unsigned long)p);
-	else
-		kmem_cache_free(pgtable_cache[cachenum], p);
+	if (!index_size)
+		free_page((unsigned long)table);
+	else {
+		BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
+		kmem_cache_free(PGT_CACHE(index_size), table);
+	}
 }
 
-#define __pmd_free_tlb(tlb, pmd,addr)		      \
-	pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
-		PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
+#define __pmd_free_tlb(tlb, pmd, addr)		      \
+	pgtable_free_tlb(tlb, pmd, PMD_INDEX_SIZE)
 #ifndef CONFIG_PPC_64K_PAGES
-#define __pud_free_tlb(tlb, pud, addr)		      \
-	pgtable_free_tlb(tlb, pgtable_free_cache(pud, \
-		PUD_CACHE_NUM, PUD_TABLE_SIZE-1))
+#define __pud_free_tlb(tlb, pud, addr)		      \
+	pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE)
+
 #endif /* CONFIG_PPC_64K_PAGES */
 
 #define check_pgt_cache()	do { } while (0)
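With index_size == 0 reserved for PTE pages drawn from the page allocator, one pgtable_free() now serves every level, and the __pmd_free_tlb()/__pud_free_tlb() macros can pass a raw pointer plus size instead of the old pgtable_free_cache() encoding. A sketch of how a deferred-free consumer might carry the pair through a single unsigned long and hand it back (modeled loosely on the companion pgtable_free_tlb() change; defer_table_free() and flush_deferred_free() are illustrative names, not kernel functions):

	/* Illustrative only; not part of this header.  Relies on the
	 * packing convention above: table pointers are aligned past
	 * MAX_PGTABLE_INDEX_SIZE, so the size tag fits in the low bits. */
	static void defer_table_free(unsigned long *slot, void *table,
				     unsigned shift)
	{
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		*slot = (unsigned long)table | shift;	/* pointer + size tag */
	}

	static void flush_deferred_free(unsigned long pgf)
	{
		void *table = (void *)(pgf & ~(unsigned long)MAX_PGTABLE_INDEX_SIZE);
		unsigned shift = pgf & MAX_PGTABLE_INDEX_SIZE;

		pgtable_free(table, shift);	/* shift == 0 -> free_page() */
	}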