diff options
author    Hugh Dickins <hugh@veritas.com>      2007-05-09 00:38:48 -0400
committer Paul Mackerras <paulus@samba.org>    2007-05-09 02:35:00 -0400
commit    517e22638c282bb07c52a11f928961ed4822196b (patch)
tree      7eab8eb1242ee18f75c325077f26bdcb86133512
parent    f1fa74f4afe96b0e4ac2beaa61fa4f4667acdcbb (diff)
[POWERPC] Don't use SLAB/SLUB for PTE pages
The SLUB allocator relies on struct page fields first_page and slab,
overwritten by ptl when SPLIT_PTLOCK: so the SLUB allocator cannot then
be used for the lowest level of pagetable pages. This was obstructing
SLUB on PowerPC, which uses kmem_caches for its pagetables. So convert
its pte level to use normal gfp pages (whereas pmd, pud and 64k-page pgd
want partpages, so continue to use kmem_caches for pmd, pud and pgd).
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
 arch/powerpc/Kconfig             | 13
 arch/powerpc/mm/init_64.c        | 17
 include/asm-powerpc/pgalloc-64.h | 31
 3 files changed, 19 insertions(+), 42 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index ecd459dd1baf..ccc5410af996 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -120,19 +120,6 @@ config GENERIC_BUG | |||
120 | config SYS_SUPPORTS_APM_EMULATION | 120 | config SYS_SUPPORTS_APM_EMULATION |
121 | bool | 121 | bool |
122 | 122 | ||
123 | # | ||
124 | # Powerpc uses the slab allocator to manage its ptes and the | ||
125 | # page structs of ptes are used for splitting the page table | ||
126 | # lock for configurations supporting more than SPLIT_PTLOCK_CPUS. | ||
127 | # | ||
128 | # In that special configuration the page structs of slabs are modified. | ||
129 | # This setting disables the selection of SLUB as a slab allocator. | ||
130 | # | ||
131 | config ARCH_USES_SLAB_PAGE_STRUCT | ||
132 | bool | ||
133 | default y | ||
134 | depends on SPLIT_PTLOCK_CPUS <= NR_CPUS | ||
135 | |||
136 | config DEFAULT_UIMAGE | 123 | config DEFAULT_UIMAGE |
137 | bool | 124 | bool |
138 | help | 125 | help |
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index fe1fe852181a..7312a265545f 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -146,21 +146,16 @@ static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags) | |||
146 | memset(addr, 0, kmem_cache_size(cache)); | 146 | memset(addr, 0, kmem_cache_size(cache)); |
147 | } | 147 | } |
148 | 148 | ||
149 | #ifdef CONFIG_PPC_64K_PAGES | ||
150 | static const unsigned int pgtable_cache_size[3] = { | ||
151 | PTE_TABLE_SIZE, PMD_TABLE_SIZE, PGD_TABLE_SIZE | ||
152 | }; | ||
153 | static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = { | ||
154 | "pte_pmd_cache", "pmd_cache", "pgd_cache", | ||
155 | }; | ||
156 | #else | ||
157 | static const unsigned int pgtable_cache_size[2] = { | 149 | static const unsigned int pgtable_cache_size[2] = { |
158 | PTE_TABLE_SIZE, PMD_TABLE_SIZE | 150 | PGD_TABLE_SIZE, PMD_TABLE_SIZE |
159 | }; | 151 | }; |
160 | static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = { | 152 | static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = { |
161 | "pgd_pte_cache", "pud_pmd_cache", | 153 | #ifdef CONFIG_PPC_64K_PAGES |
162 | }; | 154 | "pgd_cache", "pmd_cache", |
155 | #else | ||
156 | "pgd_cache", "pud_pmd_cache", | ||
163 | #endif /* CONFIG_PPC_64K_PAGES */ | 157 | #endif /* CONFIG_PPC_64K_PAGES */ |
158 | }; | ||
164 | 159 | ||
165 | #ifdef CONFIG_HUGETLB_PAGE | 160 | #ifdef CONFIG_HUGETLB_PAGE |
166 | /* Hugepages need one extra cache, initialized in hugetlbpage.c. We | 161 | /* Hugepages need one extra cache, initialized in hugetlbpage.c. We |
diff --git a/include/asm-powerpc/pgalloc-64.h b/include/asm-powerpc/pgalloc-64.h
index 30b50cf56e2c..d9a3a8ca58a1 100644
--- a/include/asm-powerpc/pgalloc-64.h
+++ b/include/asm-powerpc/pgalloc-64.h
@@ -14,18 +14,11 @@ | |||
14 | 14 | ||
15 | extern struct kmem_cache *pgtable_cache[]; | 15 | extern struct kmem_cache *pgtable_cache[]; |
16 | 16 | ||
17 | #ifdef CONFIG_PPC_64K_PAGES | 17 | #define PGD_CACHE_NUM 0 |
18 | #define PTE_CACHE_NUM 0 | 18 | #define PUD_CACHE_NUM 1 |
19 | #define PMD_CACHE_NUM 1 | 19 | #define PMD_CACHE_NUM 1 |
20 | #define PGD_CACHE_NUM 2 | 20 | #define HUGEPTE_CACHE_NUM 2 |
21 | #define HUGEPTE_CACHE_NUM 3 | 21 | #define PTE_NONCACHE_NUM 3 /* from GFP rather than kmem_cache */ |
22 | #else | ||
23 | #define PTE_CACHE_NUM 0 | ||
24 | #define PMD_CACHE_NUM 1 | ||
25 | #define PUD_CACHE_NUM 1 | ||
26 | #define PGD_CACHE_NUM 0 | ||
27 | #define HUGEPTE_CACHE_NUM 2 | ||
28 | #endif | ||
29 | 22 | ||
30 | static inline pgd_t *pgd_alloc(struct mm_struct *mm) | 23 | static inline pgd_t *pgd_alloc(struct mm_struct *mm) |
31 | { | 24 | { |
@@ -91,8 +84,7 @@ static inline void pmd_free(pmd_t *pmd) | |||
91 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, | 84 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, |
92 | unsigned long address) | 85 | unsigned long address) |
93 | { | 86 | { |
94 | return kmem_cache_alloc(pgtable_cache[PTE_CACHE_NUM], | 87 | return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO); |
95 | GFP_KERNEL|__GFP_REPEAT); | ||
96 | } | 88 | } |
97 | 89 | ||
98 | static inline struct page *pte_alloc_one(struct mm_struct *mm, | 90 | static inline struct page *pte_alloc_one(struct mm_struct *mm, |
@@ -103,12 +95,12 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, | |||
103 | 95 | ||
104 | static inline void pte_free_kernel(pte_t *pte) | 96 | static inline void pte_free_kernel(pte_t *pte) |
105 | { | 97 | { |
106 | kmem_cache_free(pgtable_cache[PTE_CACHE_NUM], pte); | 98 | free_page((unsigned long)pte); |
107 | } | 99 | } |
108 | 100 | ||
109 | static inline void pte_free(struct page *ptepage) | 101 | static inline void pte_free(struct page *ptepage) |
110 | { | 102 | { |
111 | pte_free_kernel(page_address(ptepage)); | 103 | __free_page(ptepage); |
112 | } | 104 | } |
113 | 105 | ||
114 | #define PGF_CACHENUM_MASK 0x3 | 106 | #define PGF_CACHENUM_MASK 0x3 |
@@ -130,14 +122,17 @@ static inline void pgtable_free(pgtable_free_t pgf) | |||
130 | void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK); | 122 | void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK); |
131 | int cachenum = pgf.val & PGF_CACHENUM_MASK; | 123 | int cachenum = pgf.val & PGF_CACHENUM_MASK; |
132 | 124 | ||
133 | kmem_cache_free(pgtable_cache[cachenum], p); | 125 | if (cachenum == PTE_NONCACHE_NUM) |
126 | free_page((unsigned long)p); | ||
127 | else | ||
128 | kmem_cache_free(pgtable_cache[cachenum], p); | ||
134 | } | 129 | } |
135 | 130 | ||
136 | extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf); | 131 | extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf); |
137 | 132 | ||
138 | #define __pte_free_tlb(tlb, ptepage) \ | 133 | #define __pte_free_tlb(tlb, ptepage) \ |
139 | pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \ | 134 | pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \ |
140 | PTE_CACHE_NUM, PTE_TABLE_SIZE-1)) | 135 | PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)) |
141 | #define __pmd_free_tlb(tlb, pmd) \ | 136 | #define __pmd_free_tlb(tlb, pmd) \ |
142 | pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \ | 137 | pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \ |
143 | PMD_CACHE_NUM, PMD_TABLE_SIZE-1)) | 138 | PMD_CACHE_NUM, PMD_TABLE_SIZE-1)) |