author	Hugh Dickins <hugh@veritas.com>	2007-05-09 00:38:48 -0400
committer	Paul Mackerras <paulus@samba.org>	2007-05-09 02:35:00 -0400
commit	517e22638c282bb07c52a11f928961ed4822196b (patch)
tree	7eab8eb1242ee18f75c325077f26bdcb86133512 /arch
parent	f1fa74f4afe96b0e4ac2beaa61fa4f4667acdcbb (diff)
[POWERPC] Don't use SLAB/SLUB for PTE pages
The SLUB allocator relies on struct page fields first_page and slab, overwritten by ptl when SPLIT_PTLOCK: so the SLUB allocator cannot then be used for the lowest level of pagetable pages.  This was obstructing SLUB on PowerPC, which uses kmem_caches for its pagetables.  So convert its pte level to use normal gfp pages (whereas pmd, pud and 64k-page pgd want partpages, so continue to use kmem_caches for pmd, pud and pgd).

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
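For background on the conflict the message describes: the split page-table lock lives in struct page itself, in storage that SLUB also needs for its own bookkeeping. The sketch below is illustrative only (the union name and exact layout are invented for the example, not copied from this kernel's struct page); the point is just why a SLUB-allocated page cannot also carry the ptl.

#include <linux/spinlock.h>

/*
 * Illustrative sketch, not the verbatim struct page of this kernel:
 * these fields share storage, so a page handed out by SLUB cannot
 * also hold a split page-table lock.
 */
union pte_page_overlap_sketch {
	spinlock_t ptl;			/* split page-table lock (SPLIT_PTLOCK) */
	struct kmem_cache *slab;	/* needed by SLUB for its slab pages */
	struct page *first_page;	/* needed by SLUB for its slab pages */
};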
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/Kconfig	13
-rw-r--r--	arch/powerpc/mm/init_64.c	17
2 files changed, 6 insertions(+), 24 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index ecd459dd1baf..ccc5410af996 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -120,19 +120,6 @@ config GENERIC_BUG
 config SYS_SUPPORTS_APM_EMULATION
 	bool
 
-#
-# Powerpc uses the slab allocator to manage its ptes and the
-# page structs of ptes are used for splitting the page table
-# lock for configurations supporting more than SPLIT_PTLOCK_CPUS.
-#
-# In that special configuration the page structs of slabs are modified.
-# This setting disables the selection of SLUB as a slab allocator.
-#
-config ARCH_USES_SLAB_PAGE_STRUCT
-	bool
-	default y
-	depends on SPLIT_PTLOCK_CPUS <= NR_CPUS
-
 config DEFAULT_UIMAGE
 	bool
 	help
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index fe1fe852181a..7312a265545f 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -146,21 +146,16 @@ static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
 	memset(addr, 0, kmem_cache_size(cache));
 }
 
-#ifdef CONFIG_PPC_64K_PAGES
-static const unsigned int pgtable_cache_size[3] = {
-	PTE_TABLE_SIZE, PMD_TABLE_SIZE, PGD_TABLE_SIZE
-};
-static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
-	"pte_pmd_cache", "pmd_cache", "pgd_cache",
-};
-#else
 static const unsigned int pgtable_cache_size[2] = {
-	PTE_TABLE_SIZE, PMD_TABLE_SIZE
+	PGD_TABLE_SIZE, PMD_TABLE_SIZE
 };
 static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
-	"pgd_pte_cache", "pud_pmd_cache",
-};
+#ifdef CONFIG_PPC_64K_PAGES
+	"pgd_cache", "pmd_cache",
+#else
+	"pgd_cache", "pud_pmd_cache",
 #endif /* CONFIG_PPC_64K_PAGES */
+};
 
 #ifdef CONFIG_HUGETLB_PAGE
 /* Hugepages need one extra cache, initialized in hugetlbpage.c.  We
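The pte-level conversion itself is in the pgalloc headers, which fall outside the 'arch' diffstat shown above. A minimal sketch of what taking pte pages straight from the page allocator looks like, with hypothetical _sketch helper names standing in for the real header functions:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Sketch only: pte pages come from the buddy allocator, pre-zeroed,
 * instead of from a kmem_cache. */
static pte_t *pte_alloc_one_kernel_sketch(struct mm_struct *mm,
					   unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
}

static void pte_free_kernel_sketch(pte_t *pte)
{
	free_page((unsigned long)pte);
}

Whole pages leave struct page free to hold the ptl, while the pmd, pud and 64k-page pgd tables are smaller than a page ("partpages" in the message above), so they stay in kmem_caches.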