 arch/powerpc/Kconfig             | 13 -------------
 arch/powerpc/mm/init_64.c        | 17 ++++++-----------
 include/asm-powerpc/pgalloc-64.h | 31 +++++++++++++------------------
 3 files changed, 19 insertions(+), 42 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index ecd459dd1baf..ccc5410af996 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -120,19 +120,6 @@ config GENERIC_BUG
 config SYS_SUPPORTS_APM_EMULATION
 	bool
 
-#
-# Powerpc uses the slab allocator to manage its ptes and the
-# page structs of ptes are used for splitting the page table
-# lock for configurations supporting more than SPLIT_PTLOCK_CPUS.
-#
-# In that special configuration the page structs of slabs are modified.
-# This setting disables the selection of SLUB as a slab allocator.
-#
-config ARCH_USES_SLAB_PAGE_STRUCT
-	bool
-	default y
-	depends on SPLIT_PTLOCK_CPUS <= NR_CPUS
-
 config DEFAULT_UIMAGE
 	bool
 	help
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index fe1fe852181a..7312a265545f 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -146,21 +146,16 @@ static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
 	memset(addr, 0, kmem_cache_size(cache));
 }
 
-#ifdef CONFIG_PPC_64K_PAGES
-static const unsigned int pgtable_cache_size[3] = {
-	PTE_TABLE_SIZE, PMD_TABLE_SIZE, PGD_TABLE_SIZE
-};
-static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
-	"pte_pmd_cache", "pmd_cache", "pgd_cache",
-};
-#else
 static const unsigned int pgtable_cache_size[2] = {
-	PTE_TABLE_SIZE, PMD_TABLE_SIZE
+	PGD_TABLE_SIZE, PMD_TABLE_SIZE
 };
 static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
-	"pgd_pte_cache", "pud_pmd_cache",
-};
+#ifdef CONFIG_PPC_64K_PAGES
+	"pgd_cache", "pmd_cache",
+#else
+	"pgd_cache", "pud_pmd_cache",
 #endif /* CONFIG_PPC_64K_PAGES */
+};
 
 #ifdef CONFIG_HUGETLB_PAGE
 /* Hugepages need one extra cache, initialized in hugetlbpage.c.  We
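With the PTE cache dropped, init_64.c now creates only two kmem caches, and only the cache names differ between the 4K and 64K configurations. For orientation, a minimal sketch of how pgtable_cache_init() in this file consumes the two arrays; the SLAB_PANIC flag and the use of zero_ctor (shown in the context above) as constructor are assumptions about code outside this hunk:

void pgtable_cache_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) {
		int size = pgtable_cache_size[i];
		const char *name = pgtable_cache_name[i];

		/* Sketch: one size-aligned, zeroed cache per table level. */
		pgtable_cache[i] = kmem_cache_create(name, size, size,
						     SLAB_PANIC, zero_ctor,
						     NULL);
	}
}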
diff --git a/include/asm-powerpc/pgalloc-64.h b/include/asm-powerpc/pgalloc-64.h
index 30b50cf56e2c..d9a3a8ca58a1 100644
--- a/include/asm-powerpc/pgalloc-64.h
+++ b/include/asm-powerpc/pgalloc-64.h
@@ -14,18 +14,11 @@
 
 extern struct kmem_cache *pgtable_cache[];
 
-#ifdef CONFIG_PPC_64K_PAGES
-#define PTE_CACHE_NUM	0
-#define PMD_CACHE_NUM	1
-#define PGD_CACHE_NUM	2
-#define HUGEPTE_CACHE_NUM 3
-#else
-#define PTE_CACHE_NUM	0
-#define PMD_CACHE_NUM	1
-#define PUD_CACHE_NUM	1
-#define PGD_CACHE_NUM	0
-#define HUGEPTE_CACHE_NUM 2
-#endif
+#define PGD_CACHE_NUM		0
+#define PUD_CACHE_NUM		1
+#define PMD_CACHE_NUM		1
+#define HUGEPTE_CACHE_NUM	2
+#define PTE_NONCACHE_NUM	3  /* from GFP rather than kmem_cache */
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
@@ -91,8 +84,7 @@ static inline void pmd_free(pmd_t *pmd)
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 					  unsigned long address)
 {
-	return kmem_cache_alloc(pgtable_cache[PTE_CACHE_NUM],
-				GFP_KERNEL|__GFP_REPEAT);
+	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
 }
 
 static inline struct page *pte_alloc_one(struct mm_struct *mm,
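pte_alloc_one() (its signature is the trailing context above) is presumably just the struct-page-returning wrapper around pte_alloc_one_kernel(); under that assumption it amounts to:

static inline struct page *pte_alloc_one(struct mm_struct *mm,
					 unsigned long address)
{
	/* Assumed definition; the hunk above shows only the signature. */
	return virt_to_page(pte_alloc_one_kernel(mm, address));
}

which is what allows pte_free() below to hand the page straight back with __free_page().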
@@ -103,12 +95,12 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
 
 static inline void pte_free_kernel(pte_t *pte)
 {
-	kmem_cache_free(pgtable_cache[PTE_CACHE_NUM], pte);
+	free_page((unsigned long)pte);
 }
 
 static inline void pte_free(struct page *ptepage)
 {
-	pte_free_kernel(page_address(ptepage));
+	__free_page(ptepage);
 }
 
 #define PGF_CACHENUM_MASK	0x3
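A PGF_CACHENUM_MASK of 0x3 means the cache index travels in the two low bits of the (at least 4-byte-aligned) table address, so PTE_NONCACHE_NUM == 3 is the largest value that fits. A sketch of the packing side, assuming pgtable_free_t and pgtable_free_cache() are defined along these lines elsewhere in this header:

typedef struct pgtable_free {
	unsigned long val;
} pgtable_free_t;

static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
						unsigned long mask)
{
	/* Sketch: the index must fit in the low bits cleared by the mask. */
	BUG_ON(cachenum > PGF_CACHENUM_MASK);

	return (pgtable_free_t){.val = ((unsigned long)p & ~mask) | cachenum};
}

pgtable_free() in the next hunk then strips those two bits off again before freeing.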
@@ -130,14 +122,17 @@ static inline void pgtable_free(pgtable_free_t pgf)
 	void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
 	int cachenum = pgf.val & PGF_CACHENUM_MASK;
 
-	kmem_cache_free(pgtable_cache[cachenum], p);
+	if (cachenum == PTE_NONCACHE_NUM)
+		free_page((unsigned long)p);
+	else
+		kmem_cache_free(pgtable_cache[cachenum], p);
 }
 
 extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
 
 #define __pte_free_tlb(tlb, ptepage)	\
 	pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
-		PTE_CACHE_NUM, PTE_TABLE_SIZE-1))
+		PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1))
 #define __pmd_free_tlb(tlb, pmd)	\
 	pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
 		PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
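The net effect: PTE pages now come straight from the page allocator (tagged PTE_NONCACHE_NUM on the deferred-free path), while the PGD/PUD/PMD tables keep their kmem caches. With no page-table level left in slab memory, the split page-table lock no longer modifies the page structs of slab pages, which is exactly the conflict the ARCH_USES_SLAB_PAGE_STRUCT option removed in the Kconfig hunk existed to guard against; SLUB thus becomes selectable on powerpc.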