author	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>	2016-04-29 09:26:18 -0400
committer	Michael Ellerman <mpe@ellerman.id.au>	2016-05-11 07:53:51 -0400
commit	934828edfadc43be07e53429ce501741bedf4a5e (patch)
tree	5df375f862384879712ca7c1473fe8527e850a74
parent	74701d5947a6fb38ece37c1ff1e5a77c36ee7b9c (diff)
powerpc/mm: Make 4K and 64K use pte_t for pgtable_t
This patch switches 4K Linux page size config to use pte_t * type instead
of struct page * for pgtable_t. This simplifies the code a lot and helps
in consolidating both 64K and 4K page allocator routines. The changes
should not have any impact, because we already store physical address in
the upper level page table tree and that implies we already do struct
page * to physical address conversion.

One change to note here is we move the pgtable_page_dtor() call for
nohash to pte_fragment_free_mm(). The nohash related change is due to
the related changes in pgtable_64.c.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
-rw-r--r--	arch/powerpc/include/asm/book3s/64/pgalloc.h	147
-rw-r--r--	arch/powerpc/include/asm/nohash/64/pgalloc.h	38
-rw-r--r--	arch/powerpc/include/asm/page.h	10
-rw-r--r--	arch/powerpc/mm/pgtable_64.c	2
4 files changed, 52 insertions, 145 deletions
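Illustrative aside, not part of the commit: the core of the change is that pgtable_t stops being a struct page * handle on 4K configs and becomes a pte_t *, so populating a pmd no longer needs a page_address()-style conversion first. The self-contained userspace sketch below only models that difference; every name in it (fake_page, fake_page_address, pmd_populate_old/new) is made up for the illustration and is not the kernel code.

/*
 * Userspace model of the pgtable_t type change, built from stand-in types.
 */
#include <stdio.h>

struct fake_page { void *virt; };      /* stand-in for struct page        */
typedef unsigned long pte_t;           /* stand-in for the kernel's pte_t */

static void *fake_page_address(struct fake_page *p) { return p->virt; }

/* Old 4K scheme: pgtable_t was a struct page *, so an extra conversion ran. */
static void pmd_populate_old(void **pmd_slot, struct fake_page *pte_page)
{
	*pmd_slot = fake_page_address(pte_page);
}

/* New scheme (4K and 64K): pgtable_t is a pte_t *, stored as-is. */
static void pmd_populate_new(void **pmd_slot, pte_t *pte_page)
{
	*pmd_slot = pte_page;
}

int main(void)
{
	pte_t ptes[512] = { 0 };
	struct fake_page page = { .virt = ptes };
	void *pmd_slot;

	pmd_populate_old(&pmd_slot, &page);
	printf("old: %p\n", pmd_slot);

	pmd_populate_new(&pmd_slot, ptes);
	printf("new: %p\n", pmd_slot);
	return 0;
}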
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index 37283e3d8e56..faad1319ba26 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -41,6 +41,15 @@ extern struct kmem_cache *pgtable_cache[];
 	pgtable_cache[(shift) - 1];	\
 })
 
+#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO
+
+extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
+extern void pte_fragment_free(unsigned long *, int);
+extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
+#ifdef CONFIG_SMP
+extern void __tlb_remove_table(void *_table);
+#endif
+
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
@@ -72,29 +81,47 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 	pud_set(pud, __pgtable_ptr_val(pmd));
 }
 
+static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
+				  unsigned long address)
+{
+	pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE);
+}
+
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
+				GFP_KERNEL|__GFP_REPEAT);
+}
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+	kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
+}
+
+static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
+				  unsigned long address)
+{
+	return pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX);
+}
+
 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
 				       pte_t *pte)
 {
 	pmd_set(pmd, __pgtable_ptr_val(pte));
 }
-/*
- * FIXME!!
- * Between 4K and 64K pages, we differ in what is stored in pmd. ie.
- * typedef pte_t *pgtable_t; -> 64K
- * typedef struct page *pgtable_t; -> 4k
- */
-#ifdef CONFIG_PPC_4K_PAGES
+
 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 				pgtable_t pte_page)
 {
-	pmd_set(pmd, __pgtable_ptr_val(page_address(pte_page)));
+	pmd_set(pmd, __pgtable_ptr_val(pte_page));
 }
 
 static inline pgtable_t pmd_pgtable(pmd_t pmd)
 {
-	return pmd_page(pmd);
+	return (pgtable_t)pmd_page_vaddr(pmd);
 }
 
+#ifdef CONFIG_PPC_4K_PAGES
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 					  unsigned long address)
 {
@@ -115,83 +142,10 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 		__free_page(page);
 		return NULL;
 	}
-	return page;
-}
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
-	free_page((unsigned long)pte);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
-{
-	pgtable_page_dtor(ptepage);
-	__free_page(ptepage);
-}
-
-static inline void pgtable_free(void *table, unsigned index_size)
-{
-	if (!index_size)
-		free_page((unsigned long)table);
-	else {
-		BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
-		kmem_cache_free(PGT_CACHE(index_size), table);
-	}
-}
-
-#ifdef CONFIG_SMP
-static inline void pgtable_free_tlb(struct mmu_gather *tlb,
-				    void *table, int shift)
-{
-	unsigned long pgf = (unsigned long)table;
-	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
-	pgf |= shift;
-	tlb_remove_table(tlb, (void *)pgf);
-}
-
-static inline void __tlb_remove_table(void *_table)
-{
-	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
-	unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
-
-	pgtable_free(table, shift);
-}
-#else /* !CONFIG_SMP */
-static inline void pgtable_free_tlb(struct mmu_gather *tlb,
-				    void *table, int shift)
-{
-	pgtable_free(table, shift);
-}
-#endif /* CONFIG_SMP */
-
-static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
-				  unsigned long address)
-{
-	tlb_flush_pgtable(tlb, address);
-	pgtable_page_dtor(table);
-	pgtable_free_tlb(tlb, page_address(table), 0);
-}
-
+	return pte;
+}
 #else /* if CONFIG_PPC_64K_PAGES */
 
-extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
-extern void pte_fragment_free(unsigned long *, int);
-extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
-#ifdef CONFIG_SMP
-extern void __tlb_remove_table(void *_table);
-#endif
-
-static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
-				pgtable_t pte_page)
-{
-	pmd_set(pmd, __pgtable_ptr_val(pte_page));
-}
-
-static inline pgtable_t pmd_pgtable(pmd_t pmd)
-{
-	return (pgtable_t)pmd_page_vaddr(pmd);
-}
-
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 					  unsigned long address)
 {
@@ -199,10 +153,11 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 				      unsigned long address)
 {
 	return (pgtable_t)pte_fragment_alloc(mm, address, 0);
 }
+#endif
 
 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
@@ -220,30 +175,6 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 	tlb_flush_pgtable(tlb, address);
 	pgtable_free_tlb(tlb, table, 0);
 }
-#endif /* CONFIG_PPC_4K_PAGES */
-
-static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
-	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
-				GFP_KERNEL|__GFP_REPEAT);
-}
-
-static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
-{
-	kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
-}
-
-static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
-				  unsigned long address)
-{
-	return pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX);
-}
-
-static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
-				  unsigned long address)
-{
-	pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE);
-}
 
 #define check_pgt_cache() do { } while (0)
 
diff --git a/arch/powerpc/include/asm/nohash/64/pgalloc.h b/arch/powerpc/include/asm/nohash/64/pgalloc.h
index be0cce7f7d4e..0c12a3bfe2ab 100644
--- a/arch/powerpc/include/asm/nohash/64/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/64/pgalloc.h
@@ -119,46 +119,14 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
 	__free_page(ptepage);
 }
 
-static inline void pgtable_free(void *table, unsigned index_size)
-{
-	if (!index_size)
-		free_page((unsigned long)table);
-	else {
-		BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
-		kmem_cache_free(PGT_CACHE(index_size), table);
-	}
-}
-
+extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
 #ifdef CONFIG_SMP
-static inline void pgtable_free_tlb(struct mmu_gather *tlb,
-				    void *table, int shift)
-{
-	unsigned long pgf = (unsigned long)table;
-	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
-	pgf |= shift;
-	tlb_remove_table(tlb, (void *)pgf);
-}
-
-static inline void __tlb_remove_table(void *_table)
-{
-	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
-	unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
-
-	pgtable_free(table, shift);
-}
-#else /* !CONFIG_SMP */
-static inline void pgtable_free_tlb(struct mmu_gather *tlb,
-				    void *table, int shift)
-{
-	pgtable_free(table, shift);
-}
-#endif /* CONFIG_SMP */
-
+extern void __tlb_remove_table(void *_table);
+#endif
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 				  unsigned long address)
 {
 	tlb_flush_pgtable(tlb, address);
-	pgtable_page_dtor(table);
 	pgtable_free_tlb(tlb, page_address(table), 0);
 }
 
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 158574d2acf4..51db3a37bced 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -316,12 +316,20 @@ void arch_free_page(struct page *page, int order);
 #endif
 
 struct vm_area_struct;
-
+#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * For BOOK3s 64 with 4k and 64K linux page size
+ * we want to use pointers, because the page table
+ * actually store pfn
+ */
+typedef pte_t *pgtable_t;
+#else
 #if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC64)
 typedef pte_t *pgtable_t;
 #else
 typedef struct page *pgtable_t;
 #endif
+#endif
 
 #include <asm-generic/memory_model.h>
 #endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index a9a2238c5565..326c3d43edcd 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -367,6 +367,7 @@ pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel
 
 	return __alloc_for_cache(mm, kernel);
 }
+#endif /* CONFIG_PPC_64K_PAGES */
 
 void pte_fragment_free(unsigned long *table, int kernel)
 {
@@ -413,7 +414,6 @@ void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
 	}
 }
 #endif
-#endif /* CONFIG_PPC_64K_PAGES */
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 
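A second illustrative aside, also not part of the patch: both pgalloc headers now lean on the shared out-of-line pgtable_free_tlb()/__tlb_remove_table() pair, and the inline copies removed above show the trick those helpers use: the page-table index size is packed into the low bits of the table pointer and masked back out before freeing. The userspace sketch below models only that encode/decode step; MAX_INDEX, pack and unpack are made-up names, not kernel symbols.

/*
 * Userspace model of the "shift in the pointer's low bits" encoding.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_INDEX 0xf	/* stand-in for MAX_PGTABLE_INDEX_SIZE */

/* Pack the shift into the pointer's low bits, like pgtable_free_tlb(). */
static void *pack(void *table, unsigned int shift)
{
	uintptr_t pgf = (uintptr_t)table;

	assert(shift <= MAX_INDEX);
	assert((pgf & MAX_INDEX) == 0);	/* allocation must be aligned enough */
	return (void *)(pgf | shift);
}

/* Recover pointer and shift, like __tlb_remove_table() before freeing. */
static void unpack(void *packed, void **table, unsigned int *shift)
{
	*table = (void *)((uintptr_t)packed & ~(uintptr_t)MAX_INDEX);
	*shift = (uintptr_t)packed & MAX_INDEX;
}

int main(void)
{
	void *table;
	unsigned int shift;
	void *buf = aligned_alloc(64, 4096);	/* alignment well above MAX_INDEX */

	if (!buf)
		return 1;

	void *packed = pack(buf, 9);
	unpack(packed, &table, &shift);
	printf("table=%p shift=%u\n", table, shift);

	free(buf);
	return 0;
}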