Diffstat (limited to 'arch')
 arch/powerpc/include/asm/hugetlb.h    | 30
 arch/powerpc/include/asm/mmu-hash64.h | 20
 arch/powerpc/include/asm/page.h       | 13
 arch/powerpc/include/asm/pgalloc-64.h |  5
 arch/powerpc/mm/hugetlbpage.c         | 26
 arch/powerpc/mm/init_64.c             |  3
 6 files changed, 75 insertions, 22 deletions
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 62e11a32c4c2..4daf7e684f58 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -6,6 +6,33 @@
 
 extern struct kmem_cache *hugepte_cache;
 
+#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * This should work for other subarchs too. But right now we use the
+ * new format only for 64bit book3s
+ */
+static inline pte_t *hugepd_page(hugepd_t hpd)
+{
+	BUG_ON(!hugepd_ok(hpd));
+	/*
+	 * We have only four bits to encode, MMU page size
+	 */
+	BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
+	return (pte_t *)(hpd.pd & ~HUGEPD_SHIFT_MASK);
+}
+
+static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
+{
+	return (hpd.pd & HUGEPD_SHIFT_MASK) >> 2;
+}
+
+static inline unsigned int hugepd_shift(hugepd_t hpd)
+{
+	return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
+}
+
+#else
+
 static inline pte_t *hugepd_page(hugepd_t hpd)
 {
 	BUG_ON(!hugepd_ok(hpd));
@@ -17,6 +44,9 @@ static inline unsigned int hugepd_shift(hugepd_t hpd)
 	return hpd.pd & HUGEPD_SHIFT_MASK;
 }
 
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+
 static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
 				    unsigned pdshift)
 {
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index b59e06f507ea..05895cff1345 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -21,6 +21,7 @@
  * complete pgtable.h but only a portion of it.
  */
 #include <asm/pgtable-ppc64.h>
+#include <asm/bug.h>
 
 /*
  * Segment table
@@ -159,6 +160,24 @@ struct mmu_psize_def
 	unsigned long avpnm;	/* bits to mask out in AVPN in the HPTE */
 	unsigned long sllp;	/* SLB L||LP (exact mask to use in slbmte) */
 };
+extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
+
+static inline int shift_to_mmu_psize(unsigned int shift)
+{
+	int psize;
+
+	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
+		if (mmu_psize_defs[psize].shift == shift)
+			return psize;
+	return -1;
+}
+
+static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
+{
+	if (mmu_psize_defs[mmu_psize].shift)
+		return mmu_psize_defs[mmu_psize].shift;
+	BUG();
+}
 
 #endif /* __ASSEMBLY__ */
 
@@ -193,7 +212,6 @@ static inline int segment_shift(int ssize)
 /*
  * The current system page and segment sizes
  */
-extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
 extern int mmu_linear_psize;
 extern int mmu_virtual_psize;
 extern int mmu_vmalloc_psize;
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index f072e974f8a2..652719ccd2e9 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -249,6 +249,7 @@ extern long long virt_phys_offset;
 #define is_kernel_addr(x)	((x) >= PAGE_OFFSET)
 #endif
 
+#ifndef CONFIG_PPC_BOOK3S_64
 /*
  * Use the top bit of the higher-level page table entries to indicate whether
  * the entries we point to contain hugepages.  This works because we know that
@@ -260,6 +261,7 @@ extern long long virt_phys_offset;
 #else
 #define PD_HUGE 0x80000000
 #endif
+#endif /* CONFIG_PPC_BOOK3S_64 */
 
 /*
  * Some number of bits at the level of the page table that points to
@@ -354,10 +356,21 @@ typedef unsigned long pgprot_t;
 typedef struct { signed long pd; } hugepd_t;
 
 #ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_PPC_BOOK3S_64
+static inline int hugepd_ok(hugepd_t hpd)
+{
+	/*
+	 * hugepd pointer, bottom two bits == 00 and next 4 bits
+	 * indicate size of table
+	 */
+	return (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0));
+}
+#else
 static inline int hugepd_ok(hugepd_t hpd)
 {
 	return (hpd.pd > 0);
 }
+#endif
 
 #define is_hugepd(pdep)	(hugepd_ok(*((hugepd_t *)(pdep))))
 #else /* CONFIG_HUGETLB_PAGE */
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index 292725cec2e3..69e352a5252b 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -35,7 +35,10 @@ struct vmemmap_backing {
 #define MAX_PGTABLE_INDEX_SIZE	0xf
 
 extern struct kmem_cache *pgtable_cache[];
-#define PGT_CACHE(shift) (pgtable_cache[(shift)-1])
+#define PGT_CACHE(shift) ({			\
+		BUG_ON(!(shift));		\
+		pgtable_cache[(shift) - 1];	\
+	})
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 5dc52d803ed8..b4e2f24a9b8f 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -48,23 +48,6 @@ static u64 gpage_freearray[MAX_NUMBER_GPAGES];
 static unsigned nr_gpages;
 #endif
 
-static inline int shift_to_mmu_psize(unsigned int shift)
-{
-	int psize;
-
-	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
-		if (mmu_psize_defs[psize].shift == shift)
-			return psize;
-	return -1;
-}
-
-static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
-{
-	if (mmu_psize_defs[mmu_psize].shift)
-		return mmu_psize_defs[mmu_psize].shift;
-	BUG();
-}
-
 #define hugepd_none(hpd)	((hpd).pd == 0)
 
 pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
@@ -145,6 +128,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 		if (unlikely(!hugepd_none(*hpdp)))
 			break;
 		else
+			/* We use the old format for PPC_FSL_BOOK3E */
 			hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
 	}
 	/* If we bailed from the for loop early, an error occurred, clean up */
@@ -156,9 +140,15 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 #else
 	if (!hugepd_none(*hpdp))
 		kmem_cache_free(cachep, new);
-	else
+	else {
+#ifdef CONFIG_PPC_BOOK3S_64
+		hpdp->pd = (unsigned long)new |
+			   (shift_to_mmu_psize(pshift) << 2);
+#else
 		hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
 #endif
+	}
+#endif
 	spin_unlock(&mm->page_table_lock);
 	return 0;
 }
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 7e2246fb2f31..a56de85ad3b7 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -129,8 +129,7 @@ void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
 	align = max_t(unsigned long, align, minalign);
 	name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
 	new = kmem_cache_create(name, table_size, align, 0, ctor);
-	PGT_CACHE(shift) = new;
-
+	pgtable_cache[shift - 1] = new;
 	pr_debug("Allocated pgtable cache for order %d\n", shift);
 }
 
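
For reference, the new CONFIG_PPC_BOOK3S_64 hugepd format in this patch keeps the hugepte-table pointer with its bottom two bits clear and packs the 4-bit MMU page-size index into the next bits, which is why __hugepte_alloc() stores (unsigned long)new | (shift_to_mmu_psize(pshift) << 2) and hugepd_mmu_psize() shifts back down by 2. Below is a minimal stand-alone C sketch of that encode/decode round trip; the mask values, alignment, function names, and the psize index used are illustrative assumptions for demonstration, not the kernel's actual definitions.

#include <assert.h>
#include <stdalign.h>
#include <stdio.h>

/* Illustrative stand-ins; the kernel uses HUGEPD_SHIFT_MASK and mmu_psize_defs. */
#define HPD_PSIZE_MASK	0x3cUL	/* four page-size bits, bits 2..5 (assumed layout) */
#define HPD_LOW_MASK	0x3fUL	/* low bits assumed clear in the table pointer */

/* Mirrors the patch's encode step: pd = table | (psize << 2). */
static unsigned long hugepd_encode(void *hugepte_table, unsigned int mmu_psize)
{
	unsigned long pd = (unsigned long)hugepte_table;

	assert((pd & HPD_LOW_MASK) == 0);	/* table sufficiently aligned */
	assert(mmu_psize <= 0xf);		/* only four bits to encode */
	return pd | ((unsigned long)mmu_psize << 2);	/* bottom two bits stay 00 */
}

/* Mirrors hugepd_page(): strip the size bits to recover the table pointer. */
static void *hugepd_table(unsigned long pd)
{
	return (void *)(pd & ~HPD_LOW_MASK);
}

/* Mirrors hugepd_mmu_psize(): pull the page-size index back out. */
static unsigned int hugepd_psize(unsigned long pd)
{
	return (pd & HPD_PSIZE_MASK) >> 2;
}

int main(void)
{
	static alignas(64) unsigned char table[64];	/* dummy "hugepte table" */
	unsigned long pd = hugepd_encode(table, 5);	/* 5: an arbitrary psize index */

	/* The final term mirrors the hugepd_ok() test from the patch. */
	printf("table=%p psize=%u ok=%d\n", hugepd_table(pd), hugepd_psize(pd),
	       (pd & 0x3) == 0 && (pd & HPD_LOW_MASK) != 0);
	return 0;
}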