author	Scott Wood <scottwood@freescale.com>	2013-10-11 20:22:38 -0400
committer	Scott Wood <scottwood@freescale.com>	2014-01-09 18:52:19 -0500
commit	28efc35fe68dacbddc4b12c2fa8f2df1593a4ad3 (patch)
tree	f4565fcf8b9f1a905a0b3a0e977741092cba7921
parent	47ce8af4209f4344f152aa6fc538efe9d6bdfd1a (diff)
powerpc/e6500: TLB miss handler with hardware tablewalk support
There are a few things that make the existing hw tablewalk handlers
unsuitable for e6500:

 - Indirect entries go in TLB1 (though the resulting direct entries go in
   TLB0).

 - It has threads, but no "tlbsrx." -- so we need a spinlock and a normal
   "tlbsx".  Because we need this lock, hardware tablewalk is mandatory on
   e6500 unless we want to add spinlock+tlbsx to the normal bolted TLB miss
   handler.

 - TLB1 has no HES (nor next-victim hint) so we need software round robin
   (TODO: integrate this round robin data with hugetlb/KVM)

 - The existing tablewalk handlers map half of a page table at a time,
   because IBM hardware has a fixed 1MiB indirect page size.  e6500 has
   variable size indirect entries, with a minimum of 2MiB.  So we can't do
   the half-page indirect mapping, and even if we could it would be less
   efficient than mapping the full page.

 - Like on e5500, the linear mapping is bolted, so we don't need the
   overhead of supporting nested tlb misses.

Note that hardware tablewalk does not work in rev1 of e6500.  We do not
expect to support e6500 rev1 in mainline Linux.

Signed-off-by: Scott Wood <scottwood@freescale.com>
Cc: Mihai Caraman <mihai.caraman@freescale.com>
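[Editor's note: for orientation, here is a rough C sketch (not part of the
patch, helper names hypothetical) of what the new e6500 miss path does with
the tlb_core_data fields added below: take the per-core byte lock in place
of the missing tlbsrx., pick a TLB1 victim by software round robin, install
the 2MiB indirect entry, and drop the lock.  write_tlb1_indirect_entry()
stands in for the MAS register setup and tlbwe done in assembly.]

	/*
	 * Conceptual sketch only; the real handler is the assembly added to
	 * tlb_low_64e.S.  Uses struct tlb_core_data from mmu-book3e.h;
	 * write_tlb1_indirect_entry() is a hypothetical placeholder.
	 */
	static void e6500_install_indirect_entry(struct tlb_core_data *tcd,
						 unsigned long ea,
						 unsigned long pmd_val)
	{
		u8 esel;

		/* lbarx/stbcx. byte spinlock -- no tlbsrx. on e6500 */
		while (__atomic_test_and_set(&tcd->lock, __ATOMIC_ACQUIRE))
			;

		/* software round robin over TLB1 ways (no HES/NV hint) */
		esel = tcd->esel_next;
		tcd->esel_next = (esel + 1 == tcd->esel_max) ?
					tcd->esel_first : esel + 1;

		/* 2MiB-aligned EA, indirect entry into the chosen way */
		write_tlb1_indirect_entry(esel, ea & ~((1UL << 21) - 1),
					  pmd_val);

		__atomic_clear(&tcd->lock, __ATOMIC_RELEASE);
	}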
-rw-r--r--	arch/powerpc/include/asm/mmu-book3e.h	13
-rw-r--r--	arch/powerpc/include/asm/mmu.h	21
-rw-r--r--	arch/powerpc/include/asm/paca.h	6
-rw-r--r--	arch/powerpc/kernel/asm-offsets.c	9
-rw-r--r--	arch/powerpc/kernel/paca.c	5
-rw-r--r--	arch/powerpc/kernel/setup_64.c	31
-rw-r--r--	arch/powerpc/mm/fsl_booke_mmu.c	7
-rw-r--r--	arch/powerpc/mm/mem.c	6
-rw-r--r--	arch/powerpc/mm/tlb_low_64e.S	171
-rw-r--r--	arch/powerpc/mm/tlb_nohash.c	93
10 files changed, 326 insertions, 36 deletions
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index 936db360790a..89b785d16846 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -286,8 +286,21 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
 extern int mmu_linear_psize;
 extern int mmu_vmemmap_psize;
 
+struct tlb_core_data {
+	/* For software way selection, as on Freescale TLB1 */
+	u8 esel_next, esel_max, esel_first;
+
+	/* Per-core spinlock for e6500 TLB handlers (no tlbsrx.) */
+	u8 lock;
+};
+
 #ifdef CONFIG_PPC64
 extern unsigned long linear_map_top;
+extern int book3e_htw_mode;
+
+#define PPC_HTW_NONE	0
+#define PPC_HTW_IBM	1
+#define PPC_HTW_E6500	2
 
 /*
  * 64-bit booke platforms don't load the tlb in the tlb miss handler code.
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 691fd8aca939..f8d1d6dcf7db 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -180,16 +180,17 @@ static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
 #define MMU_PAGE_64K_AP	3	/* "Admixed pages" (hash64 only) */
 #define MMU_PAGE_256K	4
 #define MMU_PAGE_1M	5
-#define MMU_PAGE_4M	6
-#define MMU_PAGE_8M	7
-#define MMU_PAGE_16M	8
-#define MMU_PAGE_64M	9
-#define MMU_PAGE_256M	10
-#define MMU_PAGE_1G	11
-#define MMU_PAGE_16G	12
-#define MMU_PAGE_64G	13
+#define MMU_PAGE_2M	6
+#define MMU_PAGE_4M	7
+#define MMU_PAGE_8M	8
+#define MMU_PAGE_16M	9
+#define MMU_PAGE_64M	10
+#define MMU_PAGE_256M	11
+#define MMU_PAGE_1G	12
+#define MMU_PAGE_16G	13
+#define MMU_PAGE_64G	14
 
-#define MMU_PAGE_COUNT	14
+#define MMU_PAGE_COUNT	15
 
 #if defined(CONFIG_PPC_STD_MMU_64)
 /* 64-bit classic hash table MMU */
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index c3523d1dda58..e81731c62a7f 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -113,6 +113,10 @@ struct paca_struct {
 	/* Keep pgd in the same cacheline as the start of extlb */
 	pgd_t *pgd __attribute__((aligned(0x80))); /* Current PGD */
 	pgd_t *kernel_pgd;		/* Kernel PGD */
+
+	/* Shared by all threads of a core -- points to tcd of first thread */
+	struct tlb_core_data *tcd_ptr;
+
 	/* We can have up to 3 levels of reentrancy in the TLB miss handler */
 	u64 extlb[3][EX_TLB_SIZE / sizeof(u64)];
 	u64 exmc[8];		/* used for machine checks */
@@ -123,6 +127,8 @@ struct paca_struct {
 	void *mc_kstack;
 	void *crit_kstack;
 	void *dbg_kstack;
+
+	struct tlb_core_data tcd;
 #endif /* CONFIG_PPC_BOOK3E */
 
 	mm_context_t context;
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 41a283956a29..ed8d68ce71f3 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -203,6 +203,15 @@ int main(void)
 	DEFINE(PACA_MC_STACK, offsetof(struct paca_struct, mc_kstack));
 	DEFINE(PACA_CRIT_STACK, offsetof(struct paca_struct, crit_kstack));
 	DEFINE(PACA_DBG_STACK, offsetof(struct paca_struct, dbg_kstack));
+	DEFINE(PACA_TCD_PTR, offsetof(struct paca_struct, tcd_ptr));
+
+	DEFINE(TCD_ESEL_NEXT,
+	       offsetof(struct tlb_core_data, esel_next));
+	DEFINE(TCD_ESEL_MAX,
+	       offsetof(struct tlb_core_data, esel_max));
+	DEFINE(TCD_ESEL_FIRST,
+	       offsetof(struct tlb_core_data, esel_first));
+	DEFINE(TCD_LOCK, offsetof(struct tlb_core_data, lock));
 #endif /* CONFIG_PPC_BOOK3E */
 
 #ifdef CONFIG_PPC_STD_MMU_64
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 623c356fe34f..bf0aada02fe4 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -160,6 +160,11 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu)
 #ifdef CONFIG_PPC_STD_MMU_64
 	new_paca->slb_shadow_ptr = init_slb_shadow(cpu);
 #endif /* CONFIG_PPC_STD_MMU_64 */
+
+#ifdef CONFIG_PPC_BOOK3E
+	/* For now -- if we have threads this will be adjusted later */
+	new_paca->tcd_ptr = &new_paca->tcd;
+#endif
 }
 
 /* Put the paca pointer into r13 and SPRG_PACA */
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 2232aff66059..1ce9b87d7df8 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -97,6 +97,36 @@ int dcache_bsize;
 int icache_bsize;
 int ucache_bsize;
 
+#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
+static void setup_tlb_core_data(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		int first = cpu_first_thread_sibling(cpu);
+
+		paca[cpu].tcd_ptr = &paca[first].tcd;
+
+		/*
+		 * If we have threads, we need either tlbsrx.
+		 * or e6500 tablewalk mode, or else TLB handlers
+		 * will be racy and could produce duplicate entries.
+		 */
+		if (smt_enabled_at_boot >= 2 &&
+		    !mmu_has_feature(MMU_FTR_USE_TLBRSRV) &&
+		    book3e_htw_mode != PPC_HTW_E6500) {
+			/* Should we panic instead? */
+			WARN_ONCE("%s: unsupported MMU configuration -- expect problems\n",
+				  __func__);
+		}
+	}
+}
+#else
+static void setup_tlb_core_data(void)
+{
+}
+#endif
+
 #ifdef CONFIG_SMP
 
 static char *smt_enabled_cmdline;
@@ -445,6 +475,7 @@ void __init setup_system(void)
 
 	smp_setup_cpu_maps();
 	check_smt_enabled();
+	setup_tlb_core_data();
 
 #ifdef CONFIG_SMP
 	/* Release secondary cpus out of their spinloops at 0x60 now that
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index a68671c18ad4..94cd728166d3 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -52,6 +52,7 @@
 #include <asm/smp.h>
 #include <asm/machdep.h>
 #include <asm/setup.h>
+#include <asm/paca.h>
 
 #include "mmu_decl.h"
 
@@ -191,6 +192,12 @@ static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
 	}
 	tlbcam_index = i;
 
+#ifdef CONFIG_PPC64
+	get_paca()->tcd.esel_next = i;
+	get_paca()->tcd.esel_max = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
+	get_paca()->tcd.esel_first = i;
+#endif
+
 	return amount_mapped;
 }
 
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 3fa93dc7fe75..94448cd4b444 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -307,6 +307,12 @@ static void __init register_page_bootmem_info(void)
 
 void __init mem_init(void)
 {
+	/*
+	 * book3s is limited to 16 page sizes due to encoding this in
+	 * a 4-bit field for slices.
+	 */
+	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);
+
 #ifdef CONFIG_SWIOTLB
 	swiotlb_init(0);
 #endif
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index b4113bf86353..75f5d2777f61 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -239,6 +239,177 @@ itlb_miss_fault_bolted:
 	beq	tlb_miss_common_bolted
 	b	itlb_miss_kernel_bolted
 
+/*
+ * TLB miss handling for e6500 and derivatives, using hardware tablewalk.
+ *
+ * Linear mapping is bolted: no virtual page table or nested TLB misses
+ * Indirect entries in TLB1, hardware loads resulting direct entries
+ *    into TLB0
+ * No HES or NV hint on TLB1, so we need to do software round-robin
+ * No tlbsrx. so we need a spinlock, and we have to deal
+ *    with MAS-damage caused by tlbsx
+ * 4K pages only
+ */
+
+	START_EXCEPTION(instruction_tlb_miss_e6500)
+	tlb_prolog_bolted BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR0
+
+	ld	r11,PACA_TCD_PTR(r13)
+	srdi.	r15,r16,60		/* get region */
+	ori	r16,r16,1
+
+	TLB_MISS_STATS_SAVE_INFO_BOLTED
+	bne	tlb_miss_kernel_e6500	/* user/kernel test */
+
+	b	tlb_miss_common_e6500
+
+	START_EXCEPTION(data_tlb_miss_e6500)
+	tlb_prolog_bolted BOOKE_INTERRUPT_DTLB_MISS SPRN_DEAR
+
+	ld	r11,PACA_TCD_PTR(r13)
+	srdi.	r15,r16,60		/* get region */
+	rldicr	r16,r16,0,62
+
+	TLB_MISS_STATS_SAVE_INFO_BOLTED
+	bne	tlb_miss_kernel_e6500	/* user vs kernel check */
+
+/*
+ * This is the guts of the TLB miss handler for e6500 and derivatives.
+ * We are entered with:
+ *
+ * r16 = page of faulting address (low bit 0 if data, 1 if instruction)
+ * r15 = crap (free to use)
+ * r14 = page table base
+ * r13 = PACA
+ * r11 = tlb_per_core ptr
+ * r10 = crap (free to use)
+ */
+tlb_miss_common_e6500:
+	/*
+	 * Search if we already have an indirect entry for that virtual
+	 * address, and if we do, bail out.
+	 *
+	 * MAS6:IND should be already set based on MAS4
+	 */
+	addi	r10,r11,TCD_LOCK
+1:	lbarx	r15,0,r10
+	cmpdi	r15,0
+	bne	2f
+	li	r15,1
+	stbcx.	r15,0,r10
+	bne	1b
+	.subsection 1
+2:	lbz	r15,0(r10)
+	cmpdi	r15,0
+	bne	2b
+	b	1b
+	.previous
+
+	mfspr	r15,SPRN_MAS2
+
+	tlbsx	0,r16
+	mfspr	r10,SPRN_MAS1
+	andis.	r10,r10,MAS1_VALID@h
+	bne	tlb_miss_done_e6500
+
+	/* Undo MAS-damage from the tlbsx */
+	mfspr	r10,SPRN_MAS1
+	oris	r10,r10,MAS1_VALID@h
+	mtspr	SPRN_MAS1,r10
+	mtspr	SPRN_MAS2,r15
+
+	/* Now, we need to walk the page tables. First check if we are in
+	 * range.
+	 */
+	rldicl.	r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
+	bne-	tlb_miss_fault_e6500
+
+	rldicl	r15,r16,64-PGDIR_SHIFT+3,64-PGD_INDEX_SIZE-3
+	cmpldi	cr0,r14,0
+	clrrdi	r15,r15,3
+	beq-	tlb_miss_fault_e6500 /* No PGDIR, bail */
+	ldx	r14,r14,r15		/* grab pgd entry */
+
+	rldicl	r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3
+	clrrdi	r15,r15,3
+	cmpdi	cr0,r14,0
+	bge	tlb_miss_fault_e6500	/* Bad pgd entry or hugepage; bail */
+	ldx	r14,r14,r15		/* grab pud entry */
+
+	rldicl	r15,r16,64-PMD_SHIFT+3,64-PMD_INDEX_SIZE-3
+	clrrdi	r15,r15,3
+	cmpdi	cr0,r14,0
+	bge	tlb_miss_fault_e6500
+	ldx	r14,r14,r15		/* Grab pmd entry */
+
+	mfspr	r10,SPRN_MAS0
+	cmpdi	cr0,r14,0
+	bge	tlb_miss_fault_e6500
+
+	/* Now we build the MAS for a 2M indirect page:
+	 *
+	 * MAS 0 :	ESEL needs to be filled by software round-robin
+	 * MAS 1 :	Fully set up
+	 *		- PID already updated by caller if necessary
+	 *		- TSIZE for now is base ind page size always
+	 *		- TID already cleared if necessary
+	 * MAS 2 :	Default not 2M-aligned, need to be redone
+	 * MAS 3+7 :	Needs to be done
+	 */
+
+	ori	r14,r14,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT)
+	mtspr	SPRN_MAS7_MAS3,r14
+
+	clrrdi	r15,r16,21		/* make EA 2M-aligned */
+	mtspr	SPRN_MAS2,r15
+
+	lbz	r15,TCD_ESEL_NEXT(r11)
+	lbz	r16,TCD_ESEL_MAX(r11)
+	lbz	r14,TCD_ESEL_FIRST(r11)
+	rlwimi	r10,r15,16,0x00ff0000	/* insert esel_next into MAS0 */
+	addi	r15,r15,1		/* increment esel_next */
+	mtspr	SPRN_MAS0,r10
+	cmpw	r15,r16
+	iseleq	r15,r14,r15		/* if next == last use first */
+	stb	r15,TCD_ESEL_NEXT(r11)
+
+	tlbwe
+
+tlb_miss_done_e6500:
+	.macro	tlb_unlock_e6500
+	li	r15,0
+	isync
+	stb	r15,TCD_LOCK(r11)
+	.endm
+
+	tlb_unlock_e6500
+	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK)
+	tlb_epilog_bolted
+	rfi
+
+tlb_miss_kernel_e6500:
+	mfspr	r10,SPRN_MAS1
+	ld	r14,PACA_KERNELPGD(r13)
+	cmpldi	cr0,r15,8		/* Check for vmalloc region */
+	rlwinm	r10,r10,0,16,1		/* Clear TID */
+	mtspr	SPRN_MAS1,r10
+	beq+	tlb_miss_common_e6500
+
+tlb_miss_fault_e6500:
+	tlb_unlock_e6500
+	/* We need to check if it was an instruction miss */
+	andi.	r16,r16,1
+	bne	itlb_miss_fault_e6500
+dtlb_miss_fault_e6500:
+	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
+	tlb_epilog_bolted
+	b	exc_data_storage_book3e
+itlb_miss_fault_e6500:
+	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
+	tlb_epilog_bolted
+	b	exc_instruction_storage_book3e
+
+
 /**********************************************************************
  *                                                                    *
  * TLB miss handling for Book3E with TLB reservation and HES support  *
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 8805b7b87dc6..735839b74dc5 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -43,6 +43,7 @@
 #include <asm/tlb.h>
 #include <asm/code-patching.h>
 #include <asm/hugetlb.h>
+#include <asm/paca.h>
 
 #include "mmu_decl.h"
 
@@ -58,6 +59,10 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
 		.shift	= 12,
 		.enc	= BOOK3E_PAGESZ_4K,
 	},
+	[MMU_PAGE_2M] = {
+		.shift	= 21,
+		.enc	= BOOK3E_PAGESZ_2M,
+	},
 	[MMU_PAGE_4M] = {
 		.shift	= 22,
 		.enc	= BOOK3E_PAGESZ_4M,
@@ -136,7 +141,7 @@ static inline int mmu_get_tsize(int psize)
 int mmu_linear_psize;		/* Page size used for the linear mapping */
 int mmu_pte_psize;		/* Page size used for PTE pages */
 int mmu_vmemmap_psize;		/* Page size used for the virtual mem map */
-int book3e_htw_enabled;		/* Is HW tablewalk enabled ? */
+int book3e_htw_mode;		/* HW tablewalk?  Value is PPC_HTW_* */
 unsigned long linear_map_top;	/* Top of linear mapping */
 
 #endif /* CONFIG_PPC64 */
@@ -377,7 +382,7 @@ void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
 {
 	int tsize = mmu_psize_defs[mmu_pte_psize].enc;
 
-	if (book3e_htw_enabled) {
+	if (book3e_htw_mode != PPC_HTW_NONE) {
 		unsigned long start = address & PMD_MASK;
 		unsigned long end = address + PMD_SIZE;
 		unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;
@@ -430,7 +435,7 @@ static void setup_page_sizes(void)
 			def = &mmu_psize_defs[psize];
 			shift = def->shift;
 
-			if (shift == 0)
+			if (shift == 0 || shift & 1)
 				continue;
 
 			/* adjust to be in terms of 4^shift Kb */
@@ -440,21 +445,40 @@ static void setup_page_sizes(void)
 				def->flags |= MMU_PAGE_SIZE_DIRECT;
 		}
 
-		goto no_indirect;
+		goto out;
 	}
 
 	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
-		u32 tlb1ps = mfspr(SPRN_TLB1PS);
+		u32 tlb1cfg, tlb1ps;
+
+		tlb0cfg = mfspr(SPRN_TLB0CFG);
+		tlb1cfg = mfspr(SPRN_TLB1CFG);
+		tlb1ps = mfspr(SPRN_TLB1PS);
+		eptcfg = mfspr(SPRN_EPTCFG);
+
+		if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
+			book3e_htw_mode = PPC_HTW_E6500;
+
+		/*
+		 * We expect 4K subpage size and unrestricted indirect size.
+		 * The lack of a restriction on indirect size is a Freescale
+		 * extension, indicated by PSn = 0 but SPSn != 0.
+		 */
+		if (eptcfg != 2)
+			book3e_htw_mode = PPC_HTW_NONE;
 
 		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
 			struct mmu_psize_def *def = &mmu_psize_defs[psize];
 
 			if (tlb1ps & (1U << (def->shift - 10))) {
 				def->flags |= MMU_PAGE_SIZE_DIRECT;
+
+				if (book3e_htw_mode && psize == MMU_PAGE_2M)
+					def->flags |= MMU_PAGE_SIZE_INDIRECT;
 			}
 		}
 
-		goto no_indirect;
+		goto out;
 	}
 #endif
 
@@ -471,8 +495,11 @@ static void setup_page_sizes(void)
471 } 495 }
472 496
473 /* Indirect page sizes supported ? */ 497 /* Indirect page sizes supported ? */
474 if ((tlb0cfg & TLBnCFG_IND) == 0) 498 if ((tlb0cfg & TLBnCFG_IND) == 0 ||
475 goto no_indirect; 499 (tlb0cfg & TLBnCFG_PT) == 0)
500 goto out;
501
502 book3e_htw_mode = PPC_HTW_IBM;
476 503
477 /* Now, we only deal with one IND page size for each 504 /* Now, we only deal with one IND page size for each
478 * direct size. Hopefully all implementations today are 505 * direct size. Hopefully all implementations today are
@@ -497,8 +524,8 @@ static void setup_page_sizes(void)
 				def->ind = ps + 10;
 		}
 	}
- no_indirect:
 
+out:
 	/* Cleanup array and print summary */
 	pr_info("MMU: Supported page sizes\n");
 	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
@@ -520,23 +547,23 @@ static void setup_page_sizes(void)
 
 static void setup_mmu_htw(void)
 {
-	/* Check if HW tablewalk is present, and if yes, enable it by:
-	 *
-	 * - patching the TLB miss handlers to branch to the
-	 *   one dedicates to it
-	 *
-	 * - setting the global book3e_htw_enabled
-	 */
-	unsigned int tlb0cfg = mfspr(SPRN_TLB0CFG);
+	/*
+	 * If we want to use HW tablewalk, enable it by patching the TLB miss
+	 * handlers to branch to the one dedicated to it.
+	 */
 
-	if ((tlb0cfg & TLBnCFG_IND) &&
-	    (tlb0cfg & TLBnCFG_PT)) {
+	switch (book3e_htw_mode) {
+	case PPC_HTW_IBM:
 		patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
 		patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
-		book3e_htw_enabled = 1;
+		break;
+	case PPC_HTW_E6500:
+		patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
+		patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
+		break;
 	}
 	pr_info("MMU: Book3E HW tablewalk %s\n",
-		book3e_htw_enabled ? "enabled" : "not supported");
+		book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
 }
 
 /*
@@ -576,8 +603,16 @@ static void __early_init_mmu(int boot_cpu)
 	/* Set MAS4 based on page table setting */
 
 	mas4 = 0x4 << MAS4_WIMGED_SHIFT;
-	if (book3e_htw_enabled) {
-		mas4 |= mas4 | MAS4_INDD;
+	switch (book3e_htw_mode) {
+	case PPC_HTW_E6500:
+		mas4 |= MAS4_INDD;
+		mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT;
+		mas4 |= MAS4_TLBSELD(1);
+		mmu_pte_psize = MMU_PAGE_2M;
+		break;
+
+	case PPC_HTW_IBM:
+		mas4 |= MAS4_INDD;
 #ifdef CONFIG_PPC_64K_PAGES
 		mas4 |= BOOK3E_PAGESZ_256M << MAS4_TSIZED_SHIFT;
 		mmu_pte_psize = MMU_PAGE_256M;
@@ -585,13 +620,16 @@ static void __early_init_mmu(int boot_cpu)
 		mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
 		mmu_pte_psize = MMU_PAGE_1M;
 #endif
-	} else {
+		break;
+
+	case PPC_HTW_NONE:
 #ifdef CONFIG_PPC_64K_PAGES
 		mas4 |= BOOK3E_PAGESZ_64K << MAS4_TSIZED_SHIFT;
 #else
 		mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
 #endif
 		mmu_pte_psize = mmu_virtual_psize;
+		break;
 	}
 	mtspr(SPRN_MAS4, mas4);
 
@@ -611,8 +649,11 @@ static void __early_init_mmu(int boot_cpu)
 		/* limit memory so we dont have linear faults */
 		memblock_enforce_memory_limit(linear_map_top);
 
-		patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
-		patch_exception(0x1e0, exc_instruction_tlb_miss_bolted_book3e);
+		if (book3e_htw_mode == PPC_HTW_NONE) {
+			patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
+			patch_exception(0x1e0,
+				exc_instruction_tlb_miss_bolted_book3e);
+		}
 	}
 #endif
 