author	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-07-23 19:15:47 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-08-19 20:25:09 -0400
commit	25d21ad6e799cccd097b9df2a2fefe19a7e1dfcf (patch)
tree	cd381527a069fed6cffa8755cac177639cc48b0b
parent	a8f7758c1c52a13e031266483efd5525157e43e9 (diff)
powerpc: Add TLB management code for 64-bit Book3E
This adds the TLB miss handler assembly, the low level TLB flush routines, and the necessary hook for dealing with our virtual page tables or indirect TLB entries that need to be flushed when PTE pages are freed. There is currently no support for hugetlbfs.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
-rw-r--r--	arch/powerpc/include/asm/mmu-40x.h	|   3
-rw-r--r--	arch/powerpc/include/asm/mmu-44x.h	|   6
-rw-r--r--	arch/powerpc/include/asm/mmu-8xx.h	|   3
-rw-r--r--	arch/powerpc/include/asm/mmu-hash32.h	|   6
-rw-r--r--	arch/powerpc/include/asm/mmu_context.h	|   8
-rw-r--r--	arch/powerpc/kernel/setup_64.c	|   4
-rw-r--r--	arch/powerpc/mm/mmu_decl.h	|  14
-rw-r--r--	arch/powerpc/mm/tlb_low_64e.S	| 734
-rw-r--r--	arch/powerpc/mm/tlb_nohash.c	| 203
-rw-r--r--	arch/powerpc/mm/tlb_nohash_low.S	|  79
10 files changed, 1055 insertions(+), 5 deletions(-)
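A quick editorial aside before the per-file hunks: the handlers added below key everything off the top nibble of the faulting effective address. The following standalone C sketch (not part of the patch) mirrors the checks done in the assembly: region 0xc is the kernel linear mapping, region 0x8 is vmalloc, region 0x0 is user space, and bit 0x1 of the region nibble is the tag that tlb_flush_pgtable() ORs in to mark virtual page table addresses.

#include <stdio.h>
#include <stdint.h>

/* Mirrors: srdi r15,r16,60 / cmpldi cr0,r15,0xc / andi. r10,r15,0x1 / cmpldi cr0,r15,8 */
static const char *classify(uint64_t ea)
{
	unsigned int region = ea >> 60;

	if (region == 0xc)
		return "kernel linear mapping";
	if (region & 0x1)
		return "virtual page table";
	if (region == 0x8)
		return "vmalloc";
	if (region == 0x0)
		return "user";
	return "bogus address -> storage fault";
}

int main(void)
{
	printf("%s\n", classify(0xc000000000001000ull));
	printf("%s\n", classify(0x8000000000100000ull));
	printf("%s\n", classify(0x9000000000000000ull));
	printf("%s\n", classify(0x0000000010000000ull));
	return 0;
}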
diff --git a/arch/powerpc/include/asm/mmu-40x.h b/arch/powerpc/include/asm/mmu-40x.h
index 776f415a36aa..34916865eaef 100644
--- a/arch/powerpc/include/asm/mmu-40x.h
+++ b/arch/powerpc/include/asm/mmu-40x.h
@@ -61,4 +61,7 @@ typedef struct {
 
 #endif /* !__ASSEMBLY__ */
 
+#define mmu_virtual_psize	MMU_PAGE_4K
+#define mmu_linear_psize	MMU_PAGE_256M
+
 #endif /* _ASM_POWERPC_MMU_40X_H_ */
diff --git a/arch/powerpc/include/asm/mmu-44x.h b/arch/powerpc/include/asm/mmu-44x.h
index 3c86576bfefa..0372669383a8 100644
--- a/arch/powerpc/include/asm/mmu-44x.h
+++ b/arch/powerpc/include/asm/mmu-44x.h
@@ -79,16 +79,22 @@ typedef struct {
 
 #if (PAGE_SHIFT == 12)
 #define PPC44x_TLBE_SIZE	PPC44x_TLB_4K
+#define mmu_virtual_psize	MMU_PAGE_4K
 #elif (PAGE_SHIFT == 14)
 #define PPC44x_TLBE_SIZE	PPC44x_TLB_16K
+#define mmu_virtual_psize	MMU_PAGE_16K
 #elif (PAGE_SHIFT == 16)
 #define PPC44x_TLBE_SIZE	PPC44x_TLB_64K
+#define mmu_virtual_psize	MMU_PAGE_64K
 #elif (PAGE_SHIFT == 18)
 #define PPC44x_TLBE_SIZE	PPC44x_TLB_256K
+#define mmu_virtual_psize	MMU_PAGE_256K
 #else
 #error "Unsupported PAGE_SIZE"
 #endif
 
+#define mmu_linear_psize	MMU_PAGE_256M
+
 #define PPC44x_PGD_OFF_SHIFT	(32 - PGDIR_SHIFT + PGD_T_LOG2)
 #define PPC44x_PGD_OFF_MASK_BIT	(PGDIR_SHIFT - PGD_T_LOG2)
 #define PPC44x_PTE_ADD_SHIFT	(32 - PGDIR_SHIFT + PTE_SHIFT + PTE_T_LOG2)
diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h
index 07865a357848..3d11d3ce79ec 100644
--- a/arch/powerpc/include/asm/mmu-8xx.h
+++ b/arch/powerpc/include/asm/mmu-8xx.h
@@ -143,4 +143,7 @@ typedef struct {
 } mm_context_t;
 #endif /* !__ASSEMBLY__ */
 
+#define mmu_virtual_psize	MMU_PAGE_4K
+#define mmu_linear_psize	MMU_PAGE_8M
+
 #endif /* _ASM_POWERPC_MMU_8XX_H_ */
diff --git a/arch/powerpc/include/asm/mmu-hash32.h b/arch/powerpc/include/asm/mmu-hash32.h
index 16b1a1e77e64..382fc689f204 100644
--- a/arch/powerpc/include/asm/mmu-hash32.h
+++ b/arch/powerpc/include/asm/mmu-hash32.h
@@ -80,4 +80,10 @@ typedef struct {
 
 #endif /* !__ASSEMBLY__ */
 
+/* We happily ignore the smaller BATs on 601, we don't actually use
+ * those definitions on hash32 at the moment anyway
+ */
+#define mmu_virtual_psize	MMU_PAGE_4K
+#define mmu_linear_psize	MMU_PAGE_256M
+
 #endif /* _ASM_POWERPC_MMU_HASH32_H_ */
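The header hunks above give every 32-bit/embedded MMU flavour the mmu_virtual_psize/mmu_linear_psize indices; on Book3E those indices resolve through the mmu_psize_defs[] table added in tlb_nohash.c further down. A hedged sketch of the two lookups generic code can now do (the helper names are hypothetical, not part of the patch):

/* Hypothetical helpers; they assume mmu_psize_defs[] is populated,
 * which this patch only does for CONFIG_PPC_BOOK3E.
 */
static inline unsigned long mmu_psize_to_bytes(int psize)
{
	return 1UL << mmu_psize_defs[psize].shift;	/* e.g. 4K, 256M */
}

static inline int mmu_psize_to_tsize(int psize)
{
	return mmu_psize_defs[psize].enc;		/* BOOK3E_PAGESZ_* value */
}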
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 8dffed317013..b34e94d94435 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -43,6 +43,10 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	tsk->thread.pgdir = next->pgd;
 #endif /* CONFIG_PPC32 */
 
+	/* 64-bit Book3E keeps track of current PGD in the PACA */
+#ifdef CONFIG_PPC_BOOK3E_64
+	get_paca()->pgd = next->pgd;
+#endif
 	/* Nothing else to do if we aren't actually switching */
 	if (prev == next)
 		return;
@@ -89,6 +93,10 @@ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
 static inline void enter_lazy_tlb(struct mm_struct *mm,
 				  struct task_struct *tsk)
 {
+	/* 64-bit Book3E keeps track of current PGD in the PACA */
+#ifdef CONFIG_PPC_BOOK3E_64
+	get_paca()->pgd = NULL;
+#endif
 }
 
 #endif /* __KERNEL__ */
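The PACA bookkeeping above is what lets the second-level miss handler in tlb_low_64e.S pick a page directory with a single add: with r10 holding 0 (user) or 8 (kernel), "add r11,r10,r13" lands on the right pointer. A hedged C sketch of the layout this relies on (field names follow the PACAPGD/PACA_KERNELPGD offsets the assembly uses; they are not introduced by this hunk):

/* Sketch only: the two PGD pointers must be adjacent in the PACA */
struct paca_pgd_view {
	pgd_t *pgd;		/* user PGD, set in switch_mm(), NULL while lazy */
	pgd_t *kernel_pgd;	/* kernel PGD (swapper_pg_dir), 8 bytes after pgd */
};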
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index a6b6c4c9ae41..65aced7b833a 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -62,6 +62,7 @@
 #include <asm/udbg.h>
 #include <asm/kexec.h>
 #include <asm/swiotlb.h>
+#include <asm/mmu_context.h>
 
 #include "setup.h"
 
@@ -147,6 +148,9 @@ void __init setup_paca(int cpu)
 {
 	local_paca = &paca[cpu];
 	mtspr(SPRN_SPRG_PACA, local_paca);
+#ifdef CONFIG_PPC_BOOK3E
+	mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb);
+#endif
 }
 
 /*
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 3871dceee2dd..5961c6b739dd 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -41,7 +41,11 @@ static inline void _tlbil_pid(unsigned int pid)
 #else /* CONFIG_40x || CONFIG_8xx */
 extern void _tlbil_all(void);
 extern void _tlbil_pid(unsigned int pid);
+#ifdef CONFIG_PPC_BOOK3E
+extern void _tlbil_pid_noind(unsigned int pid);
+#else
 #define _tlbil_pid_noind(pid)	_tlbil_pid(pid)
+#endif
 #endif /* !(CONFIG_40x || CONFIG_8xx) */
 
 /*
@@ -53,7 +57,10 @@ static inline void _tlbil_va(unsigned long address, unsigned int pid,
 {
 	asm volatile ("tlbie %0; sync" : : "r" (address) : "memory");
 }
-#else /* CONFIG_8xx */
+#elif defined(CONFIG_PPC_BOOK3E)
+extern void _tlbil_va(unsigned long address, unsigned int pid,
+		      unsigned int tsize, unsigned int ind);
+#else
 extern void __tlbil_va(unsigned long address, unsigned int pid);
 static inline void _tlbil_va(unsigned long address, unsigned int pid,
 			     unsigned int tsize, unsigned int ind)
@@ -67,11 +74,16 @@ static inline void _tlbil_va(unsigned long address, unsigned int pid,
  * implementation. When that becomes the case, this will be
  * an extern.
  */
+#ifdef CONFIG_PPC_BOOK3E
+extern void _tlbivax_bcast(unsigned long address, unsigned int pid,
+			   unsigned int tsize, unsigned int ind);
+#else
 static inline void _tlbivax_bcast(unsigned long address, unsigned int pid,
 				   unsigned int tsize, unsigned int ind)
 {
 	BUG();
 }
+#endif
 
 #else /* CONFIG_PPC_MMU_NOHASH */
 
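On Book3E the virtual-address invalidation now carries the translation size and an indirect flag. A hedged usage sketch (function and variable names here are illustrative only; the real callers are __flush_tlb_page() and tlb_flush_pgtable() in tlb_nohash.c):

/* Hedged sketch of a caller, not code from the patch */
static void invalidate_one_page(struct mm_struct *mm, unsigned long vmaddr,
				unsigned long vpte_addr)
{
	unsigned int pid = mm->context.id;

	/* direct translation of the page itself */
	_tlbil_va(vmaddr, pid, mmu_get_tsize(mmu_virtual_psize), 0);
	/* with HW tablewalk: the indirect (IND=1) entry covering its PTE page */
	_tlbil_va(vpte_addr, pid, mmu_get_tsize(mmu_pte_psize), 1);
}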
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
new file mode 100644
index 000000000000..10d524ded7b2
--- /dev/null
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -0,0 +1,734 @@
1/*
2 * Low level TLB miss handlers for Book3E
3 *
4 * Copyright (C) 2008-2009
5 * Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <asm/processor.h>
14#include <asm/reg.h>
15#include <asm/page.h>
16#include <asm/mmu.h>
17#include <asm/ppc_asm.h>
18#include <asm/asm-offsets.h>
19#include <asm/cputable.h>
20#include <asm/pgtable.h>
21#include <asm/reg.h>
22#include <asm/exception-64e.h>
23#include <asm/ppc-opcode.h>
24
25#ifdef CONFIG_PPC_64K_PAGES
26#define VPTE_PMD_SHIFT (PTE_INDEX_SIZE+1)
27#else
28#define VPTE_PMD_SHIFT (PTE_INDEX_SIZE)
29#endif
30#define VPTE_PUD_SHIFT (VPTE_PMD_SHIFT + PMD_INDEX_SIZE)
31#define VPTE_PGD_SHIFT (VPTE_PUD_SHIFT + PUD_INDEX_SIZE)
32#define VPTE_INDEX_SIZE (VPTE_PGD_SHIFT + PGD_INDEX_SIZE)
33
34
35/**********************************************************************
36 * *
37 * TLB miss handling for Book3E with TLB reservation and HES support *
38 * *
39 **********************************************************************/
40
41
42/* Data TLB miss */
43 START_EXCEPTION(data_tlb_miss)
44 TLB_MISS_PROLOG
45
46 /* Now we handle the fault proper. We only save DEAR in the normal
47 * fault case since that's the only interesting value here.
48 * We could probably also optimize by not saving SRR0/1 in the
49 * linear mapping case but I'll leave that for later
50 */
51 mfspr r14,SPRN_ESR
52 mfspr r16,SPRN_DEAR /* get faulting address */
53 srdi r15,r16,60 /* get region */
54 cmpldi cr0,r15,0xc /* linear mapping ? */
55 TLB_MISS_STATS_SAVE_INFO
56 beq tlb_load_linear /* yes -> go to linear map load */
57
58 /* The page tables are mapped virtually linear. At this point, though,
59 * we don't know whether we are trying to fault in a first level
60 * virtual address or a virtual page table address. We can get that
61 * from bit 0x1 of the region ID which we have set for a page table
62 */
63 andi. r10,r15,0x1
64 bne- virt_page_table_tlb_miss
65
66 std r14,EX_TLB_ESR(r12); /* save ESR */
67 std r16,EX_TLB_DEAR(r12); /* save DEAR */
68
69 /* We need _PAGE_PRESENT and _PAGE_ACCESSED set */
70 li r11,_PAGE_PRESENT
71 oris r11,r11,_PAGE_ACCESSED@h
72
73 /* We do the user/kernel test for the PID here along with the RW test
74 */
75 cmpldi cr0,r15,0 /* Check for user region */
76
77 /* We pre-test some combination of permissions to avoid double
78 * faults:
79 *
80 * We move the ESR:ST bit into the position of _PAGE_BAP_SW in the PTE
81 * ESR_ST is 0x00800000
82 * _PAGE_BAP_SW is 0x00000010
83 * So the shift is >> 19. This tests for supervisor writeability.
84 * If the page happens to be supervisor writeable and not user
85 * writeable, we will take a new fault later, but that should be
86 * a rare enough case.
87 *
88 * We also move ESR_ST in _PAGE_DIRTY position
89 * _PAGE_DIRTY is 0x00001000 so the shift is >> 11
90 *
91 * MAS1 is preset for all we need except for TID that needs to
92 * be cleared for kernel translations
93 */
94 rlwimi r11,r14,32-19,27,27
95 rlwimi r11,r14,32-16,19,19
96 beq normal_tlb_miss
97 /* XXX replace the RMW cycles with immediate loads + writes */
981: mfspr r10,SPRN_MAS1
99 cmpldi cr0,r15,8 /* Check for vmalloc region */
100 rlwinm r10,r10,0,16,1 /* Clear TID */
101 mtspr SPRN_MAS1,r10
102 beq+ normal_tlb_miss
103
104 /* We got a crappy address, just fault with whatever DEAR and ESR
105 * are here
106 */
107 TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
108 TLB_MISS_EPILOG_ERROR
109 b exc_data_storage_book3e
110
111/* Instruction TLB miss */
112 START_EXCEPTION(instruction_tlb_miss)
113 TLB_MISS_PROLOG
114
115 /* If we take a recursive fault, the second level handler may need
116 * to know whether we are handling a data or instruction fault in
117 * order to get to the right store fault handler. We provide that
118 * info by writing a crazy value in ESR in our exception frame
119 */
120 li r14,-1 /* store to exception frame is done later */
121
122 /* Now we handle the fault proper. We only save DEAR in the non
123 * linear mapping case since we know the linear mapping case will
124 * not re-enter. We could indeed optimize and also not save SRR0/1
125 * in the linear mapping case but I'll leave that for later
126 *
127 * Faulting address is SRR0 which is already in r16
128 */
129 srdi r15,r16,60 /* get region */
130 cmpldi cr0,r15,0xc /* linear mapping ? */
131 TLB_MISS_STATS_SAVE_INFO
132 beq tlb_load_linear /* yes -> go to linear map load */
133
134 /* We do the user/kernel test for the PID here along with the RW test
135 */
136 li r11,_PAGE_PRESENT|_PAGE_HWEXEC /* Base perm */
137 oris r11,r11,_PAGE_ACCESSED@h
138
139 cmpldi cr0,r15,0 /* Check for user region */
140 std r14,EX_TLB_ESR(r12) /* write crazy -1 to frame */
141 beq normal_tlb_miss
142 /* XXX replace the RMW cycles with immediate loads + writes */
1431: mfspr r10,SPRN_MAS1
144 cmpldi cr0,r15,8 /* Check for vmalloc region */
145 rlwinm r10,r10,0,16,1 /* Clear TID */
146 mtspr SPRN_MAS1,r10
147 beq+ normal_tlb_miss
148
149 /* We got a crappy address, just fault */
150 TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
151 TLB_MISS_EPILOG_ERROR
152 b exc_instruction_storage_book3e
153
154/*
155 * This is the guts of the first-level TLB miss handler for direct
156 * misses. We are entered with:
157 *
158 * r16 = faulting address
159 * r15 = region ID
160 * r14 = crap (free to use)
161 * r13 = PACA
162 * r12 = TLB exception frame in PACA
163 * r11 = PTE permission mask
164 * r10 = crap (free to use)
165 */
166normal_tlb_miss:
167 /* So we first construct the page table address. We do that by
168 * shifting the bottom of the address (not the region ID) by
169 * PAGE_SHIFT-3, clearing the bottom 3 bits (get a PTE ptr) and
170 * or'ing the fourth high bit.
171 *
172 * NOTE: For 64K pages, we do things slightly differently in
173 * order to handle the weird page table format used by linux
174 */
175 ori r10,r15,0x1
176#ifdef CONFIG_PPC_64K_PAGES
177 /* For the top bits, 16 bytes per PTE */
178 rldicl r14,r16,64-(PAGE_SHIFT-4),PAGE_SHIFT-4+4
179 /* Now create the bottom bits as 0 in position 0x8000 and
180 * the rest calculated for 8 bytes per PTE
181 */
182 rldicl r15,r16,64-(PAGE_SHIFT-3),64-15
183 /* Insert the bottom bits in */
184 rlwimi r14,r15,0,16,31
185#else
186 rldicl r14,r16,64-(PAGE_SHIFT-3),PAGE_SHIFT-3+4
187#endif
188 sldi r15,r10,60
189 clrrdi r14,r14,3
190 or r10,r15,r14
191
192 /* Set the TLB reservation and search for an existing entry. Then load
193 * the entry.
194 */
195 PPC_TLBSRX_DOT(0,r16)
196 ld r14,0(r10)
197 beq normal_tlb_miss_done
198
199finish_normal_tlb_miss:
200 /* Check if required permissions are met */
201 andc. r15,r11,r14
202 bne- normal_tlb_miss_access_fault
203
204 /* Now we build the MAS:
205 *
206 * MAS 0 : Fully setup with defaults in MAS4 and TLBnCFG
207 * MAS 1 : Almost fully setup
208 * - PID already updated by caller if necessary
209 * - TSIZE need change if !base page size, not
210 * yet implemented for now
211 * MAS 2 : Defaults not useful, need to be redone
212 * MAS 3+7 : Needs to be done
213 *
214 * TODO: mix up code below for better scheduling
215 */
216 clrrdi r11,r16,12 /* Clear low crap in EA */
217 rlwimi r11,r14,32-19,27,31 /* Insert WIMGE */
218 mtspr SPRN_MAS2,r11
219
220 /* Check page size, if not standard, update MAS1 */
221 rldicl r11,r14,64-8,64-8
222#ifdef CONFIG_PPC_64K_PAGES
223 cmpldi cr0,r11,BOOK3E_PAGESZ_64K
224#else
225 cmpldi cr0,r11,BOOK3E_PAGESZ_4K
226#endif
227 beq- 1f
228 mfspr r11,SPRN_MAS1
229 rlwimi r11,r14,31,21,24
230 rlwinm r11,r11,0,21,19
231 mtspr SPRN_MAS1,r11
2321:
233 /* Move RPN in position */
234 rldicr r11,r14,64-(PTE_RPN_SHIFT-PAGE_SHIFT),63-PAGE_SHIFT
235 clrldi r15,r11,12 /* Clear crap at the top */
236 rlwimi r15,r14,32-8,22,25 /* Move in U bits */
237 rlwimi r15,r14,32-2,26,31 /* Move in BAP bits */
238
239 /* Mask out SW and UW if !DIRTY (XXX optimize this !) */
240 andi. r11,r14,_PAGE_DIRTY
241 bne 1f
242 li r11,MAS3_SW|MAS3_UW
243 andc r15,r15,r11
2441: mtspr SPRN_MAS7_MAS3,r15
245
246 tlbwe
247
248normal_tlb_miss_done:
249 /* We don't bother with restoring DEAR or ESR since we know we are
250 * level 0 and just going back to userland. They are only needed
251 * if you are going to take an access fault
252 */
253 TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK)
254 TLB_MISS_EPILOG_SUCCESS
255 rfi
256
257normal_tlb_miss_access_fault:
258 /* We need to check if it was an instruction miss */
259 andi. r10,r11,_PAGE_HWEXEC
260 bne 1f
261 ld r14,EX_TLB_DEAR(r12)
262 ld r15,EX_TLB_ESR(r12)
263 mtspr SPRN_DEAR,r14
264 mtspr SPRN_ESR,r15
265 TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
266 TLB_MISS_EPILOG_ERROR
267 b exc_data_storage_book3e
2681: TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
269 TLB_MISS_EPILOG_ERROR
270 b exc_instruction_storage_book3e
271
272
273/*
274 * This is the guts of the second-level TLB miss handler for direct
275 * misses. We are entered with:
276 *
277 * r16 = virtual page table faulting address
278 * r15 = region (top 4 bits of address)
279 * r14 = crap (free to use)
280 * r13 = PACA
281 * r12 = TLB exception frame in PACA
282 * r11 = crap (free to use)
283 * r10 = crap (free to use)
284 *
285 * Note that this should only ever be called as a second level handler
286 * with the current scheme when using SW load.
287 * That means we can always get the original fault DEAR at
288 * EX_TLB_DEAR-EX_TLB_SIZE(r12)
289 *
290 * It can be re-entered by the linear mapping miss handler. However, to
291 * avoid too much complication, it will restart the whole fault at level
292 * 0 so we don't care too much about clobbers
293 *
294 * XXX That code was written back when we couldn't clobber r14. We can now,
295 * so we could probably optimize things a bit
296 */
297virt_page_table_tlb_miss:
298 /* Are we hitting a kernel page table ? */
299 andi. r10,r15,0x8
300
301 /* The cool thing now is that r10 contains 0 for user and 8 for kernel,
302 * and we happen to have the swapper_pg_dir at offset 8 from the user
303 * pgdir in the PACA :-).
304 */
305 add r11,r10,r13
306
307 /* If kernel, we need to clear MAS1 TID */
308 beq 1f
309 /* XXX replace the RMW cycles with immediate loads + writes */
310 mfspr r10,SPRN_MAS1
311 rlwinm r10,r10,0,16,1 /* Clear TID */
312 mtspr SPRN_MAS1,r10
3131:
314 /* Search if we already have a TLB entry for that virtual address, and
315 * if we do, bail out.
316 */
317 PPC_TLBSRX_DOT(0,r16)
318 beq virt_page_table_tlb_miss_done
319
320 /* Now, we need to walk the page tables. First check if we are in
321 * range.
322 */
323 rldicl. r10,r16,64-(VPTE_INDEX_SIZE+3),VPTE_INDEX_SIZE+3+4
324 bne- virt_page_table_tlb_miss_fault
325
326 /* Get the PGD pointer */
327 ld r15,PACAPGD(r11)
328 cmpldi cr0,r15,0
329 beq- virt_page_table_tlb_miss_fault
330
331 /* Get to PGD entry */
332 rldicl r11,r16,64-VPTE_PGD_SHIFT,64-PGD_INDEX_SIZE-3
333 clrrdi r10,r11,3
334 ldx r15,r10,r15
335 cmpldi cr0,r15,0
336 beq virt_page_table_tlb_miss_fault
337
338#ifndef CONFIG_PPC_64K_PAGES
339 /* Get to PUD entry */
340 rldicl r11,r16,64-VPTE_PUD_SHIFT,64-PUD_INDEX_SIZE-3
341 clrrdi r10,r11,3
342 ldx r15,r10,r15
343 cmpldi cr0,r15,0
344 beq virt_page_table_tlb_miss_fault
345#endif /* CONFIG_PPC_64K_PAGES */
346
347 /* Get to PMD entry */
348 rldicl r11,r16,64-VPTE_PMD_SHIFT,64-PMD_INDEX_SIZE-3
349 clrrdi r10,r11,3
350 ldx r15,r10,r15
351 cmpldi cr0,r15,0
352 beq virt_page_table_tlb_miss_fault
353
354 /* Ok, we're all right, we can now create a kernel translation for
355 * a 4K or 64K page from r16 -> r15.
356 */
357 /* Now we build the MAS:
358 *
359 * MAS 0 : Fully setup with defaults in MAS4 and TLBnCFG
360 * MAS 1 : Almost fully setup
361 * - PID already updated by caller if necessary
362 * - TSIZE for now is base page size always
363 * MAS 2 : Use defaults
364 * MAS 3+7 : Needs to be done
365 *
366 * So we only do MAS 2 and 3 for now...
367 */
368 clrldi r11,r15,4 /* remove region ID from RPN */
369 ori r10,r11,1 /* Or-in SR */
370 mtspr SPRN_MAS7_MAS3,r10
371
372 tlbwe
373
374virt_page_table_tlb_miss_done:
375
376 /* We have overridden MAS2:EPN but currently our primary TLB miss
377 * handler will always restore it so that should not be an issue,
378 * if we ever optimize the primary handler to not write MAS2 on
379 * some cases, we'll have to restore MAS2:EPN here based on the
380 * original fault's DEAR. If we do that we have to modify the
381 * ITLB miss handler to also store SRR0 in the exception frame
382 * as DEAR.
383 *
384 * However, one nasty thing we did is we cleared the reservation
385 * (well, potentially we did). We do a trick here thus if we
386 * are not a level 0 exception (we interrupted the TLB miss) we
387 * offset the return address by -4 in order to replay the tlbsrx
388 * instruction there
389 */
390 subf r10,r13,r12
391 cmpldi cr0,r10,PACA_EXTLB+EX_TLB_SIZE
392 bne- 1f
393 ld r11,PACA_EXTLB+EX_TLB_SIZE+EX_TLB_SRR0(r13)
394 addi r10,r11,-4
395 std r10,PACA_EXTLB+EX_TLB_SIZE+EX_TLB_SRR0(r13)
3961:
397 /* Return to caller, normal case */
398 TLB_MISS_STATS_X(MMSTAT_TLB_MISS_PT_OK);
399 TLB_MISS_EPILOG_SUCCESS
400 rfi
401
402virt_page_table_tlb_miss_fault:
403 /* If we fault here, things are a little bit tricky. We need to call
404 * either the data or instruction storage fault, and we need to retrieve
405 * the original fault address and ESR (for data).
406 *
407 * The thing is, we know that in normal circumstances, this is
408 * always called as a second level tlb miss for SW load or as a first
409 * level TLB miss for HW load, so we should be able to peek at the
410 * relevant information in the first exception frame in the PACA.
411 *
412 * However, we do need to double check that, because we may just hit
413 * a stray kernel pointer or a userland attack trying to hit those
414 * areas. If that is the case, we do a data fault. (We can't get here
415 * from an instruction tlb miss anyway).
416 *
417 * Note also that when going to a fault, we must unwind the previous
418 * level as well. Since we are doing that, we don't need to clear or
419 * restore the TLB reservation either.
420 */
421 subf r10,r13,r12
422 cmpldi cr0,r10,PACA_EXTLB+EX_TLB_SIZE
423 bne- virt_page_table_tlb_miss_whacko_fault
424
425 /* We dig the original DEAR and ESR from slot 0 */
426 ld r15,EX_TLB_DEAR+PACA_EXTLB(r13)
427 ld r16,EX_TLB_ESR+PACA_EXTLB(r13)
428
429 /* We check for the "special" ESR value for instruction faults */
430 cmpdi cr0,r16,-1
431 beq 1f
432 mtspr SPRN_DEAR,r15
433 mtspr SPRN_ESR,r16
434 TLB_MISS_STATS_D(MMSTAT_TLB_MISS_PT_FAULT);
435 TLB_MISS_EPILOG_ERROR
436 b exc_data_storage_book3e
4371: TLB_MISS_STATS_I(MMSTAT_TLB_MISS_PT_FAULT);
438 TLB_MISS_EPILOG_ERROR
439 b exc_instruction_storage_book3e
440
441virt_page_table_tlb_miss_whacko_fault:
442 /* The linear fault will restart everything so ESR and DEAR will
443 * not have been clobbered, let's just fault with what we have
444 */
445 TLB_MISS_STATS_X(MMSTAT_TLB_MISS_PT_FAULT);
446 TLB_MISS_EPILOG_ERROR
447 b exc_data_storage_book3e
448
449
450/**************************************************************
451 * *
452 * TLB miss handling for Book3E with hw page table support *
453 * *
454 **************************************************************/
455
456
457/* Data TLB miss */
458 START_EXCEPTION(data_tlb_miss_htw)
459 TLB_MISS_PROLOG
460
461 /* Now we handle the fault proper. We only save DEAR in the normal
462 * fault case since that's the only interesting value here.
463 * We could probably also optimize by not saving SRR0/1 in the
464 * linear mapping case but I'll leave that for later
465 */
466 mfspr r14,SPRN_ESR
467 mfspr r16,SPRN_DEAR /* get faulting address */
468 srdi r11,r16,60 /* get region */
469 cmpldi cr0,r11,0xc /* linear mapping ? */
470 TLB_MISS_STATS_SAVE_INFO
471 beq tlb_load_linear /* yes -> go to linear map load */
472
473 /* We do the user/kernel test for the PID here along with the RW test
474 */
475 cmpldi cr0,r11,0 /* Check for user region */
476 ld r15,PACAPGD(r13) /* Load user pgdir */
477 beq htw_tlb_miss
478
479 /* XXX replace the RMW cycles with immediate loads + writes */
4801: mfspr r10,SPRN_MAS1
481 cmpldi cr0,r11,8 /* Check for vmalloc region */
482 rlwinm r10,r10,0,16,1 /* Clear TID */
483 mtspr SPRN_MAS1,r10
484 ld r15,PACA_KERNELPGD(r13) /* Load kernel pgdir */
485 beq+ htw_tlb_miss
486
487 /* We got a crappy address, just fault with whatever DEAR and ESR
488 * are here
489 */
490 TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
491 TLB_MISS_EPILOG_ERROR
492 b exc_data_storage_book3e
493
494/* Instruction TLB miss */
495 START_EXCEPTION(instruction_tlb_miss_htw)
496 TLB_MISS_PROLOG
497
498 /* If we take a recursive fault, the second level handler may need
499 * to know whether we are handling a data or instruction fault in
500 * order to get to the right store fault handler. We provide that
501 * info by keeping a crazy value for ESR in r14
502 */
503 li r14,-1 /* store to exception frame is done later */
504
505 /* Now we handle the fault proper. We only save DEAR in the non
506 * linear mapping case since we know the linear mapping case will
507 * not re-enter. We could indeed optimize and also not save SRR0/1
508 * in the linear mapping case but I'll leave that for later
509 *
510 * Faulting address is SRR0 which is already in r16
511 */
512 srdi r11,r16,60 /* get region */
513 cmpldi cr0,r11,0xc /* linear mapping ? */
514 TLB_MISS_STATS_SAVE_INFO
515 beq tlb_load_linear /* yes -> go to linear map load */
516
517 /* We do the user/kernel test for the PID here along with the RW test
518 */
519 cmpldi cr0,r11,0 /* Check for user region */
520 ld r15,PACAPGD(r13) /* Load user pgdir */
521 beq htw_tlb_miss
522
523 /* XXX replace the RMW cycles with immediate loads + writes */
5241: mfspr r10,SPRN_MAS1
525 cmpldi cr0,r11,8 /* Check for vmalloc region */
526 rlwinm r10,r10,0,16,1 /* Clear TID */
527 mtspr SPRN_MAS1,r10
528 ld r15,PACA_KERNELPGD(r13) /* Load kernel pgdir */
529 beq+ htw_tlb_miss
530
531 /* We got a crappy address, just fault */
532 TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
533 TLB_MISS_EPILOG_ERROR
534 b exc_instruction_storage_book3e
535
536
537/*
538 * This is the guts of the second-level TLB miss handler for direct
539 * misses. We are entered with:
540 *
541 * r16 = virtual page table faulting address
542 * r15 = PGD pointer
543 * r14 = ESR
544 * r13 = PACA
545 * r12 = TLB exception frame in PACA
546 * r11 = crap (free to use)
547 * r10 = crap (free to use)
548 *
549 * It can be re-entered by the linear mapping miss handler. However, to
550 * avoid too much complication, it will save/restore things for us
551 */
552htw_tlb_miss:
553 /* Search if we already have a TLB entry for that virtual address, and
554 * if we do, bail out.
555 *
556 * MAS1:IND should be already set based on MAS4
557 */
558 PPC_TLBSRX_DOT(0,r16)
559 beq htw_tlb_miss_done
560
561 /* Now, we need to walk the page tables. First check if we are in
562 * range.
563 */
564 rldicl. r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
565 bne- htw_tlb_miss_fault
566
567 /* Get the PGD pointer */
568 cmpldi cr0,r15,0
569 beq- htw_tlb_miss_fault
570
571 /* Get to PGD entry */
572 rldicl r11,r16,64-(PGDIR_SHIFT-3),64-PGD_INDEX_SIZE-3
573 clrrdi r10,r11,3
574 ldx r15,r10,r15
575 cmpldi cr0,r15,0
576 beq htw_tlb_miss_fault
577
578#ifndef CONFIG_PPC_64K_PAGES
579 /* Get to PUD entry */
580 rldicl r11,r16,64-(PUD_SHIFT-3),64-PUD_INDEX_SIZE-3
581 clrrdi r10,r11,3
582 ldx r15,r10,r15
583 cmpldi cr0,r15,0
584 beq htw_tlb_miss_fault
585#endif /* CONFIG_PPC_64K_PAGES */
586
587 /* Get to PMD entry */
588 rldicl r11,r16,64-(PMD_SHIFT-3),64-PMD_INDEX_SIZE-3
589 clrrdi r10,r11,3
590 ldx r15,r10,r15
591 cmpldi cr0,r15,0
592 beq htw_tlb_miss_fault
593
594 /* Ok, we're all right, we can now create an indirect entry for
595 * a 1M or 256M page.
596 *
597 * The last trick is now that because we use "half" pages for
598 * the HTW (1M IND is 2K and 256M IND is 32K) we need to account
599 * for an added LSB bit to the RPN. For 64K pages, there is no
600 * problem as we already use 32K arrays (half PTE pages), but for
601 * 4K page we need to extract a bit from the virtual address and
602 * insert it into the "PA52" bit of the RPN.
603 */
604#ifndef CONFIG_PPC_64K_PAGES
605 rlwimi r15,r16,32-9,20,20
606#endif
607 /* Now we build the MAS:
608 *
609 * MAS 0 : Fully setup with defaults in MAS4 and TLBnCFG
610 * MAS 1 : Almost fully setup
611 * - PID already updated by caller if necessary
612 * - TSIZE for now is base ind page size always
613 * MAS 2 : Use defaults
614 * MAS 3+7 : Needs to be done
615 */
616#ifdef CONFIG_PPC_64K_PAGES
617 ori r10,r15,(BOOK3E_PAGESZ_64K << MAS3_SPSIZE_SHIFT)
618#else
619 ori r10,r15,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT)
620#endif
621 mtspr SPRN_MAS7_MAS3,r10
622
623 tlbwe
624
625htw_tlb_miss_done:
626 /* We don't bother with restoring DEAR or ESR since we know we are
627 * level 0 and just going back to userland. They are only needed
628 * if you are going to take an access fault
629 */
630 TLB_MISS_STATS_X(MMSTAT_TLB_MISS_PT_OK)
631 TLB_MISS_EPILOG_SUCCESS
632 rfi
633
634htw_tlb_miss_fault:
635 /* We need to check if it was an instruction miss. We know this
636 * though because r14 would contain -1
637 */
638 cmpdi cr0,r14,-1
639 beq 1f
640 mtspr SPRN_DEAR,r16
641 mtspr SPRN_ESR,r14
642 TLB_MISS_STATS_D(MMSTAT_TLB_MISS_PT_FAULT)
643 TLB_MISS_EPILOG_ERROR
644 b exc_data_storage_book3e
6451: TLB_MISS_STATS_I(MMSTAT_TLB_MISS_PT_FAULT)
646 TLB_MISS_EPILOG_ERROR
647 b exc_instruction_storage_book3e
648
649/*
650 * This is the guts of "any" level TLB miss handler for kernel linear
651 * mapping misses. We are entered with:
652 *
653 *
654 * r16 = faulting address
655 * r15 = crap (free to use)
656 * r14 = ESR (data) or -1 (instruction)
657 * r13 = PACA
658 * r12 = TLB exception frame in PACA
659 * r11 = crap (free to use)
660 * r10 = crap (free to use)
661 *
662 * In addition we know that we will not re-enter, so in theory, we could
663 * use a simpler epilog not restoring SRR0/1 etc.. but we'll do that later.
664 *
665 * We also need to be careful about MAS registers here & TLB reservation,
666 * as we know we'll have clobbered them if we interrupt the main TLB miss
667 * handlers in which case we probably want to do a full restart at level
668 * 0 rather than saving / restoring the MAS.
669 *
670 * Note: If we care about performance of that core, we can easily shuffle
671 * a few things around
672 */
673tlb_load_linear:
674 /* For now, we assume the linear mapping is contiguous and stops at
675 * linear_map_top. We also assume the size is a multiple of 1G, thus
676 * we only use 1G pages for now. That might have to be changed in a
677 * final implementation, especially when dealing with hypervisors
678 */
679 ld r11,PACATOC(r13)
680 ld r11,linear_map_top@got(r11)
681 ld r10,0(r11)
682 cmpld cr0,r10,r16
683 bge tlb_load_linear_fault
684
685 /* MAS1 needs a whole new setup. */
686 li r15,(BOOK3E_PAGESZ_1GB<<MAS1_TSIZE_SHIFT)
687 oris r15,r15,MAS1_VALID@h /* MAS1 needs V and TSIZE */
688 mtspr SPRN_MAS1,r15
689
690 /* Already somebody there ? */
691 PPC_TLBSRX_DOT(0,r16)
692 beq tlb_load_linear_done
693
694 /* Now we build the remaining MAS. MAS0 and 2 should be fine
695 * with their defaults, which leaves us with MAS 3 and 7. The
696 * mapping is linear, so we just take the address, clear the
697 * region bits, and or in the permission bits which are currently
698 * hard wired
699 */
700 clrrdi r10,r16,30 /* 1G page index */
701 clrldi r10,r10,4 /* clear region bits */
702 ori r10,r10,MAS3_SR|MAS3_SW|MAS3_SX
703 mtspr SPRN_MAS7_MAS3,r10
704
705 tlbwe
706
707tlb_load_linear_done:
708 /* We use the "error" epilog for success as we do want to
709 * restore to the initial faulting context, whatever it was.
710 * We do that because we can't resume a fault within a TLB
711 * miss handler, due to MAS and TLB reservation being clobbered.
712 */
713 TLB_MISS_STATS_X(MMSTAT_TLB_MISS_LINEAR)
714 TLB_MISS_EPILOG_ERROR
715 rfi
716
717tlb_load_linear_fault:
718 /* We keep the DEAR and ESR around, this shouldn't have happened */
719 cmpdi cr0,r14,-1
720 beq 1f
721 TLB_MISS_EPILOG_ERROR_SPECIAL
722 b exc_data_storage_book3e
7231: TLB_MISS_EPILOG_ERROR_SPECIAL
724 b exc_instruction_storage_book3e
725
726
727#ifdef CONFIG_BOOK3E_MMU_TLB_STATS
728.tlb_stat_inc:
7291: ldarx r8,0,r9
730 addi r8,r8,1
731 stdcx. r8,0,r9
732 bne- 1b
733 blr
734#endif
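The permission pre-test in the data miss handler above leans on two bit-position coincidences documented in its comments: ESR:ST lands on _PAGE_BAP_SW with a 19-bit right shift and on _PAGE_DIRTY with an 11-bit right shift. A standalone check of that arithmetic (not kernel code), using the constants quoted in the handler comments:

#include <assert.h>

#define ESR_ST		0x00800000u	/* store-type exception syndrome bit */
#define PAGE_BAP_SW	0x00000010u	/* supervisor write permission, per the comment */
#define PAGE_DIRTY	0x00001000u

int main(void)
{
	assert((ESR_ST >> 19) == PAGE_BAP_SW);	/* "so the shift is >> 19" */
	assert((ESR_ST >> 11) == PAGE_DIRTY);	/* "so the shift is >> 11" */
	return 0;
}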
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 6b43fc49f103..d16100c9416a 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -7,8 +7,8 @@
  *
  * -- BenH
  *
- * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
+ * Copyright 2008,2009 Ben Herrenschmidt <benh@kernel.crashing.org>
  *                     IBM Corp.
  *
  *  Derived from arch/ppc/mm/init.c:
  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
@@ -34,12 +34,70 @@
 #include <linux/pagemap.h>
 #include <linux/preempt.h>
 #include <linux/spinlock.h>
+#include <linux/lmb.h>
 
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
+#include <asm/code-patching.h>
 
 #include "mmu_decl.h"
 
+#ifdef CONFIG_PPC_BOOK3E
+struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
+	[MMU_PAGE_4K] = {
+		.shift	= 12,
+		.enc	= BOOK3E_PAGESZ_4K,
+	},
+	[MMU_PAGE_16K] = {
+		.shift	= 14,
+		.enc	= BOOK3E_PAGESZ_16K,
+	},
+	[MMU_PAGE_64K] = {
+		.shift	= 16,
+		.enc	= BOOK3E_PAGESZ_64K,
+	},
+	[MMU_PAGE_1M] = {
+		.shift	= 20,
+		.enc	= BOOK3E_PAGESZ_1M,
+	},
+	[MMU_PAGE_16M] = {
+		.shift	= 24,
+		.enc	= BOOK3E_PAGESZ_16M,
+	},
+	[MMU_PAGE_256M] = {
+		.shift	= 28,
+		.enc	= BOOK3E_PAGESZ_256M,
+	},
+	[MMU_PAGE_1G] = {
+		.shift	= 30,
+		.enc	= BOOK3E_PAGESZ_1GB,
+	},
+};
+static inline int mmu_get_tsize(int psize)
+{
+	return mmu_psize_defs[psize].enc;
+}
+#else
+static inline int mmu_get_tsize(int psize)
+{
+	/* This isn't used on !Book3E for now */
+	return 0;
+}
+#endif
+
+/* The variables below are currently only used on 64-bit Book3E
+ * though this will probably be made common with other nohash
+ * implementations at some point
+ */
+#ifdef CONFIG_PPC64
+
+int mmu_linear_psize;		/* Page size used for the linear mapping */
+int mmu_pte_psize;		/* Page size used for PTE pages */
+int book3e_htw_enabled;		/* Is HW tablewalk enabled ? */
+unsigned long linear_map_top;	/* Top of linear mapping */
+
+#endif /* CONFIG_PPC64 */
+
 /*
  * Base TLB flushing operations:
  *
@@ -82,7 +140,7 @@ void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 {
 	__local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
-			       0 /* tsize unused for now */, 0);
+			       mmu_get_tsize(mmu_virtual_psize), 0);
 }
 EXPORT_SYMBOL(local_flush_tlb_page);
 
@@ -198,7 +256,7 @@ void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 {
 	__flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
-			 0 /* tsize unused for now */, 0);
+			 mmu_get_tsize(mmu_virtual_psize), 0);
 }
 EXPORT_SYMBOL(flush_tlb_page);
 
@@ -241,3 +299,140 @@ void tlb_flush(struct mmu_gather *tlb)
241 299	/* Push out batch of freed page tables */
242 300	pte_free_finish();
243 301}
302
303/*
304 * Below are functions specific to the 64-bit variant of Book3E though that
305 * may change in the future
306 */
307
308#ifdef CONFIG_PPC64
309
310/*
311 * Handling of virtual linear page tables or indirect TLB entries
312 * flushing when PTE pages are freed
313 */
314void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
315{
316 int tsize = mmu_psize_defs[mmu_pte_psize].enc;
317
318 if (book3e_htw_enabled) {
319 unsigned long start = address & PMD_MASK;
320 unsigned long end = address + PMD_SIZE;
321 unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;
322
323 /* This isn't the most optimal, ideally we would factor out the
324 * while preempt & CPU mask mucking around, or even the IPI but
325 * it will do for now
326 */
327 while (start < end) {
328 __flush_tlb_page(tlb->mm, start, tsize, 1);
329 start += size;
330 }
331 } else {
332 unsigned long rmask = 0xf000000000000000ul;
333 unsigned long rid = (address & rmask) | 0x1000000000000000ul;
334 unsigned long vpte = address & ~rmask;
335
336#ifdef CONFIG_PPC_64K_PAGES
337 vpte = (vpte >> (PAGE_SHIFT - 4)) & ~0xfffful;
338#else
339 vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
340#endif
341 vpte |= rid;
342 __flush_tlb_page(tlb->mm, vpte, tsize, 0);
343 }
344}
345
346/*
347 * Early initialization of the MMU TLB code
348 */
349static void __early_init_mmu(int boot_cpu)
350{
351 extern unsigned int interrupt_base_book3e;
352 extern unsigned int exc_data_tlb_miss_htw_book3e;
353 extern unsigned int exc_instruction_tlb_miss_htw_book3e;
354
355 unsigned int *ibase = &interrupt_base_book3e;
356 unsigned int mas4;
357
358 /* XXX This will have to be decided at runtime, but right
359 * now our boot and TLB miss code hard wires it
360 */
361 mmu_linear_psize = MMU_PAGE_1G;
362
363
364 /* Check if HW tablewalk is present, and if yes, enable it by:
365 *
366 * - patching the TLB miss handlers to branch to the
367 * one dedicated to it
368 *
369 * - setting the global book3e_htw_enabled
370 *
371 * - Set MAS4:INDD and default page size
372 */
373
374 /* XXX This code only checks for TLB 0 capabilities and doesn't
375 * check what page size combos are supported by the HW. It
376 * also doesn't handle the case where a separate array holds
377 * the IND entries from the array loaded by the PT.
378 */
379 if (boot_cpu) {
380 unsigned int tlb0cfg = mfspr(SPRN_TLB0CFG);
381
382 /* Check if HW loader is supported */
383 if ((tlb0cfg & TLBnCFG_IND) &&
384 (tlb0cfg & TLBnCFG_PT)) {
385 patch_branch(ibase + (0x1c0 / 4),
386 (unsigned long)&exc_data_tlb_miss_htw_book3e, 0);
387 patch_branch(ibase + (0x1e0 / 4),
388 (unsigned long)&exc_instruction_tlb_miss_htw_book3e, 0);
389 book3e_htw_enabled = 1;
390 }
391 pr_info("MMU: Book3E Page Tables %s\n",
392 book3e_htw_enabled ? "Enabled" : "Disabled");
393 }
394
395 /* Set MAS4 based on page table setting */
396
397 mas4 = 0x4 << MAS4_WIMGED_SHIFT;
398 if (book3e_htw_enabled) {
399 mas4 |= MAS4_INDD;
400#ifdef CONFIG_PPC_64K_PAGES
401 mas4 |= BOOK3E_PAGESZ_256M << MAS4_TSIZED_SHIFT;
402 mmu_pte_psize = MMU_PAGE_256M;
403#else
404 mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
405 mmu_pte_psize = MMU_PAGE_1M;
406#endif
407 } else {
408#ifdef CONFIG_PPC_64K_PAGES
409 mas4 |= BOOK3E_PAGESZ_64K << MAS4_TSIZED_SHIFT;
410#else
411 mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
412#endif
413 mmu_pte_psize = mmu_virtual_psize;
414 }
415 mtspr(SPRN_MAS4, mas4);
416
417 /* Set the global containing the top of the linear mapping
418 * for use by the TLB miss code
419 */
420 linear_map_top = lmb_end_of_DRAM();
421
422 /* A sync won't hurt us after mucking around with
423 * the MMU configuration
424 */
425 mb();
426}
427
428void __init early_init_mmu(void)
429{
430 __early_init_mmu(1);
431}
432
433void __cpuinit early_init_mmu_secondary(void)
434{
435 __early_init_mmu(0);
436}
437
438#endif /* CONFIG_PPC64 */
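tlb_flush_pgtable() above builds the virtual page table address it invalidates in the non-tablewalk case. A standalone C model of that construction for the 4K-page, 8-byte-PTE configuration (for 64K pages the shift is PAGE_SHIFT - 4 and the alignment mask is 0xffff, as in the hunk above); it is a sketch for illustration, not patch code:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

/* Keep the region nibble, tag it with 0x1, index by page number * sizeof(pte_t) */
static uint64_t vpte_address(uint64_t ea)
{
	uint64_t rmask = 0xf000000000000000ull;
	uint64_t rid   = (ea & rmask) | 0x1000000000000000ull;
	uint64_t vpte  = ea & ~rmask;

	vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xfffull;
	return vpte | rid;
}

int main(void)
{
	printf("0x%016llx\n",
	       (unsigned long long)vpte_address(0x0000000010000000ull));
	return 0;
}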
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
index c7d89a0adba2..7bcd9fbf6cc6 100644
--- a/arch/powerpc/mm/tlb_nohash_low.S
+++ b/arch/powerpc/mm/tlb_nohash_low.S
@@ -191,6 +191,85 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_TLBILX)
191 191	isync
192 192 1:	wrtee	r10
193 193	blr
194#elif defined(CONFIG_PPC_BOOK3E)
195/*
196 * New Book3E (>= 2.06) implementation
197 *
198 * Note: We may be able to get away without the interrupt masking stuff
199 * if we save/restore MAS6 on exceptions that might modify it
200 */
201_GLOBAL(_tlbil_pid)
202 slwi r4,r3,MAS6_SPID_SHIFT
203 mfmsr r10
204 wrteei 0
205 mtspr SPRN_MAS6,r4
206 PPC_TLBILX_PID(0,0)
207 wrtee r10
208 msync
209 isync
210 blr
211
212_GLOBAL(_tlbil_pid_noind)
213 slwi r4,r3,MAS6_SPID_SHIFT
214 mfmsr r10
215 ori r4,r4,MAS6_SIND
216 wrteei 0
217 mtspr SPRN_MAS6,r4
218 PPC_TLBILX_PID(0,0)
219 wrtee r10
220 msync
221 isync
222 blr
223
224_GLOBAL(_tlbil_all)
225 PPC_TLBILX_ALL(0,0)
226 msync
227 isync
228 blr
229
230_GLOBAL(_tlbil_va)
231 mfmsr r10
232 wrteei 0
233 cmpwi cr0,r6,0
234 slwi r4,r4,MAS6_SPID_SHIFT
235 rlwimi r4,r5,MAS6_ISIZE_SHIFT,MAS6_ISIZE_MASK
236 beq 1f
237 rlwimi r4,r6,MAS6_SIND_SHIFT,MAS6_SIND
2381: mtspr SPRN_MAS6,r4 /* assume AS=0 for now */
239 PPC_TLBILX_VA(0,r3)
240 msync
241 isync
242 wrtee r10
243 blr
244
245_GLOBAL(_tlbivax_bcast)
246 mfmsr r10
247 wrteei 0
248 cmpwi cr0,r6,0
249 slwi r4,r4,MAS6_SPID_SHIFT
250 rlwimi r4,r5,MAS6_ISIZE_SHIFT,MAS6_ISIZE_MASK
251 beq 1f
252 rlwimi r4,r6,MAS6_SIND_SHIFT,MAS6_SIND
2531: mtspr SPRN_MAS6,r4 /* assume AS=0 for now */
254 PPC_TLBIVAX(0,r3)
255 eieio
256 tlbsync
257 sync
258 wrtee r10
259 blr
260
261_GLOBAL(set_context)
262#ifdef CONFIG_BDI_SWITCH
263 /* Context switch the PTE pointer for the Abatron BDI2000.
264 * The PGDIR is the second parameter.
265 */
266 lis r5, abatron_pteptrs@h
267 ori r5, r5, abatron_pteptrs@l
268 stw r4, 0x4(r5)
269#endif
270 mtspr SPRN_PID,r3
271 isync /* Force context change */
272 blr
194 273 #else
195 274 #error Unsupported processor type !
196 275 #endif
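For reference, a hedged C rendering of the MAS6 value that _tlbil_va() and _tlbivax_bcast() above compose before the tlbilx/tlbivax (the MAS6_* macro names are the ones the assembly itself uses; the helper itself is illustrative, not part of the patch):

static inline unsigned long mas6_for_invalidate(unsigned int pid,
						unsigned int tsize,
						unsigned int ind)
{
	unsigned long mas6 = (unsigned long)pid << MAS6_SPID_SHIFT;

	mas6 |= ((unsigned long)tsize << MAS6_ISIZE_SHIFT) & MAS6_ISIZE_MASK;
	if (ind)
		mas6 |= MAS6_SIND;	/* target indirect (IND) entries */
	return mas6;			/* AS assumed 0, as in the assembly */
}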