author		Scott Wood <scottwood@freescale.com>	2011-06-22 07:25:42 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2011-06-29 03:47:48 -0400
commit		f67f4ef5fcdfdeeddcb0ed4ab2c85d9bb4185d5f (patch)
tree		2a2dd8b027cc596dae37dd1b0a3710bca4791ef1 /arch/powerpc/mm/tlb_low_64e.S
parent		3d97a619acbb2c8a7a9a7da08c2d3041dfdd241f (diff)
powerpc/book3e-64: use a separate TLB handler when linear map is bolted
On MMUs such as FSL where we can guarantee the entire linear mapping is
bolted, we don't need to worry about linear TLB misses.  If on top of
that we do a full table walk, we get rid of all recursive TLB faults,
and can dispense with some state saving.  This gains a few percent on
TLB-miss-heavy workloads, and around 50% on a benchmark that had a high
rate of virtual page table faults under the normal handler.

While touching the EX_TLB layout, remove EX_TLB_MMUCR0, EX_TLB_SRR0,
and EX_TLB_SRR1 as they're not used.

[BenH: Fixed build with 64K pages (wsp config)]

Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
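In C terms, the new fast path is simply a software walk of the Linux
page tables followed by a direct TLB write; because the walk itself only
touches bolted (linear-mapped) memory, it can never take a nested TLB
miss.  A rough sketch of the logic, for orientation only: the level
shifts, the 512-entry-per-level geometry, and the bolted_tlb_miss() /
write_tlb_entry() names are illustrative assumptions, not kernel code.

	#include <stdbool.h>
	#include <stdint.h>

	#define LEVELS            4
	#define ENTRIES_PER_LEVEL 512	/* assumed 9-bit index per level */

	typedef uint64_t pte_t;

	/* hypothetical stand-in for the MAS setup + tlbwe sequence */
	extern void write_tlb_entry(uint64_t ea, pte_t pte);

	/* returns true if serviced, false if a storage fault is needed */
	static bool bolted_tlb_miss(uint64_t *pgd, uint64_t ea, pte_t need)
	{
		/* assumed shifts for the pgd/pud/pmd/pte levels (4K pages) */
		static const int shift[LEVELS] = { 39, 30, 21, 12 };
		uint64_t *table = pgd;

		for (int level = 0; level < LEVELS; level++) {
			uint64_t entry =
				table[(ea >> shift[level]) & (ENTRIES_PER_LEVEL - 1)];

			if (!entry)
				return false;		/* hole in the tables */

			if (level == LEVELS - 1) {
				if (need & ~entry)	/* permission missing */
					return false;
				write_tlb_entry(ea, entry);
				return true;
			}
			table = (uint64_t *)entry;	/* descend a level */
		}
		return false;	/* not reached */
	}

The assembly below is this walk unrolled by hand, with the required
permission mask (r11) precomputed from the ESR before the walk starts,
so the final access check is a single andc.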
Diffstat (limited to 'arch/powerpc/mm/tlb_low_64e.S')
-rw-r--r--	arch/powerpc/mm/tlb_low_64e.S	206
1 file changed, 206 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index af0892209417..4ebb34bc01d6 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -30,6 +30,212 @@
 #define VPTE_PGD_SHIFT	(VPTE_PUD_SHIFT + PUD_INDEX_SIZE)
 #define VPTE_INDEX_SIZE	(VPTE_PGD_SHIFT + PGD_INDEX_SIZE)
 
+/**********************************************************************
+ *                                                                    *
+ * TLB miss handling for Book3E with a bolted linear mapping          *
+ * No virtual page table, no nested TLB misses                        *
+ *                                                                    *
+ **********************************************************************/
+
+.macro tlb_prolog_bolted addr
+	mtspr	SPRN_SPRG_TLB_SCRATCH,r13
+	mfspr	r13,SPRN_SPRG_PACA
+	std	r10,PACA_EXTLB+EX_TLB_R10(r13)
+	mfcr	r10
+	std	r11,PACA_EXTLB+EX_TLB_R11(r13)
+	std	r16,PACA_EXTLB+EX_TLB_R16(r13)
+	mfspr	r16,\addr		/* get faulting address */
+	std	r14,PACA_EXTLB+EX_TLB_R14(r13)
+	ld	r14,PACAPGD(r13)
+	std	r15,PACA_EXTLB+EX_TLB_R15(r13)
+	std	r10,PACA_EXTLB+EX_TLB_CR(r13)
+	TLB_MISS_PROLOG_STATS_BOLTED
+.endm
+
+.macro tlb_epilog_bolted
+	ld	r14,PACA_EXTLB+EX_TLB_CR(r13)
+	ld	r10,PACA_EXTLB+EX_TLB_R10(r13)
+	ld	r11,PACA_EXTLB+EX_TLB_R11(r13)
+	mtcr	r14
+	ld	r14,PACA_EXTLB+EX_TLB_R14(r13)
+	ld	r15,PACA_EXTLB+EX_TLB_R15(r13)
+	TLB_MISS_RESTORE_STATS_BOLTED
+	ld	r16,PACA_EXTLB+EX_TLB_R16(r13)
+	mfspr	r13,SPRN_SPRG_TLB_SCRATCH
+.endm
+
+/* Data TLB miss */
+	START_EXCEPTION(data_tlb_miss_bolted)
+	tlb_prolog_bolted SPRN_DEAR
+
+	/* We need _PAGE_PRESENT and _PAGE_ACCESSED set */
+
+	/* We do the user/kernel test for the PID here along with the RW test
+	 */
+	/* We pre-test some combination of permissions to avoid double
+	 * faults:
+	 *
+	 * We move the ESR:ST bit into the position of _PAGE_BAP_SW in the PTE
+	 * ESR_ST is 0x00800000
+	 * _PAGE_BAP_SW is 0x00000010
+	 * So the shift is >> 19. This tests for supervisor writeability.
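+	 * (check: 0x00800000 >> 19 == 0x00000010)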
+	 * If the page happens to be supervisor writeable and not user
+	 * writeable, we will take a new fault later, but that should be
+	 * a rare enough case.
+	 *
+	 * We also move ESR_ST in _PAGE_DIRTY position
+	 * _PAGE_DIRTY is 0x00001000 so the shift is >> 11
+	 *
+	 * MAS1 is preset for all we need except for TID that needs to
+	 * be cleared for kernel translations
+	 */
+
+	mfspr	r11,SPRN_ESR
+
+	srdi	r15,r16,60		/* get region */
+	rldicl.	r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
+	bne-	dtlb_miss_fault_bolted
+
+	rlwinm	r10,r11,32-19,27,27
+	rlwimi	r10,r11,32-16,19,19
+	cmpwi	r15,0
+	ori	r10,r10,_PAGE_PRESENT
+	oris	r11,r10,_PAGE_ACCESSED@h
+
+	TLB_MISS_STATS_SAVE_INFO_BOLTED
+	bne	tlb_miss_kernel_bolted
+
+tlb_miss_common_bolted:
+/*
+ * This is the guts of the TLB miss handler for bolted-linear.
+ * We are entered with:
+ *
+ * r16 = faulting address
+ * r15 = crap (free to use)
+ * r14 = page table base
+ * r13 = PACA
+ * r11 = PTE permission mask
+ * r10 = crap (free to use)
+ */
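+	/* r15 = byte offset of the PGD entry for this EA:
+	 * ((ea >> PGDIR_SHIFT) & ((1 << PGD_INDEX_SIZE) - 1)) << 3,
+	 * computed by the rldicl/clrrdi pair below
+	 */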
+	rldicl	r15,r16,64-PGDIR_SHIFT+3,64-PGD_INDEX_SIZE-3
+	cmpldi	cr0,r14,0
+	clrrdi	r15,r15,3
+	beq	tlb_miss_fault_bolted
+
+BEGIN_MMU_FTR_SECTION
+	/* Set the TLB reservation and search for existing entry. Then load
+	 * the entry.
+	 */
+	PPC_TLBSRX_DOT(0,r16)
+	ldx	r14,r14,r15
+	beq	normal_tlb_miss_done
+MMU_FTR_SECTION_ELSE
+	ldx	r14,r14,r15
+ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)
+
+#ifndef CONFIG_PPC_64K_PAGES
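+	/* With 64K pages the PUD level is folded away (PUD_INDEX_SIZE
+	 * is 0), so this step of the walk exists only for 4K pages.
+	 */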
+	rldicl	r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3
+	clrrdi	r15,r15,3
+
+	cmpldi	cr0,r14,0
+	beq	tlb_miss_fault_bolted
+
+	ldx	r14,r14,r15
+#endif /* CONFIG_PPC_64K_PAGES */
+
+	rldicl	r15,r16,64-PMD_SHIFT+3,64-PMD_INDEX_SIZE-3
+	clrrdi	r15,r15,3
+
+	cmpldi	cr0,r14,0
+	beq	tlb_miss_fault_bolted
+
+	ldx	r14,r14,r15
+
+	rldicl	r15,r16,64-PAGE_SHIFT+3,64-PTE_INDEX_SIZE-3
+	clrrdi	r15,r15,3
+
+	cmpldi	cr0,r14,0
+	beq	tlb_miss_fault_bolted
+
+	ldx	r14,r14,r15
+
+	/* Check if required permissions are met */
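+	/* r11 & ~pte: any required permission bit left set means the
+	 * access is not allowed, take the fault
+	 */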
+	andc.	r15,r11,r14
+	rldicr	r15,r14,64-(PTE_RPN_SHIFT-PAGE_SHIFT),63-PAGE_SHIFT
+	bne-	tlb_miss_fault_bolted
+
+	/* Now we build the MAS:
+	 *
+	 * MAS 0   :	Fully setup with defaults in MAS4 and TLBnCFG
+	 * MAS 1   :	Almost fully setup
+	 *		 - PID already updated by caller if necessary
+	 *		 - TSIZE need change if !base page size, not
+	 *		   yet implemented for now
+	 * MAS 2   :	Defaults not useful, need to be redone
+	 * MAS 3+7 :	Needs to be done
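+	 *
+	 * Note that the PTE _PAGE_BAP_* bits are the MAS3 permission
+	 * bits shifted left by 2, so a single rlwimi below moves all six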
+	 */
+	clrrdi	r11,r16,12		/* Clear low crap in EA */
+	clrldi	r15,r15,12		/* Clear crap at the top */
+	rlwimi	r11,r14,32-19,27,31	/* Insert WIMGE */
+	rlwimi	r15,r14,32-8,22,25	/* Move in U bits */
+	mtspr	SPRN_MAS2,r11
+	andi.	r11,r14,_PAGE_DIRTY
+	rlwimi	r15,r14,32-2,26,31	/* Move in BAP bits */
+
+	/* Mask out SW and UW if !DIRTY (XXX optimize this !) */
+	bne	1f
+	li	r11,MAS3_SW|MAS3_UW
+	andc	r15,r15,r11
+1:
+	mtspr	SPRN_MAS7_MAS3,r15
+	tlbwe
+
+	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK)
+	tlb_epilog_bolted
+	rfi
+
+itlb_miss_kernel_bolted:
+	li	r11,_PAGE_PRESENT|_PAGE_BAP_SX	/* Base perm */
+	oris	r11,r11,_PAGE_ACCESSED@h
+tlb_miss_kernel_bolted:
+	mfspr	r10,SPRN_MAS1
+	ld	r14,PACA_KERNELPGD(r13)
+	cmpldi	cr0,r15,8		/* Check for vmalloc region */
+	rlwinm	r10,r10,0,16,1		/* Clear TID */
+	mtspr	SPRN_MAS1,r10
+	beq+	tlb_miss_common_bolted
+
+tlb_miss_fault_bolted:
+	/* We need to check if it was an instruction miss */
+	andi.	r10,r11,_PAGE_EXEC|_PAGE_BAP_SX
+	bne	itlb_miss_fault_bolted
+dtlb_miss_fault_bolted:
+	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
+	tlb_epilog_bolted
+	b	exc_data_storage_book3e
+itlb_miss_fault_bolted:
+	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
+	tlb_epilog_bolted
+	b	exc_instruction_storage_book3e
+
+/* Instruction TLB miss */
+	START_EXCEPTION(instruction_tlb_miss_bolted)
+	tlb_prolog_bolted SPRN_SRR0
+
+	rldicl.	r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
+	srdi	r15,r16,60		/* get region */
+	TLB_MISS_STATS_SAVE_INFO_BOLTED
+	bne-	itlb_miss_fault_bolted
+
+	li	r11,_PAGE_PRESENT|_PAGE_EXEC	/* Base perm */
+
+	/* We do the user/kernel test for the PID here along with the RW test
+	 */
+
+	cmpldi	cr0,r15,0		/* Check for user region */
+	oris	r11,r11,_PAGE_ACCESSED@h
+	beq	tlb_miss_common_bolted
+	b	itlb_miss_kernel_bolted
 
 /**********************************************************************
  *                                                                    *