Diffstat (limited to 'arch/frv/mm/tlb-miss.S')
 -rw-r--r--   arch/frv/mm/tlb-miss.S   631
 1 file changed, 631 insertions, 0 deletions
diff --git a/arch/frv/mm/tlb-miss.S b/arch/frv/mm/tlb-miss.S
new file mode 100644
index 000000000000..8729f7d7c6e0
--- /dev/null
+++ b/arch/frv/mm/tlb-miss.S
@@ -0,0 +1,631 @@
/* tlb-miss.S: TLB miss handlers
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sys.h>
#include <linux/config.h>
#include <linux/linkage.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/highmem.h>
#include <asm/spr-regs.h>

	.section	.text
	.balign		4

	.globl		__entry_insn_mmu_miss
__entry_insn_mmu_miss:
	break
	nop

	.globl		__entry_insn_mmu_exception
__entry_insn_mmu_exception:
	break
	nop

	.globl		__entry_data_mmu_miss
__entry_data_mmu_miss:
	break
	nop

	.globl		__entry_data_mmu_exception
__entry_data_mmu_exception:
	break
	nop

###############################################################################
#
# handle a lookup failure of one sort or another in a kernel TLB handler
# On entry:
#   GR29 - faulting address
#   SCR2 - saved CCR
#
###############################################################################
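# Note on the single-step test below: PCSR holds the address at which the
# excepting context will resume; if it equals __break_tlb_miss_return_break,
# the miss interrupted the debugger's single-step return path, so the saved
# PCSR/PSR pair has to be reinstated before handing off (a sketch inferred
# from the code below, not a guarantee)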
	.type		__tlb_kernel_fault,@function
__tlb_kernel_fault:
	# see if we're supposed to re-enable single-step mode upon return
	sethi.p		%hi(__break_tlb_miss_return_break),gr30
	setlo		%lo(__break_tlb_miss_return_break),gr30
	movsg		pcsr,gr31

	subcc		gr31,gr30,gr0,icc0
	beq		icc0,#0,__tlb_kernel_fault_sstep

	movsg		scr2,gr30
	movgs		gr30,ccr
	movgs		gr29,scr2			/* save EAR0 value */
	sethi.p		%hi(__kernel_current_task),gr29
	setlo		%lo(__kernel_current_task),gr29
	ldi.p		@(gr29,#0),gr29			/* restore GR29 */

	bra		__entry_kernel_handle_mmu_fault

	# we've got to re-enable single-stepping
__tlb_kernel_fault_sstep:
	sethi.p		%hi(__break_tlb_miss_real_return_info),gr30
	setlo		%lo(__break_tlb_miss_real_return_info),gr30
	lddi		@(gr30,0),gr30
	movgs		gr30,pcsr
	movgs		gr31,psr

	movsg		scr2,gr30
	movgs		gr30,ccr
	movgs		gr29,scr2			/* save EAR0 value */
	sethi.p		%hi(__kernel_current_task),gr29
	setlo		%lo(__kernel_current_task),gr29
	ldi.p		@(gr29,#0),gr29			/* restore GR29 */
	bra		__entry_kernel_handle_mmu_fault_sstep

	.size		__tlb_kernel_fault, .-__tlb_kernel_fault

###############################################################################
#
# handle a lookup failure of one sort or another in a user TLB handler
# On entry:
#   GR28 - faulting address
#   SCR2 - saved CCR
#
###############################################################################
	.type		__tlb_user_fault,@function
__tlb_user_fault:
	# see if we're supposed to re-enable single-step mode upon return
	sethi.p		%hi(__break_tlb_miss_return_break),gr30
	setlo		%lo(__break_tlb_miss_return_break),gr30
	movsg		pcsr,gr31
	subcc		gr31,gr30,gr0,icc0
	beq		icc0,#0,__tlb_user_fault_sstep

	movsg		scr2,gr30
	movgs		gr30,ccr
	bra		__entry_uspace_handle_mmu_fault

	# we've got to re-enable single-stepping
__tlb_user_fault_sstep:
	sethi.p		%hi(__break_tlb_miss_real_return_info),gr30
	setlo		%lo(__break_tlb_miss_real_return_info),gr30
	lddi		@(gr30,0),gr30
	movgs		gr30,pcsr
	movgs		gr31,psr
	movsg		scr2,gr30
	movgs		gr30,ccr
	bra		__entry_uspace_handle_mmu_fault_sstep

	.size		__tlb_user_fault, .-__tlb_user_fault

###############################################################################
#
# Kernel instruction TLB miss handler
# On entry:
#   GR1   - kernel stack pointer
#   GR28  - saved exception frame pointer
#   GR29  - faulting address
#   GR31  - EAR0 ^ SCR0
#   SCR0  - base of virtual range covered by cached PGE from last ITLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR4 - mapped page table as matched by SCR0
#
###############################################################################
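# Address breakdown assumed by this walk (derived from the constants below):
# for a faulting address EA,
#	EA[31:26] indexes one of the 64 PGEs in the page directory
#	EA[25:14] indexes one of the 4096 PTEs in the matched page table
#	EA[13:0]  is the byte offset within the 16Kb page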
	.globl		__entry_kernel_insn_tlb_miss
	.type		__entry_kernel_insn_tlb_miss,@function
__entry_kernel_insn_tlb_miss:
#if 0
	sethi.p		%hi(0xe1200004),gr30
	setlo		%lo(0xe1200004),gr30
	st		gr0,@(gr30,gr0)
	sethi.p		%hi(0xffc00100),gr30
	setlo		%lo(0xffc00100),gr30
	sth		gr30,@(gr30,gr0)
	membar
#endif

	movsg		ccr,gr30			/* save CCR */
	movgs		gr30,scr2

	# see if the cached page table mapping is appropriate
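	# - GR31 arrived holding EAR0 ^ SCR0, so a zero result from the
	#   26-bit shift means the fault lies in the same 64Mb region as the
	#   PTD already mapped through DAMR4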
	srlicc.p	gr31,#26,gr0,icc0
	setlos		0x3ffc,gr30
	srli.p		gr29,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__itlb_k_PTD_miss

__itlb_k_PTD_mapped:
	# access the PTD with EAR0[25:14]
	# - DAMLR4 points to the virtual address of the appropriate page table
	# - the PTD holds 4096 PTEs
	# - the PTD must be accessed uncached
	# - the PTE must be marked accessed if it was valid
	#
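	# note the index arithmetic: (EAR0 >> 12) & 0x3ffc is EAR0[25:14]
	# scaled by 4, i.e. the PTE index turned directly into a byte offset
	# for the word-sized PTEs fetched below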
	and		gr31,gr30,gr31
	movsg		damlr4,gr30
	add		gr30,gr31,gr31
	ldi		@(gr31,#0),gr30			/* fetch the PTE */
	andicc		gr30,#_PAGE_PRESENT,gr0,icc0
	ori.p		gr30,#_PAGE_ACCESSED,gr30
	beq		icc0,#0,__tlb_kernel_fault	/* jump if PTE invalid */
	sti.p		gr30,@(gr31,#0)			/* update the PTE */
	andi		gr30,#~_PAGE_ACCESSED,gr30

	# we're using IAMR1 as an extra TLB entry
	# - punt the entry here (if valid) to the real TLB and then replace with the new PTE
	# - need to check DAMR1 lest we cause a multiple-DAT-hit exception
	# - IAMPR1 has no WP bit, and we mustn't lose WP information
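	# - as the operation comments below indicate, tlbpr op #4 deletes any
	#   matching entries and op #2 then writes the TPLR/TPPR pair into
	#   the TLB proper, with TPXR.E reporting a failed write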
	movsg		iampr1,gr31
	andicc		gr31,#xAMPRx_V,gr0,icc0
	setlos.p	0xfffff000,gr31
	beq		icc0,#0,__itlb_k_nopunt		/* punt not required */

	movsg		iamlr1,gr31
	movgs		gr31,tplr			/* set TPLR.CXN */
	tlbpr		gr31,gr0,#4,#0			/* delete matches from TLB, IAMR1, DAMR1 */

	movsg		dampr1,gr31
	ori		gr31,#xAMPRx_V,gr31		/* entry was invalidated by tlbpr #4 */
	movgs		gr31,tppr
	movsg		iamlr1,gr31			/* set TPLR.CXN */
	movgs		gr31,tplr
	tlbpr		gr31,gr0,#2,#0			/* save to the TLB */
	movsg		tpxr,gr31			/* check the TLB write error flag */
	andicc.p	gr31,#TPXR_E,gr0,icc0
	setlos		#0xfffff000,gr31
	bne		icc0,#0,__tlb_kernel_fault

__itlb_k_nopunt:

	# assemble the new TLB entry
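	# - GR31 holds 0xfffff000 on both arrival paths, so the AND strips
	#   the page offset from the fault address before the CXNR context
	#   number is OR'd into the low bits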
	and		gr29,gr31,gr29
	movsg		cxnr,gr31
	or		gr29,gr31,gr29
	movgs		gr29,iamlr1			/* xAMLR = address | context number */
	movgs		gr30,iampr1
	movgs		gr29,damlr1
	movgs		gr30,dampr1

	# return, restoring registers
	movsg		scr2,gr30
	movgs		gr30,ccr
	sethi.p		%hi(__kernel_current_task),gr29
	setlo		%lo(__kernel_current_task),gr29
	ldi		@(gr29,#0),gr29
	rett		#0
	beq		icc0,#3,0			/* prevent icache prefetch */

	# the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
	# appropriate page table and map that instead
	# - access the PGD with EAR0[31:26]
	# - DAMLR3 points to the virtual address of the page directory
	# - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
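	# - (EAR0 >> 26) << 8 below is the PGE index scaled by 256, which
	#   implies each PGE slot occupies 256 bytes of the mapped directory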
__itlb_k_PTD_miss:
	srli		gr29,#26,gr31			/* calculate PGE offset */
	slli		gr31,#8,gr31			/* and clear bottom bits */

	movsg		damlr3,gr30
	ld		@(gr31,gr30),gr30		/* access the PGE */

	andicc.p	gr30,#_PAGE_PRESENT,gr0,icc0
	andicc		gr30,#xAMPRx_SS,gr0,icc1

	# map this PTD instead and record coverage address
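	# - the PGE word itself becomes the new DAMPR4 value once the mapping
	#   control bits (lock, 16Kb size, supervisor, cache control, valid)
	#   are OR'd in; SCR0 then receives EAR0 & 0xfc000000, the 64Mb-aligned
	#   base of the range this PTD covers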
	ori.p		gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
	beq		icc0,#0,__tlb_kernel_fault	/* jump if PGE not present */
	slli.p		gr31,#18,gr31
	bne		icc1,#0,__itlb_k_bigpage
	movgs		gr30,dampr4
	movgs		gr31,scr0

	# we can now resume normal service
	setlos		0x3ffc,gr30
	srli.p		gr29,#12,gr31			/* use EAR0[25:14] as PTE index */
	bra		__itlb_k_PTD_mapped

__itlb_k_bigpage:
	break
	nop

	.size		__entry_kernel_insn_tlb_miss, .-__entry_kernel_insn_tlb_miss

###############################################################################
#
# Kernel data TLB miss handler
# On entry:
#   GR1   - kernel stack pointer
#   GR28  - saved exception frame pointer
#   GR29  - faulting address
#   GR31  - EAR0 ^ SCR1
#   SCR1  - base of virtual range covered by cached PGE from last DTLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR5 - mapped page table as matched by SCR1
#
###############################################################################
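# This is the data-side twin of the handler above: the walk is identical,
# but the cached PTD mapping lives in DAMR5 with its coverage base in SCR1,
# and the punt goes through the DAMLR1/DAMPR1 pair rather than IAMLR1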
	.globl		__entry_kernel_data_tlb_miss
	.type		__entry_kernel_data_tlb_miss,@function
__entry_kernel_data_tlb_miss:
#if 0
	sethi.p		%hi(0xe1200004),gr30
	setlo		%lo(0xe1200004),gr30
	st		gr0,@(gr30,gr0)
	sethi.p		%hi(0xffc00100),gr30
	setlo		%lo(0xffc00100),gr30
	sth		gr30,@(gr30,gr0)
	membar
#endif

	movsg		ccr,gr30			/* save CCR */
	movgs		gr30,scr2

	# see if the cached page table mapping is appropriate
	srlicc.p	gr31,#26,gr0,icc0
	setlos		0x3ffc,gr30
	srli.p		gr29,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__dtlb_k_PTD_miss

__dtlb_k_PTD_mapped:
	# access the PTD with EAR0[25:14]
	# - DAMLR5 points to the virtual address of the appropriate page table
	# - the PTD holds 4096 PTEs
	# - the PTD must be accessed uncached
	# - the PTE must be marked accessed if it was valid
	#
	and		gr31,gr30,gr31
	movsg		damlr5,gr30
	add		gr30,gr31,gr31
	ldi		@(gr31,#0),gr30			/* fetch the PTE */
	andicc		gr30,#_PAGE_PRESENT,gr0,icc0
	ori.p		gr30,#_PAGE_ACCESSED,gr30
	beq		icc0,#0,__tlb_kernel_fault	/* jump if PTE invalid */
	sti.p		gr30,@(gr31,#0)			/* update the PTE */
	andi		gr30,#~_PAGE_ACCESSED,gr30

	# we're using DAMR1 as an extra TLB entry
	# - punt the entry here (if valid) to the real TLB and then replace with the new PTE
	# - need to check IAMR1 lest we cause a multiple-DAT-hit exception
	movsg		dampr1,gr31
	andicc		gr31,#xAMPRx_V,gr0,icc0
	setlos.p	0xfffff000,gr31
	beq		icc0,#0,__dtlb_k_nopunt		/* punt not required */

	movsg		damlr1,gr31
	movgs		gr31,tplr			/* set TPLR.CXN */
	tlbpr		gr31,gr0,#4,#0			/* delete matches from TLB, IAMR1, DAMR1 */

	movsg		dampr1,gr31
	ori		gr31,#xAMPRx_V,gr31		/* entry was invalidated by tlbpr #4 */
	movgs		gr31,tppr
	movsg		damlr1,gr31			/* set TPLR.CXN */
	movgs		gr31,tplr
	tlbpr		gr31,gr0,#2,#0			/* save to the TLB */
	movsg		tpxr,gr31			/* check the TLB write error flag */
	andicc.p	gr31,#TPXR_E,gr0,icc0
	setlos		#0xfffff000,gr31
	bne		icc0,#0,__tlb_kernel_fault

__dtlb_k_nopunt:

	# assemble the new TLB entry
	and		gr29,gr31,gr29
	movsg		cxnr,gr31
	or		gr29,gr31,gr29
	movgs		gr29,iamlr1			/* xAMLR = address | context number */
	movgs		gr30,iampr1
	movgs		gr29,damlr1
	movgs		gr30,dampr1

	# return, restoring registers
	movsg		scr2,gr30
	movgs		gr30,ccr
	sethi.p		%hi(__kernel_current_task),gr29
	setlo		%lo(__kernel_current_task),gr29
	ldi		@(gr29,#0),gr29
	rett		#0
	beq		icc0,#3,0			/* prevent icache prefetch */

	# the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
	# appropriate page table and map that instead
	# - access the PGD with EAR0[31:26]
	# - DAMLR3 points to the virtual address of the page directory
	# - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
__dtlb_k_PTD_miss:
	srli		gr29,#26,gr31			/* calculate PGE offset */
	slli		gr31,#8,gr31			/* and clear bottom bits */

	movsg		damlr3,gr30
	ld		@(gr31,gr30),gr30		/* access the PGE */

	andicc.p	gr30,#_PAGE_PRESENT,gr0,icc0
	andicc		gr30,#xAMPRx_SS,gr0,icc1

	# map this PTD instead and record coverage address
	ori.p		gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
	beq		icc0,#0,__tlb_kernel_fault	/* jump if PGE not present */
	slli.p		gr31,#18,gr31
	bne		icc1,#0,__dtlb_k_bigpage
	movgs		gr30,dampr5
	movgs		gr31,scr1

	# we can now resume normal service
	setlos		0x3ffc,gr30
	srli.p		gr29,#12,gr31			/* use EAR0[25:14] as PTE index */
	bra		__dtlb_k_PTD_mapped

__dtlb_k_bigpage:
	break
	nop

	.size		__entry_kernel_data_tlb_miss, .-__entry_kernel_data_tlb_miss

###############################################################################
#
# Userspace instruction TLB miss handler (with PGE prediction)
# On entry:
#   GR28  - faulting address
#   GR31  - EAR0 ^ SCR0
#   SCR0  - base of virtual range covered by cached PGE from last ITLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR4 - mapped page table as matched by SCR0
#
###############################################################################
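# Unlike the kernel-mode handlers, the user-mode paths keep the faulting
# address in GR28 and never clobber GR29, so no __kernel_current_task reload
# is needed on the way out (GR29 appears to carry the kernel's current-task
# pointer, judging by the restore in the kernel paths)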
	.globl		__entry_user_insn_tlb_miss
	.type		__entry_user_insn_tlb_miss,@function
__entry_user_insn_tlb_miss:
#if 0
	sethi.p		%hi(0xe1200004),gr30
	setlo		%lo(0xe1200004),gr30
	st		gr0,@(gr30,gr0)
	sethi.p		%hi(0xffc00100),gr30
	setlo		%lo(0xffc00100),gr30
	sth		gr30,@(gr30,gr0)
	membar
#endif

	movsg		ccr,gr30			/* save CCR */
	movgs		gr30,scr2

	# see if the cached page table mapping is appropriate
	srlicc.p	gr31,#26,gr0,icc0
	setlos		0x3ffc,gr30
	srli.p		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__itlb_u_PTD_miss

__itlb_u_PTD_mapped:
	# access the PTD with EAR0[25:14]
	# - DAMLR4 points to the virtual address of the appropriate page table
	# - the PTD holds 4096 PTEs
	# - the PTD must be accessed uncached
	# - the PTE must be marked accessed if it was valid
	#
	and		gr31,gr30,gr31
	movsg		damlr4,gr30
	add		gr30,gr31,gr31
	ldi		@(gr31,#0),gr30			/* fetch the PTE */
	andicc		gr30,#_PAGE_PRESENT,gr0,icc0
	ori.p		gr30,#_PAGE_ACCESSED,gr30
	beq		icc0,#0,__tlb_user_fault	/* jump if PTE invalid */
	sti.p		gr30,@(gr31,#0)			/* update the PTE */
	andi		gr30,#~_PAGE_ACCESSED,gr30

	# we're using IAMR1/DAMR1 as an extra TLB entry
	# - punt the entry here (if valid) to the real TLB and then replace with the new PTE
	movsg		dampr1,gr31
	andicc		gr31,#xAMPRx_V,gr0,icc0
	setlos.p	0xfffff000,gr31
	beq		icc0,#0,__itlb_u_nopunt		/* punt not required */

	movsg		dampr1,gr31
	movgs		gr31,tppr
	movsg		damlr1,gr31			/* set TPLR.CXN */
	movgs		gr31,tplr
	tlbpr		gr31,gr0,#2,#0			/* save to the TLB */
	movsg		tpxr,gr31			/* check the TLB write error flag */
	andicc.p	gr31,#TPXR_E,gr0,icc0
	setlos		#0xfffff000,gr31
	bne		icc0,#0,__tlb_user_fault

__itlb_u_nopunt:

	# assemble the new TLB entry
	and		gr28,gr31,gr28
	movsg		cxnr,gr31
	or		gr28,gr31,gr28
	movgs		gr28,iamlr1			/* xAMLR = address | context number */
	movgs		gr30,iampr1
	movgs		gr28,damlr1
	movgs		gr30,dampr1

	# return, restoring registers
	movsg		scr2,gr30
	movgs		gr30,ccr
	rett		#0
	beq		icc0,#3,0			/* prevent icache prefetch */

	# the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
	# appropriate page table and map that instead
	# - access the PGD with EAR0[31:26]
	# - DAMLR3 points to the virtual address of the page directory
	# - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
__itlb_u_PTD_miss:
	srli		gr28,#26,gr31			/* calculate PGE offset */
	slli		gr31,#8,gr31			/* and clear bottom bits */

	movsg		damlr3,gr30
	ld		@(gr31,gr30),gr30		/* access the PGE */

	andicc.p	gr30,#_PAGE_PRESENT,gr0,icc0
	andicc		gr30,#xAMPRx_SS,gr0,icc1

	# map this PTD instead and record coverage address
	ori.p		gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
	beq		icc0,#0,__tlb_user_fault	/* jump if PGE not present */
	slli.p		gr31,#18,gr31
	bne		icc1,#0,__itlb_u_bigpage
	movgs		gr30,dampr4
	movgs		gr31,scr0

	# we can now resume normal service
	setlos		0x3ffc,gr30
	srli.p		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bra		__itlb_u_PTD_mapped

__itlb_u_bigpage:
	break
	nop

	.size		__entry_user_insn_tlb_miss, .-__entry_user_insn_tlb_miss

###############################################################################
#
# Userspace data TLB miss handler
# On entry:
#   GR28  - faulting address
#   GR31  - EAR0 ^ SCR1
#   SCR1  - base of virtual range covered by cached PGE from last DTLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR5 - mapped page table as matched by SCR1
#
###############################################################################
	.globl		__entry_user_data_tlb_miss
	.type		__entry_user_data_tlb_miss,@function
__entry_user_data_tlb_miss:
#if 0
	sethi.p		%hi(0xe1200004),gr30
	setlo		%lo(0xe1200004),gr30
	st		gr0,@(gr30,gr0)
	sethi.p		%hi(0xffc00100),gr30
	setlo		%lo(0xffc00100),gr30
	sth		gr30,@(gr30,gr0)
	membar
#endif

	movsg		ccr,gr30			/* save CCR */
	movgs		gr30,scr2

	# see if the cached page table mapping is appropriate
	srlicc.p	gr31,#26,gr0,icc0
	setlos		0x3ffc,gr30
	srli.p		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__dtlb_u_PTD_miss

__dtlb_u_PTD_mapped:
	# access the PTD with EAR0[25:14]
	# - DAMLR5 points to the virtual address of the appropriate page table
	# - the PTD holds 4096 PTEs
	# - the PTD must be accessed uncached
	# - the PTE must be marked accessed if it was valid
	#
	and		gr31,gr30,gr31
	movsg		damlr5,gr30

__dtlb_u_using_iPTD:
	add		gr30,gr31,gr31
	ldi		@(gr31,#0),gr30			/* fetch the PTE */
	andicc		gr30,#_PAGE_PRESENT,gr0,icc0
	ori.p		gr30,#_PAGE_ACCESSED,gr30
	beq		icc0,#0,__tlb_user_fault	/* jump if PTE invalid */
	sti.p		gr30,@(gr31,#0)			/* update the PTE */
	andi		gr30,#~_PAGE_ACCESSED,gr30

	# we're using DAMR1 as an extra TLB entry
	# - punt the entry here (if valid) to the real TLB and then replace with the new PTE
	movsg		dampr1,gr31
	andicc		gr31,#xAMPRx_V,gr0,icc0
	setlos.p	0xfffff000,gr31
	beq		icc0,#0,__dtlb_u_nopunt		/* punt not required */

	movsg		dampr1,gr31
	movgs		gr31,tppr
	movsg		damlr1,gr31			/* set TPLR.CXN */
	movgs		gr31,tplr
	tlbpr		gr31,gr0,#2,#0			/* save to the TLB */
	movsg		tpxr,gr31			/* check the TLB write error flag */
	andicc.p	gr31,#TPXR_E,gr0,icc0
	setlos		#0xfffff000,gr31
	bne		icc0,#0,__tlb_user_fault

__dtlb_u_nopunt:

	# assemble the new TLB entry
	and		gr28,gr31,gr28
	movsg		cxnr,gr31
	or		gr28,gr31,gr28
	movgs		gr28,iamlr1			/* xAMLR = address | context number */
	movgs		gr30,iampr1
	movgs		gr28,damlr1
	movgs		gr30,dampr1

	# return, restoring registers
	movsg		scr2,gr30
	movgs		gr30,ccr
	rett		#0
	beq		icc0,#3,0			/* prevent icache prefetch */

	# the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
	# appropriate page table and map that instead
	# - first of all, check the insn PGE cache - we may well get a hit there
	# - access the PGD with EAR0[31:26]
	# - DAMLR3 points to the virtual address of the page directory
	# - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
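	# - consulting SCR0 below repeats the entry-time same-64Mb test, but
	#   against the instruction side's cached coverage base; on a hit the
	#   PTD already mapped through DAMR4 is reused via __dtlb_u_using_iPTD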
__dtlb_u_PTD_miss:
	movsg		scr0,gr31			/* consult the insn-PGE-cache key */
	xor		gr28,gr31,gr31
	srlicc		gr31,#26,gr0,icc0
	srli		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__dtlb_u_iPGE_miss

	# what we're looking for is covered by the insn-PGE-cache
	setlos		0x3ffc,gr30
	and		gr31,gr30,gr31
	movsg		damlr4,gr30
	bra		__dtlb_u_using_iPTD

__dtlb_u_iPGE_miss:
	srli		gr28,#26,gr31			/* calculate PGE offset */
	slli		gr31,#8,gr31			/* and clear bottom bits */

	movsg		damlr3,gr30
	ld		@(gr31,gr30),gr30		/* access the PGE */

	andicc.p	gr30,#_PAGE_PRESENT,gr0,icc0
	andicc		gr30,#xAMPRx_SS,gr0,icc1

	# map this PTD instead and record coverage address
	ori.p		gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
	beq		icc0,#0,__tlb_user_fault	/* jump if PGE not present */
	slli.p		gr31,#18,gr31
	bne		icc1,#0,__dtlb_u_bigpage
	movgs		gr30,dampr5
	movgs		gr31,scr1

	# we can now resume normal service
	setlos		0x3ffc,gr30
	srli.p		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bra		__dtlb_u_PTD_mapped

__dtlb_u_bigpage:
	break
	nop

	.size		__entry_user_data_tlb_miss, .-__entry_user_data_tlb_miss