author	Chen, Kenneth W <kenneth.w.chen@intel.com>	2005-11-17 04:55:34 -0500
committer	Tony Luck <tony.luck@intel.com>	2005-11-17 12:48:15 -0500
commit	e8aabc47168d24eabc08418db4e034a4c625721c (patch)
tree	fb35c1572de0967069e1c4a005ba841a604ad73e /arch/ia64
parent	fedb25fae72bc2c3709448a43be067439643da87 (diff)
[IA64] polish comments for tlb fault handler in ivt.S
Polish the comments, specifically in the vhpt_miss and nested_dtlb_miss
handlers.  I think it's better to explicitly name each page table level
by its name instead of numbering them, i.e., use pgd, pud, pmd, and pte
instead of referring to them as L1, L2, L3, etc.  Along the same lines,
remove some magic numbers from the comments, like:
"PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)".  No code change at all,
pure comment update.  Feel free to shoot anything you have, darts or
tomahawk cruise missile.  I will duck behind a bunker ;-)

Signed-off-by: Ken Chen <kenneth.w.chen@intel.com>
Acked-by: Robin Holt <holt@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
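For reference, the names the patch adopts are the generic Linux page-table
accessors.  A minimal sketch of the equivalent software walk, assuming a
2.6-era kernel with CONFIG_PGTABLE_4 (illustrative only, not part of the
patch; in that configuration the old comments' L1/L2/L3/L4 correspond to
pgd/pud/pmd/pte):

/* Illustrative software page-table walk using the names the rewritten
 * comments adopt.  Error handling is trimmed to the bare present checks. */
#include <linux/mm.h>

static pte_t *walk_page_table(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);	/* formerly "L1" in the comments */
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, addr);		/* "L2" (CONFIG_PGTABLE_4 only) */
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);		/* "L3" */
	if (pmd_none(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);	/* "L4", the pte itself */
}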
Diffstat (limited to 'arch/ia64')
-rw-r--r--	arch/ia64/kernel/ivt.S	133
1 file changed, 71 insertions(+), 62 deletions(-)
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index c71c79262a48..301f2e9d262e 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -91,16 +91,17 @@ ENTRY(vhpt_miss)
  * (the "original") TLB miss, which may either be caused by an instruction
  * fetch or a data access (or non-access).
  *
- * What we do here is normal TLB miss handing for the _original_ miss, followed
- * by inserting the TLB entry for the virtual page table page that the VHPT
- * walker was attempting to access.  The latter gets inserted as long
- * as both L1 and L2 have valid mappings for the faulting address.
- * The TLB entry for the original miss gets inserted only if
- * the L3 entry indicates that the page is present.
+ * What we do here is normal TLB miss handing for the _original_ miss,
+ * followed by inserting the TLB entry for the virtual page table page
+ * that the VHPT walker was attempting to access.  The latter gets
+ * inserted as long as page table entry above pte level have valid
+ * mappings for the faulting address.  The TLB entry for the original
+ * miss gets inserted only if the pte entry indicates that the page is
+ * present.
  *
  * do_page_fault gets invoked in the following cases:
  *	- the faulting virtual address uses unimplemented address bits
- *	- the faulting virtual address has no L1, L2, or L3 mapping
+ *	- the faulting virtual address has no valid page table mapping
  */
 	mov r16=cr.ifa				// get address that caused the TLB miss
 #ifdef CONFIG_HUGETLB_PAGE
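The insertion policy the rewritten comment describes, condensed into C.  A
hedged sketch, not kernel code: insert_translation() and do_page_fault_stub()
are hypothetical stand-ins for the itc.d/itc.i instructions and the branch to
page_fault, and the failure cases are collapsed into one test:

#include <stdint.h>

#define PAGE_P_BIT 0x1	/* present bit, as tested by tbit.z ...,_PAGE_P_BIT */

static void insert_translation(uint64_t vaddr, uint64_t pte)
{
	(void)vaddr; (void)pte;		/* stand-in for itc.d / itc.i */
}

static void do_page_fault_stub(void)
{
	/* stand-in for the branch to page_fault, handled in C */
}

/* levels_valid: pgd/pud/pmd all had valid mappings for the faulting
 * address (ifa); iha is the VHPT address the hardware walker touched. */
static void vhpt_miss_policy(int levels_valid, uint64_t pte,
			     uint64_t ifa, uint64_t iha, uint64_t vhpt_page_pte)
{
	if (!levels_valid || !(pte & PAGE_P_BIT)) {
		do_page_fault_stub();		/* no valid mapping for ifa */
		return;
	}
	insert_translation(ifa, pte);		/* TLB entry for the original miss */
	insert_translation(iha, vhpt_page_pte);	/* TLB entry for the VHPT page */
}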
@@ -126,7 +127,7 @@ ENTRY(vhpt_miss)
 #endif
 	;;
 	cmp.eq p6,p7=5,r17			// is IFA pointing into to region 5?
-	shr.u r18=r22,PGDIR_SHIFT		// get bits 33-63 of the faulting address
+	shr.u r18=r22,PGDIR_SHIFT		// get bottom portion of pgd index bit
 	;;
 (p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place
 
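The region test above is what the old comments' IFA(61,63) magic referred to,
and it is why the two pgd_offset dep variants exist in the next hunk.  A small
runnable illustration with made-up addresses; the only real fact used is that
an ia64 virtual address carries its region number in bits 61-63:

/* Runnable illustration of the region split tested by "cmp.eq p6,p7=5,r17". */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t ifa[] = {
		0x2000000000325000ULL,	/* hypothetical user address, region 1 */
		0xa000000100054320ULL,	/* hypothetical kernel address, region 5 */
	};
	for (int i = 0; i < 2; i++) {
		unsigned region = (unsigned)(ifa[i] >> 61);
		printf("0x%016llx -> region %u (%s pgd indexing)\n",
		       (unsigned long long)ifa[i], region,
		       region == 5 ? "region-5" : "region[0-4]");
	}
	return 0;
}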
@@ -137,38 +138,38 @@ ENTRY(vhpt_miss)
 (p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
 (p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
 	;;
-(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=PTA + IFA(33,42)*8
-(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
+(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=pgd_offset for region 5
+(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=pgd_offset for region[0-4]
 	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
 #ifdef CONFIG_PGTABLE_4
-	shr.u r28=r22,PUD_SHIFT			// shift L2 index into position
+	shr.u r28=r22,PUD_SHIFT			// shift pud index into position
 #else
-	shr.u r18=r22,PMD_SHIFT			// shift L3 index into position
+	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
 #endif
 	;;
-	ld8 r17=[r17]				// fetch the L1 entry (may be 0)
+	ld8 r17=[r17]				// get *pgd (may be 0)
 	;;
-(p7)	cmp.eq p6,p7=r17,r0			// was L1 entry NULL?
+(p7)	cmp.eq p6,p7=r17,r0			// was pgd_present(*pgd) == NULL?
 #ifdef CONFIG_PGTABLE_4
-	dep r28=r28,r17,3,(PAGE_SHIFT-3)	// compute address of L2 page table entry
+	dep r28=r28,r17,3,(PAGE_SHIFT-3)	// r28=pud_offset(pgd,addr)
 	;;
-	shr.u r18=r22,PMD_SHIFT			// shift L3 index into position
-(p7)	ld8 r29=[r28]				// fetch the L2 entry (may be 0)
+	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
+(p7)	ld8 r29=[r28]				// get *pud (may be 0)
 	;;
-(p7)	cmp.eq.or.andcm p6,p7=r29,r0		// was L2 entry NULL?
-	dep r17=r18,r29,3,(PAGE_SHIFT-3)	// compute address of L3 page table entry
+(p7)	cmp.eq.or.andcm p6,p7=r29,r0		// was pud_present(*pud) == NULL?
+	dep r17=r18,r29,3,(PAGE_SHIFT-3)	// r17=pmd_offset(pud,addr)
 #else
-	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// compute address of L3 page table entry
+	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// r17=pmd_offset(pgd,addr)
 #endif
 	;;
-(p7)	ld8 r20=[r17]				// fetch the L3 entry (may be 0)
-	shr.u r19=r22,PAGE_SHIFT		// shift L4 index into position
+(p7)	ld8 r20=[r17]				// get *pmd (may be 0)
+	shr.u r19=r22,PAGE_SHIFT		// shift pte index into position
 	;;
-(p7)	cmp.eq.or.andcm p6,p7=r20,r0		// was L3 entry NULL?
-	dep r21=r19,r20,3,(PAGE_SHIFT-3)	// compute address of L4 page table entry
+(p7)	cmp.eq.or.andcm p6,p7=r20,r0		// was pmd_present(*pmd) == NULL?
+	dep r21=r19,r20,3,(PAGE_SHIFT-3)	// r21=pte_offset(pmd,addr)
 	;;
-(p7)	ld8 r18=[r21]				// read the L4 PTE
-	mov r19=cr.isr				// cr.isr bit 0 tells us if this is an insn miss
+(p7)	ld8 r18=[r21]				// read *pte
+	mov r19=cr.isr				// cr.isr bit 32 tells us if this is an insn miss
 	;;
 (p7)	tbit.z p6,p7=r18,_PAGE_P_BIT		// page present bit cleared?
 	mov r22=cr.iha				// get the VHPT address that caused the TLB miss
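Each dep (deposit) in the walk above merges a level's index into the
page-frame address read from the previous level, replacing bits 3 through
PAGE_SHIFT-1; it is the single-instruction form of "table base + index * 8".
A runnable sketch of that bit arithmetic; PAGE_SHIFT=14 assumes the common
16KB-page configuration and the addresses are made up:

/* What "dep rX=index,base,3,(PAGE_SHIFT-3)" computes, spelled out in C. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	14
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))	/* 8-byte entries per page */

static uint64_t dep(uint64_t val, uint64_t base, int pos, int len)
{
	uint64_t mask = ((1ULL << len) - 1) << pos;
	return (base & ~mask) | ((val << pos) & mask);
}

int main(void)
{
	uint64_t pmd_table = 0xe000000004abc000ULL;	/* hypothetical *pgd value */
	uint64_t addr      = 0x200000000789a000ULL;	/* hypothetical faulting address */
	uint64_t idx       = addr >> PMD_SHIFT;		/* shr.u r18=r22,PMD_SHIFT */
	uint64_t entry     = dep(idx, pmd_table, 3, PAGE_SHIFT - 3);

	/* Same result as: table base + (index % entries-per-page) * 8,
	 * since page-table pages are page aligned. */
	printf("pmd entry address: 0x%016llx\n", (unsigned long long)entry);
	return 0;
}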
@@ -202,25 +203,33 @@ ENTRY(vhpt_miss)
 	dv_serialize_data
 
 	/*
-	 * Re-check L2 and L3 pagetable.  If they changed, we may have received a ptc.g
+	 * Re-check pagetable entry.  If they changed, we may have received a ptc.g
 	 * between reading the pagetable and the "itc".  If so, flush the entry we
-	 * inserted and retry.
+	 * inserted and retry.  At this point, we have:
+	 *
+	 * r28 = equivalent of pud_offset(pgd, ifa)
+	 * r17 = equivalent of pmd_offset(pud, ifa)
+	 * r21 = equivalent of pte_offset(pmd, ifa)
+	 *
+	 * r29 = *pud
+	 * r20 = *pmd
+	 * r18 = *pte
 	 */
-	ld8 r25=[r21]				// read L4 entry again
-	ld8 r26=[r17]				// read L3 PTE again
+	ld8 r25=[r21]				// read *pte again
+	ld8 r26=[r17]				// read *pmd again
 #ifdef CONFIG_PGTABLE_4
-	ld8 r19=[r28]				// read L2 entry again
+	ld8 r19=[r28]				// read *pud again
 #endif
 	cmp.ne p6,p7=r0,r0
 	;;
-	cmp.ne.or.andcm p6,p7=r26,r20		// did L3 entry change
+	cmp.ne.or.andcm p6,p7=r26,r20		// did *pmd change
 #ifdef CONFIG_PGTABLE_4
-	cmp.ne.or.andcm p6,p7=r19,r29		// did L4 PTE change
+	cmp.ne.or.andcm p6,p7=r19,r29		// did *pud change
 #endif
 	mov r27=PAGE_SHIFT<<2
 	;;
 (p6)	ptc.l r22,r27				// purge PTE page translation
-(p7)	cmp.ne.or.andcm p6,p7=r25,r18		// did L4 PTE change
+(p7)	cmp.ne.or.andcm p6,p7=r25,r18		// did *pte change
 	;;
 (p6)	ptc.l r16,r27				// purge translation
 #endif
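The predicated compares above implement the race check in straight-line code
with no branches.  A hedged C rendering of the same decision; purge_local_tlb()
is a hypothetical stand-in for ptc.l, and the parameter names follow the
register map in the new comment block:

#include <stdint.h>

/* Stand-in for "ptc.l addr,PAGE_SHIFT<<2": purge one local translation. */
static void purge_local_tlb(uint64_t addr) { (void)addr; }

/* old_*: values read during the walk (r29/r20/r18); new_*: values re-read
 * after the itc (r19/r26/r25).  Returns 1 if a ptc.g raced us and the
 * just-inserted translations were purged, so the fault must be retaken. */
static int vhpt_recheck(uint64_t old_pud, uint64_t new_pud,
			uint64_t old_pmd, uint64_t new_pmd,
			uint64_t old_pte, uint64_t new_pte,
			uint64_t iha, uint64_t ifa)
{
	if (new_pmd != old_pmd || new_pud != old_pud) {
		purge_local_tlb(iha);	/* (p6) ptc.l r22,r27: PTE page translation */
		purge_local_tlb(ifa);	/* (p6) ptc.l r16,r27: data translation */
		return 1;
	}
	if (new_pte != old_pte) {
		purge_local_tlb(ifa);	/* pte alone changed: purge only the data entry */
		return 1;
	}
	return 0;
}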
@@ -235,19 +244,19 @@ END(vhpt_miss)
 ENTRY(itlb_miss)
 	DBG_FAULT(1)
 	/*
-	 * The ITLB handler accesses the L3 PTE via the virtually mapped linear
+	 * The ITLB handler accesses the PTE via the virtually mapped linear
 	 * page table.  If a nested TLB miss occurs, we switch into physical
-	 * mode, walk the page table, and then re-execute the L3 PTE read
-	 * and go on normally after that.
+	 * mode, walk the page table, and then re-execute the PTE read and
+	 * go on normally after that.
 	 */
 	mov r16=cr.ifa				// get virtual address
 	mov r29=b0				// save b0
 	mov r31=pr				// save predicates
 .itlb_fault:
-	mov r17=cr.iha				// get virtual address of L3 PTE
+	mov r17=cr.iha				// get virtual address of PTE
 	movl r30=1f				// load nested fault continuation point
 	;;
-1:	ld8 r18=[r17]				// read L3 PTE
+1:	ld8 r18=[r17]				// read *pte
 	;;
 	mov b0=r29
 	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
@@ -262,7 +271,7 @@ ENTRY(itlb_miss)
 	 */
 	dv_serialize_data
 
-	ld8 r19=[r17]				// read L3 PTE again and see if same
+	ld8 r19=[r17]				// read *pte again and see if same
 	mov r20=PAGE_SHIFT<<2			// setup page size for purge
 	;;
 	cmp.ne p7,p0=r18,r19
@@ -279,19 +288,19 @@ END(itlb_miss)
 ENTRY(dtlb_miss)
 	DBG_FAULT(2)
 	/*
-	 * The DTLB handler accesses the L3 PTE via the virtually mapped linear
+	 * The DTLB handler accesses the PTE via the virtually mapped linear
 	 * page table.  If a nested TLB miss occurs, we switch into physical
-	 * mode, walk the page table, and then re-execute the L3 PTE read
-	 * and go on normally after that.
+	 * mode, walk the page table, and then re-execute the PTE read and
+	 * go on normally after that.
 	 */
 	mov r16=cr.ifa				// get virtual address
 	mov r29=b0				// save b0
 	mov r31=pr				// save predicates
 dtlb_fault:
-	mov r17=cr.iha				// get virtual address of L3 PTE
+	mov r17=cr.iha				// get virtual address of PTE
 	movl r30=1f				// load nested fault continuation point
 	;;
-1:	ld8 r18=[r17]				// read L3 PTE
+1:	ld8 r18=[r17]				// read *pte
 	;;
 	mov b0=r29
 	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
@@ -306,7 +315,7 @@ dtlb_fault:
 	 */
 	dv_serialize_data
 
-	ld8 r19=[r17]				// read L3 PTE again and see if same
+	ld8 r19=[r17]				// read *pte again and see if same
 	mov r20=PAGE_SHIFT<<2			// setup page size for purge
 	;;
 	cmp.ne p7,p0=r18,r19
@@ -420,7 +429,7 @@ ENTRY(nested_dtlb_miss)
 	 *		r30:	continuation address
 	 *		r31:	saved pr
 	 *
-	 * Output:	r17:	physical address of L3 PTE of faulting address
+	 * Output:	r17:	physical address of PTE of faulting address
 	 *		r29:	saved b0
 	 *		r30:	continuation address
 	 *		r31:	saved pr
@@ -450,33 +459,33 @@ ENTRY(nested_dtlb_miss)
 (p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
 (p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
 	;;
-(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=PTA + IFA(33,42)*8
-(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
+(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=pgd_offset for region 5
+(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=pgd_offset for region[0-4]
 	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
 #ifdef CONFIG_PGTABLE_4
-	shr.u r18=r22,PUD_SHIFT			// shift L2 index into position
+	shr.u r18=r22,PUD_SHIFT			// shift pud index into position
 #else
-	shr.u r18=r22,PMD_SHIFT			// shift L3 index into position
+	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
 #endif
 	;;
-	ld8 r17=[r17]				// fetch the L1 entry (may be 0)
+	ld8 r17=[r17]				// get *pgd (may be 0)
 	;;
-(p7)	cmp.eq p6,p7=r17,r0			// was L1 entry NULL?
-	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// compute address of L2 page table entry
+(p7)	cmp.eq p6,p7=r17,r0			// was pgd_present(*pgd) == NULL?
+	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// r17=p[u|m]d_offset(pgd,addr)
 	;;
 #ifdef CONFIG_PGTABLE_4
-(p7)	ld8 r17=[r17]				// fetch the L2 entry (may be 0)
-	shr.u r18=r22,PMD_SHIFT			// shift L3 index into position
+(p7)	ld8 r17=[r17]				// get *pud (may be 0)
+	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
 	;;
-(p7)	cmp.eq.or.andcm p6,p7=r17,r0		// was L2 entry NULL?
-	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// compute address of L2 page table entry
+(p7)	cmp.eq.or.andcm p6,p7=r17,r0		// was pud_present(*pud) == NULL?
+	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// r17=pmd_offset(pud,addr)
 	;;
 #endif
-(p7)	ld8 r17=[r17]				// fetch the L3 entry (may be 0)
-	shr.u r19=r22,PAGE_SHIFT		// shift L4 index into position
+(p7)	ld8 r17=[r17]				// get *pmd (may be 0)
+	shr.u r19=r22,PAGE_SHIFT		// shift pte index into position
 	;;
-(p7)	cmp.eq.or.andcm p6,p7=r17,r0		// was L3 entry NULL?
-	dep r17=r19,r17,3,(PAGE_SHIFT-3)	// compute address of L4 page table entry
+(p7)	cmp.eq.or.andcm p6,p7=r17,r0		// was pmd_present(*pmd) == NULL?
+	dep r17=r19,r17,3,(PAGE_SHIFT-3)	// r17=pte_offset(pmd,addr);
 (p6)	br.cond.spnt page_fault
 	mov b0=r30
 	br.sptk.many b0				// return to continuation point