Diffstat (limited to 'arch/ia64/kernel/ivt.S')
-rw-r--r-- | arch/ia64/kernel/ivt.S | 462
1 file changed, 231 insertions, 231 deletions
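This patch converts arch/ia64/kernel/ivt.S to the pv_ops framework: every direct access to a privileged control register (cr.ifa, cr.isr, cr.iha, cr.itir, ...) and every privilege-sensitive instruction (rfi, rsm psr.dt, itc.i/itc.d, bsw.1, ssm psr.ic) is replaced by a macro, so that a paravirtualized build can expand the macro into hypervisor-aware code while the native build keeps the original instruction. The left-hand column of each hunk shows exactly what the native expansion has to be. Below is a minimal sketch of plausible native definitions; the exact header and clobber handling are assumptions, since the real definitions are introduced by a separate patch in the series and are not part of this diff.

/* Sketch only -- plausible native expansions inferred from the left-hand
 * side of this diff; the real header added elsewhere in the series may
 * differ in detail.  In the native case the "clob" scratch registers are
 * simply unused. */
#define MOV_FROM_IFA(reg)		\
	mov reg = cr.ifa

#define MOV_TO_IFA(reg, clob)		\
	mov cr.ifa = reg

#define RSM_PSR_DT			\
	rsm psr.dt

#define ITC_D(pred, reg, clob)		\
(pred)	itc.d reg

#define RFI				\
	rfi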
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 80b44ea052d7..c39627df3cde 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -12,6 +12,14 @@ | |||
12 | * | 12 | * |
13 | * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP | 13 | * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP |
14 | * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT. | 14 | * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT. |
15 | * | ||
16 | * Copyright (C) 2005 Hewlett-Packard Co | ||
17 | * Dan Magenheimer <dan.magenheimer@hp.com> | ||
18 | * Xen paravirtualization | ||
19 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
20 | * VA Linux Systems Japan K.K. | ||
21 | * pv_ops. | ||
22 | * Yaozu (Eddie) Dong <eddie.dong@intel.com> | ||
15 | */ | 23 | */ |
16 | /* | 24 | /* |
17 | * This file defines the interruption vector table used by the CPU. | 25 | * This file defines the interruption vector table used by the CPU. |
@@ -102,13 +110,13 @@ ENTRY(vhpt_miss) | |||
102 | * - the faulting virtual address uses unimplemented address bits | 110 | * - the faulting virtual address uses unimplemented address bits |
103 | * - the faulting virtual address has no valid page table mapping | 111 | * - the faulting virtual address has no valid page table mapping |
104 | */ | 112 | */ |
105 | mov r16=cr.ifa // get address that caused the TLB miss | 113 | MOV_FROM_IFA(r16) // get address that caused the TLB miss |
106 | #ifdef CONFIG_HUGETLB_PAGE | 114 | #ifdef CONFIG_HUGETLB_PAGE |
107 | movl r18=PAGE_SHIFT | 115 | movl r18=PAGE_SHIFT |
108 | mov r25=cr.itir | 116 | MOV_FROM_ITIR(r25) |
109 | #endif | 117 | #endif |
110 | ;; | 118 | ;; |
111 | rsm psr.dt // use physical addressing for data | 119 | RSM_PSR_DT // use physical addressing for data |
112 | mov r31=pr // save the predicate registers | 120 | mov r31=pr // save the predicate registers |
113 | mov r19=IA64_KR(PT_BASE) // get page table base address | 121 | mov r19=IA64_KR(PT_BASE) // get page table base address |
114 | shl r21=r16,3 // shift bit 60 into sign bit | 122 | shl r21=r16,3 // shift bit 60 into sign bit |
@@ -168,21 +176,21 @@ ENTRY(vhpt_miss) | |||
168 | dep r21=r19,r20,3,(PAGE_SHIFT-3) // r21=pte_offset(pmd,addr) | 176 | dep r21=r19,r20,3,(PAGE_SHIFT-3) // r21=pte_offset(pmd,addr) |
169 | ;; | 177 | ;; |
170 | (p7) ld8 r18=[r21] // read *pte | 178 | (p7) ld8 r18=[r21] // read *pte |
171 | mov r19=cr.isr // cr.isr bit 32 tells us if this is an insn miss | 179 | MOV_FROM_ISR(r19) // cr.isr bit 32 tells us if this is an insn miss |
172 | ;; | 180 | ;; |
173 | (p7) tbit.z p6,p7=r18,_PAGE_P_BIT // page present bit cleared? | 181 | (p7) tbit.z p6,p7=r18,_PAGE_P_BIT // page present bit cleared? |
174 | mov r22=cr.iha // get the VHPT address that caused the TLB miss | 182 | MOV_FROM_IHA(r22) // get the VHPT address that caused the TLB miss |
175 | ;; // avoid RAW on p7 | 183 | ;; // avoid RAW on p7 |
176 | (p7) tbit.nz.unc p10,p11=r19,32 // is it an instruction TLB miss? | 184 | (p7) tbit.nz.unc p10,p11=r19,32 // is it an instruction TLB miss? |
177 | dep r23=0,r20,0,PAGE_SHIFT // clear low bits to get page address | 185 | dep r23=0,r20,0,PAGE_SHIFT // clear low bits to get page address |
178 | ;; | 186 | ;; |
179 | (p10) itc.i r18 // insert the instruction TLB entry | 187 | ITC_I_AND_D(p10, p11, r18, r24) // insert the instruction TLB entry and |
180 | (p11) itc.d r18 // insert the data TLB entry | 188 | // insert the data TLB entry |
181 | (p6) br.cond.spnt.many page_fault // handle bad address/page not present (page fault) | 189 | (p6) br.cond.spnt.many page_fault // handle bad address/page not present (page fault) |
182 | mov cr.ifa=r22 | 190 | MOV_TO_IFA(r22, r24) |
183 | 191 | ||
184 | #ifdef CONFIG_HUGETLB_PAGE | 192 | #ifdef CONFIG_HUGETLB_PAGE |
185 | (p8) mov cr.itir=r25 // change to default page-size for VHPT | 193 | MOV_TO_ITIR(p8, r25, r24) // change to default page-size for VHPT |
186 | #endif | 194 | #endif |
187 | 195 | ||
188 | /* | 196 | /* |
@@ -192,7 +200,7 @@ ENTRY(vhpt_miss) | |||
192 | */ | 200 | */ |
193 | adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23 | 201 | adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23 |
194 | ;; | 202 | ;; |
195 | (p7) itc.d r24 | 203 | ITC_D(p7, r24, r25) |
196 | ;; | 204 | ;; |
197 | #ifdef CONFIG_SMP | 205 | #ifdef CONFIG_SMP |
198 | /* | 206 | /* |
@@ -234,7 +242,7 @@ ENTRY(vhpt_miss) | |||
234 | #endif | 242 | #endif |
235 | 243 | ||
236 | mov pr=r31,-1 // restore predicate registers | 244 | mov pr=r31,-1 // restore predicate registers |
237 | rfi | 245 | RFI |
238 | END(vhpt_miss) | 246 | END(vhpt_miss) |
239 | 247 | ||
240 | .org ia64_ivt+0x400 | 248 | .org ia64_ivt+0x400 |
@@ -248,11 +256,11 @@ ENTRY(itlb_miss) | |||
248 | * mode, walk the page table, and then re-execute the PTE read and | 256 | * mode, walk the page table, and then re-execute the PTE read and |
249 | * go on normally after that. | 257 | * go on normally after that. |
250 | */ | 258 | */ |
251 | mov r16=cr.ifa // get virtual address | 259 | MOV_FROM_IFA(r16) // get virtual address |
252 | mov r29=b0 // save b0 | 260 | mov r29=b0 // save b0 |
253 | mov r31=pr // save predicates | 261 | mov r31=pr // save predicates |
254 | .itlb_fault: | 262 | .itlb_fault: |
255 | mov r17=cr.iha // get virtual address of PTE | 263 | MOV_FROM_IHA(r17) // get virtual address of PTE |
256 | movl r30=1f // load nested fault continuation point | 264 | movl r30=1f // load nested fault continuation point |
257 | ;; | 265 | ;; |
258 | 1: ld8 r18=[r17] // read *pte | 266 | 1: ld8 r18=[r17] // read *pte |
@@ -261,7 +269,7 @@ ENTRY(itlb_miss) | |||
261 | tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared? | 269 | tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared? |
262 | (p6) br.cond.spnt page_fault | 270 | (p6) br.cond.spnt page_fault |
263 | ;; | 271 | ;; |
264 | itc.i r18 | 272 | ITC_I(p0, r18, r19) |
265 | ;; | 273 | ;; |
266 | #ifdef CONFIG_SMP | 274 | #ifdef CONFIG_SMP |
267 | /* | 275 | /* |
@@ -278,7 +286,7 @@ ENTRY(itlb_miss) | |||
278 | (p7) ptc.l r16,r20 | 286 | (p7) ptc.l r16,r20 |
279 | #endif | 287 | #endif |
280 | mov pr=r31,-1 | 288 | mov pr=r31,-1 |
281 | rfi | 289 | RFI |
282 | END(itlb_miss) | 290 | END(itlb_miss) |
283 | 291 | ||
284 | .org ia64_ivt+0x0800 | 292 | .org ia64_ivt+0x0800 |
@@ -292,11 +300,11 @@ ENTRY(dtlb_miss) | |||
292 | * mode, walk the page table, and then re-execute the PTE read and | 300 | * mode, walk the page table, and then re-execute the PTE read and |
293 | * go on normally after that. | 301 | * go on normally after that. |
294 | */ | 302 | */ |
295 | mov r16=cr.ifa // get virtual address | 303 | MOV_FROM_IFA(r16) // get virtual address |
296 | mov r29=b0 // save b0 | 304 | mov r29=b0 // save b0 |
297 | mov r31=pr // save predicates | 305 | mov r31=pr // save predicates |
298 | dtlb_fault: | 306 | dtlb_fault: |
299 | mov r17=cr.iha // get virtual address of PTE | 307 | MOV_FROM_IHA(r17) // get virtual address of PTE |
300 | movl r30=1f // load nested fault continuation point | 308 | movl r30=1f // load nested fault continuation point |
301 | ;; | 309 | ;; |
302 | 1: ld8 r18=[r17] // read *pte | 310 | 1: ld8 r18=[r17] // read *pte |
@@ -305,7 +313,7 @@ dtlb_fault: | |||
305 | tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared? | 313 | tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared? |
306 | (p6) br.cond.spnt page_fault | 314 | (p6) br.cond.spnt page_fault |
307 | ;; | 315 | ;; |
308 | itc.d r18 | 316 | ITC_D(p0, r18, r19) |
309 | ;; | 317 | ;; |
310 | #ifdef CONFIG_SMP | 318 | #ifdef CONFIG_SMP |
311 | /* | 319 | /* |
@@ -322,7 +330,7 @@ dtlb_fault: | |||
322 | (p7) ptc.l r16,r20 | 330 | (p7) ptc.l r16,r20 |
323 | #endif | 331 | #endif |
324 | mov pr=r31,-1 | 332 | mov pr=r31,-1 |
325 | rfi | 333 | RFI |
326 | END(dtlb_miss) | 334 | END(dtlb_miss) |
327 | 335 | ||
328 | .org ia64_ivt+0x0c00 | 336 | .org ia64_ivt+0x0c00 |
@@ -330,9 +338,9 @@ END(dtlb_miss) | |||
330 | // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19) | 338 | // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19) |
331 | ENTRY(alt_itlb_miss) | 339 | ENTRY(alt_itlb_miss) |
332 | DBG_FAULT(3) | 340 | DBG_FAULT(3) |
333 | mov r16=cr.ifa // get address that caused the TLB miss | 341 | MOV_FROM_IFA(r16) // get address that caused the TLB miss |
334 | movl r17=PAGE_KERNEL | 342 | movl r17=PAGE_KERNEL |
335 | mov r21=cr.ipsr | 343 | MOV_FROM_IPSR(p0, r21) |
336 | movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) | 344 | movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) |
337 | mov r31=pr | 345 | mov r31=pr |
338 | ;; | 346 | ;; |
@@ -341,9 +349,9 @@ ENTRY(alt_itlb_miss) | |||
341 | ;; | 349 | ;; |
342 | cmp.gt p8,p0=6,r22 // user mode | 350 | cmp.gt p8,p0=6,r22 // user mode |
343 | ;; | 351 | ;; |
344 | (p8) thash r17=r16 | 352 | THASH(p8, r17, r16, r23) |
345 | ;; | 353 | ;; |
346 | (p8) mov cr.iha=r17 | 354 | MOV_TO_IHA(p8, r17, r23) |
347 | (p8) mov r29=b0 // save b0 | 355 | (p8) mov r29=b0 // save b0 |
348 | (p8) br.cond.dptk .itlb_fault | 356 | (p8) br.cond.dptk .itlb_fault |
349 | #endif | 357 | #endif |
@@ -358,9 +366,9 @@ ENTRY(alt_itlb_miss) | |||
358 | or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6 | 366 | or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6 |
359 | (p8) br.cond.spnt page_fault | 367 | (p8) br.cond.spnt page_fault |
360 | ;; | 368 | ;; |
361 | itc.i r19 // insert the TLB entry | 369 | ITC_I(p0, r19, r18) // insert the TLB entry |
362 | mov pr=r31,-1 | 370 | mov pr=r31,-1 |
363 | rfi | 371 | RFI |
364 | END(alt_itlb_miss) | 372 | END(alt_itlb_miss) |
365 | 373 | ||
366 | .org ia64_ivt+0x1000 | 374 | .org ia64_ivt+0x1000 |
@@ -368,11 +376,11 @@ END(alt_itlb_miss) | |||
368 | // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46) | 376 | // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46) |
369 | ENTRY(alt_dtlb_miss) | 377 | ENTRY(alt_dtlb_miss) |
370 | DBG_FAULT(4) | 378 | DBG_FAULT(4) |
371 | mov r16=cr.ifa // get address that caused the TLB miss | 379 | MOV_FROM_IFA(r16) // get address that caused the TLB miss |
372 | movl r17=PAGE_KERNEL | 380 | movl r17=PAGE_KERNEL |
373 | mov r20=cr.isr | 381 | MOV_FROM_ISR(r20) |
374 | movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) | 382 | movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) |
375 | mov r21=cr.ipsr | 383 | MOV_FROM_IPSR(p0, r21) |
376 | mov r31=pr | 384 | mov r31=pr |
377 | mov r24=PERCPU_ADDR | 385 | mov r24=PERCPU_ADDR |
378 | ;; | 386 | ;; |
@@ -381,9 +389,9 @@ ENTRY(alt_dtlb_miss) | |||
381 | ;; | 389 | ;; |
382 | cmp.gt p8,p0=6,r22 // access to region 0-5 | 390 | cmp.gt p8,p0=6,r22 // access to region 0-5 |
383 | ;; | 391 | ;; |
384 | (p8) thash r17=r16 | 392 | THASH(p8, r17, r16, r25) |
385 | ;; | 393 | ;; |
386 | (p8) mov cr.iha=r17 | 394 | MOV_TO_IHA(p8, r17, r25) |
387 | (p8) mov r29=b0 // save b0 | 395 | (p8) mov r29=b0 // save b0 |
388 | (p8) br.cond.dptk dtlb_fault | 396 | (p8) br.cond.dptk dtlb_fault |
389 | #endif | 397 | #endif |
@@ -402,7 +410,7 @@ ENTRY(alt_dtlb_miss) | |||
402 | tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on? | 410 | tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on? |
403 | ;; | 411 | ;; |
404 | (p10) sub r19=r19,r26 | 412 | (p10) sub r19=r19,r26 |
405 | (p10) mov cr.itir=r25 | 413 | MOV_TO_ITIR(p10, r25, r24) |
406 | cmp.ne p8,p0=r0,r23 | 414 | cmp.ne p8,p0=r0,r23 |
407 | (p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field | 415 | (p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field |
408 | (p12) dep r17=-1,r17,4,1 // set ma=UC for region 6 addr | 416 | (p12) dep r17=-1,r17,4,1 // set ma=UC for region 6 addr |
@@ -411,11 +419,11 @@ ENTRY(alt_dtlb_miss) | |||
411 | dep r21=-1,r21,IA64_PSR_ED_BIT,1 | 419 | dep r21=-1,r21,IA64_PSR_ED_BIT,1 |
412 | ;; | 420 | ;; |
413 | or r19=r19,r17 // insert PTE control bits into r19 | 421 | or r19=r19,r17 // insert PTE control bits into r19 |
414 | (p6) mov cr.ipsr=r21 | 422 | MOV_TO_IPSR(p6, r21, r24) |
415 | ;; | 423 | ;; |
416 | (p7) itc.d r19 // insert the TLB entry | 424 | ITC_D(p7, r19, r18) // insert the TLB entry |
417 | mov pr=r31,-1 | 425 | mov pr=r31,-1 |
418 | rfi | 426 | RFI |
419 | END(alt_dtlb_miss) | 427 | END(alt_dtlb_miss) |
420 | 428 | ||
421 | .org ia64_ivt+0x1400 | 429 | .org ia64_ivt+0x1400 |
@@ -444,10 +452,10 @@ ENTRY(nested_dtlb_miss) | |||
444 | * | 452 | * |
445 | * Clobbered: b0, r18, r19, r21, r22, psr.dt (cleared) | 453 | * Clobbered: b0, r18, r19, r21, r22, psr.dt (cleared) |
446 | */ | 454 | */ |
447 | rsm psr.dt // switch to using physical data addressing | 455 | RSM_PSR_DT // switch to using physical data addressing |
448 | mov r19=IA64_KR(PT_BASE) // get the page table base address | 456 | mov r19=IA64_KR(PT_BASE) // get the page table base address |
449 | shl r21=r16,3 // shift bit 60 into sign bit | 457 | shl r21=r16,3 // shift bit 60 into sign bit |
450 | mov r18=cr.itir | 458 | MOV_FROM_ITIR(r18) |
451 | ;; | 459 | ;; |
452 | shr.u r17=r16,61 // get the region number into r17 | 460 | shr.u r17=r16,61 // get the region number into r17 |
453 | extr.u r18=r18,2,6 // get the faulting page size | 461 | extr.u r18=r18,2,6 // get the faulting page size |
@@ -507,33 +515,6 @@ ENTRY(ikey_miss) | |||
507 | FAULT(6) | 515 | FAULT(6) |
508 | END(ikey_miss) | 516 | END(ikey_miss) |
509 | 517 | ||
510 | //----------------------------------------------------------------------------------- | ||
511 | // call do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address) | ||
512 | ENTRY(page_fault) | ||
513 | ssm psr.dt | ||
514 | ;; | ||
515 | srlz.i | ||
516 | ;; | ||
517 | SAVE_MIN_WITH_COVER | ||
518 | alloc r15=ar.pfs,0,0,3,0 | ||
519 | mov out0=cr.ifa | ||
520 | mov out1=cr.isr | ||
521 | adds r3=8,r2 // set up second base pointer | ||
522 | ;; | ||
523 | ssm psr.ic | PSR_DEFAULT_BITS | ||
524 | ;; | ||
525 | srlz.i // guarantee that interruption collectin is on | ||
526 | ;; | ||
527 | (p15) ssm psr.i // restore psr.i | ||
528 | movl r14=ia64_leave_kernel | ||
529 | ;; | ||
530 | SAVE_REST | ||
531 | mov rp=r14 | ||
532 | ;; | ||
533 | adds out2=16,r12 // out2 = pointer to pt_regs | ||
534 | br.call.sptk.many b6=ia64_do_page_fault // ignore return address | ||
535 | END(page_fault) | ||
536 | |||
537 | .org ia64_ivt+0x1c00 | 518 | .org ia64_ivt+0x1c00 |
538 | ///////////////////////////////////////////////////////////////////////////////////////// | 519 | ///////////////////////////////////////////////////////////////////////////////////////// |
539 | // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51) | 520 | // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51) |
@@ -556,10 +537,10 @@ ENTRY(dirty_bit) | |||
556 | * page table TLB entry isn't present, we take a nested TLB miss hit where we look | 537 | * page table TLB entry isn't present, we take a nested TLB miss hit where we look |
557 | * up the physical address of the L3 PTE and then continue at label 1 below. | 538 | * up the physical address of the L3 PTE and then continue at label 1 below. |
558 | */ | 539 | */ |
559 | mov r16=cr.ifa // get the address that caused the fault | 540 | MOV_FROM_IFA(r16) // get the address that caused the fault |
560 | movl r30=1f // load continuation point in case of nested fault | 541 | movl r30=1f // load continuation point in case of nested fault |
561 | ;; | 542 | ;; |
562 | thash r17=r16 // compute virtual address of L3 PTE | 543 | THASH(p0, r17, r16, r18) // compute virtual address of L3 PTE |
563 | mov r29=b0 // save b0 in case of nested fault | 544 | mov r29=b0 // save b0 in case of nested fault |
564 | mov r31=pr // save pr | 545 | mov r31=pr // save pr |
565 | #ifdef CONFIG_SMP | 546 | #ifdef CONFIG_SMP |
@@ -576,7 +557,7 @@ ENTRY(dirty_bit) | |||
576 | ;; | 557 | ;; |
577 | (p6) cmp.eq p6,p7=r26,r18 // Only compare if page is present | 558 | (p6) cmp.eq p6,p7=r26,r18 // Only compare if page is present |
578 | ;; | 559 | ;; |
579 | (p6) itc.d r25 // install updated PTE | 560 | ITC_D(p6, r25, r18) // install updated PTE |
580 | ;; | 561 | ;; |
581 | /* | 562 | /* |
582 | * Tell the assemblers dependency-violation checker that the above "itc" instructions | 563 | * Tell the assemblers dependency-violation checker that the above "itc" instructions |
@@ -602,7 +583,7 @@ ENTRY(dirty_bit) | |||
602 | itc.d r18 // install updated PTE | 583 | itc.d r18 // install updated PTE |
603 | #endif | 584 | #endif |
604 | mov pr=r31,-1 // restore pr | 585 | mov pr=r31,-1 // restore pr |
605 | rfi | 586 | RFI |
606 | END(dirty_bit) | 587 | END(dirty_bit) |
607 | 588 | ||
608 | .org ia64_ivt+0x2400 | 589 | .org ia64_ivt+0x2400 |
@@ -611,22 +592,22 @@ END(dirty_bit) | |||
611 | ENTRY(iaccess_bit) | 592 | ENTRY(iaccess_bit) |
612 | DBG_FAULT(9) | 593 | DBG_FAULT(9) |
613 | // Like Entry 8, except for instruction access | 594 | // Like Entry 8, except for instruction access |
614 | mov r16=cr.ifa // get the address that caused the fault | 595 | MOV_FROM_IFA(r16) // get the address that caused the fault |
615 | movl r30=1f // load continuation point in case of nested fault | 596 | movl r30=1f // load continuation point in case of nested fault |
616 | mov r31=pr // save predicates | 597 | mov r31=pr // save predicates |
617 | #ifdef CONFIG_ITANIUM | 598 | #ifdef CONFIG_ITANIUM |
618 | /* | 599 | /* |
619 | * Erratum 10 (IFA may contain incorrect address) has "NoFix" status. | 600 | * Erratum 10 (IFA may contain incorrect address) has "NoFix" status. |
620 | */ | 601 | */ |
621 | mov r17=cr.ipsr | 602 | MOV_FROM_IPSR(p0, r17) |
622 | ;; | 603 | ;; |
623 | mov r18=cr.iip | 604 | MOV_FROM_IIP(r18) |
624 | tbit.z p6,p0=r17,IA64_PSR_IS_BIT // IA64 instruction set? | 605 | tbit.z p6,p0=r17,IA64_PSR_IS_BIT // IA64 instruction set? |
625 | ;; | 606 | ;; |
626 | (p6) mov r16=r18 // if so, use cr.iip instead of cr.ifa | 607 | (p6) mov r16=r18 // if so, use cr.iip instead of cr.ifa |
627 | #endif /* CONFIG_ITANIUM */ | 608 | #endif /* CONFIG_ITANIUM */ |
628 | ;; | 609 | ;; |
629 | thash r17=r16 // compute virtual address of L3 PTE | 610 | THASH(p0, r17, r16, r18) // compute virtual address of L3 PTE |
630 | mov r29=b0 // save b0 in case of nested fault) | 611 | mov r29=b0 // save b0 in case of nested fault) |
631 | #ifdef CONFIG_SMP | 612 | #ifdef CONFIG_SMP |
632 | mov r28=ar.ccv // save ar.ccv | 613 | mov r28=ar.ccv // save ar.ccv |
@@ -642,7 +623,7 @@ ENTRY(iaccess_bit) | |||
642 | ;; | 623 | ;; |
643 | (p6) cmp.eq p6,p7=r26,r18 // Only if page present | 624 | (p6) cmp.eq p6,p7=r26,r18 // Only if page present |
644 | ;; | 625 | ;; |
645 | (p6) itc.i r25 // install updated PTE | 626 | ITC_I(p6, r25, r26) // install updated PTE |
646 | ;; | 627 | ;; |
647 | /* | 628 | /* |
648 | * Tell the assemblers dependency-violation checker that the above "itc" instructions | 629 | * Tell the assemblers dependency-violation checker that the above "itc" instructions |
@@ -668,7 +649,7 @@ ENTRY(iaccess_bit) | |||
668 | itc.i r18 // install updated PTE | 649 | itc.i r18 // install updated PTE |
669 | #endif /* !CONFIG_SMP */ | 650 | #endif /* !CONFIG_SMP */ |
670 | mov pr=r31,-1 | 651 | mov pr=r31,-1 |
671 | rfi | 652 | RFI |
672 | END(iaccess_bit) | 653 | END(iaccess_bit) |
673 | 654 | ||
674 | .org ia64_ivt+0x2800 | 655 | .org ia64_ivt+0x2800 |
@@ -677,10 +658,10 @@ END(iaccess_bit) | |||
677 | ENTRY(daccess_bit) | 658 | ENTRY(daccess_bit) |
678 | DBG_FAULT(10) | 659 | DBG_FAULT(10) |
679 | // Like Entry 8, except for data access | 660 | // Like Entry 8, except for data access |
680 | mov r16=cr.ifa // get the address that caused the fault | 661 | MOV_FROM_IFA(r16) // get the address that caused the fault |
681 | movl r30=1f // load continuation point in case of nested fault | 662 | movl r30=1f // load continuation point in case of nested fault |
682 | ;; | 663 | ;; |
683 | thash r17=r16 // compute virtual address of L3 PTE | 664 | THASH(p0, r17, r16, r18) // compute virtual address of L3 PTE |
684 | mov r31=pr | 665 | mov r31=pr |
685 | mov r29=b0 // save b0 in case of nested fault) | 666 | mov r29=b0 // save b0 in case of nested fault) |
686 | #ifdef CONFIG_SMP | 667 | #ifdef CONFIG_SMP |
@@ -697,7 +678,7 @@ ENTRY(daccess_bit) | |||
697 | ;; | 678 | ;; |
698 | (p6) cmp.eq p6,p7=r26,r18 // Only if page is present | 679 | (p6) cmp.eq p6,p7=r26,r18 // Only if page is present |
699 | ;; | 680 | ;; |
700 | (p6) itc.d r25 // install updated PTE | 681 | ITC_D(p6, r25, r26) // install updated PTE |
701 | /* | 682 | /* |
702 | * Tell the assemblers dependency-violation checker that the above "itc" instructions | 683 | * Tell the assemblers dependency-violation checker that the above "itc" instructions |
703 | * cannot possibly affect the following loads: | 684 | * cannot possibly affect the following loads: |
@@ -721,7 +702,7 @@ ENTRY(daccess_bit) | |||
721 | #endif | 702 | #endif |
722 | mov b0=r29 // restore b0 | 703 | mov b0=r29 // restore b0 |
723 | mov pr=r31,-1 | 704 | mov pr=r31,-1 |
724 | rfi | 705 | RFI |
725 | END(daccess_bit) | 706 | END(daccess_bit) |
726 | 707 | ||
727 | .org ia64_ivt+0x2c00 | 708 | .org ia64_ivt+0x2c00 |
@@ -745,10 +726,10 @@ ENTRY(break_fault) | |||
745 | */ | 726 | */ |
746 | DBG_FAULT(11) | 727 | DBG_FAULT(11) |
747 | mov.m r16=IA64_KR(CURRENT) // M2 r16 <- current task (12 cyc) | 728 | mov.m r16=IA64_KR(CURRENT) // M2 r16 <- current task (12 cyc) |
748 | mov r29=cr.ipsr // M2 (12 cyc) | 729 | MOV_FROM_IPSR(p0, r29) // M2 (12 cyc) |
749 | mov r31=pr // I0 (2 cyc) | 730 | mov r31=pr // I0 (2 cyc) |
750 | 731 | ||
751 | mov r17=cr.iim // M2 (2 cyc) | 732 | MOV_FROM_IIM(r17) // M2 (2 cyc) |
752 | mov.m r27=ar.rsc // M2 (12 cyc) | 733 | mov.m r27=ar.rsc // M2 (12 cyc) |
753 | mov r18=__IA64_BREAK_SYSCALL // A | 734 | mov r18=__IA64_BREAK_SYSCALL // A |
754 | 735 | ||
@@ -767,7 +748,7 @@ ENTRY(break_fault) | |||
767 | nop.m 0 | 748 | nop.m 0 |
768 | movl r30=sys_call_table // X | 749 | movl r30=sys_call_table // X |
769 | 750 | ||
770 | mov r28=cr.iip // M2 (2 cyc) | 751 | MOV_FROM_IIP(r28) // M2 (2 cyc) |
771 | cmp.eq p0,p7=r18,r17 // I0 is this a system call? | 752 | cmp.eq p0,p7=r18,r17 // I0 is this a system call? |
772 | (p7) br.cond.spnt non_syscall // B no -> | 753 | (p7) br.cond.spnt non_syscall // B no -> |
773 | // | 754 | // |
@@ -864,18 +845,17 @@ ENTRY(break_fault) | |||
864 | #endif | 845 | #endif |
865 | mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0 | 846 | mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0 |
866 | nop 0 | 847 | nop 0 |
867 | bsw.1 // B (6 cyc) regs are saved, switch to bank 1 | 848 | BSW_1(r2, r14) // B (6 cyc) regs are saved, switch to bank 1 |
868 | ;; | 849 | ;; |
869 | 850 | ||
870 | ssm psr.ic | PSR_DEFAULT_BITS // M2 now it's safe to re-enable intr.-collection | 851 | SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r16) // M2 now it's safe to re-enable intr.-collection |
852 | // M0 ensure interruption collection is on | ||
871 | movl r3=ia64_ret_from_syscall // X | 853 | movl r3=ia64_ret_from_syscall // X |
872 | ;; | 854 | ;; |
873 | |||
874 | srlz.i // M0 ensure interruption collection is on | ||
875 | mov rp=r3 // I0 set the real return addr | 855 | mov rp=r3 // I0 set the real return addr |
876 | (p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT | 856 | (p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT |
877 | 857 | ||
878 | (p15) ssm psr.i // M2 restore psr.i | 858 | SSM_PSR_I(p15, p15, r16) // M2 restore psr.i |
879 | (p14) br.call.sptk.many b6=b6 // B invoke syscall-handker (ignore return addr) | 859 | (p14) br.call.sptk.many b6=b6 // B invoke syscall-handker (ignore return addr) |
880 | br.cond.spnt.many ia64_trace_syscall // B do syscall-tracing thingamagic | 860 | br.cond.spnt.many ia64_trace_syscall // B do syscall-tracing thingamagic |
881 | // NOT REACHED | 861 | // NOT REACHED |
@@ -895,27 +875,8 @@ END(break_fault) | |||
895 | ///////////////////////////////////////////////////////////////////////////////////////// | 875 | ///////////////////////////////////////////////////////////////////////////////////////// |
896 | // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4) | 876 | // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4) |
897 | ENTRY(interrupt) | 877 | ENTRY(interrupt) |
898 | DBG_FAULT(12) | 878 | /* interrupt handler has become too big to fit this area. */ |
899 | mov r31=pr // prepare to save predicates | 879 | br.sptk.many __interrupt |
900 | ;; | ||
901 | SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3 | ||
902 | ssm psr.ic | PSR_DEFAULT_BITS | ||
903 | ;; | ||
904 | adds r3=8,r2 // set up second base pointer for SAVE_REST | ||
905 | srlz.i // ensure everybody knows psr.ic is back on | ||
906 | ;; | ||
907 | SAVE_REST | ||
908 | ;; | ||
909 | MCA_RECOVER_RANGE(interrupt) | ||
910 | alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group | ||
911 | mov out0=cr.ivr // pass cr.ivr as first arg | ||
912 | add out1=16,sp // pass pointer to pt_regs as second arg | ||
913 | ;; | ||
914 | srlz.d // make sure we see the effect of cr.ivr | ||
915 | movl r14=ia64_leave_kernel | ||
916 | ;; | ||
917 | mov rp=r14 | ||
918 | br.call.sptk.many b6=ia64_handle_irq | ||
919 | END(interrupt) | 880 | END(interrupt) |
920 | 881 | ||
921 | .org ia64_ivt+0x3400 | 882 | .org ia64_ivt+0x3400 |
@@ -978,6 +939,7 @@ END(interrupt) | |||
978 | * - ar.fpsr: set to kernel settings | 939 | * - ar.fpsr: set to kernel settings |
979 | * - b6: preserved (same as on entry) | 940 | * - b6: preserved (same as on entry) |
980 | */ | 941 | */ |
942 | #ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE | ||
981 | GLOBAL_ENTRY(ia64_syscall_setup) | 943 | GLOBAL_ENTRY(ia64_syscall_setup) |
982 | #if PT(B6) != 0 | 944 | #if PT(B6) != 0 |
983 | # error This code assumes that b6 is the first field in pt_regs. | 945 | # error This code assumes that b6 is the first field in pt_regs. |
@@ -1069,6 +1031,7 @@ GLOBAL_ENTRY(ia64_syscall_setup) | |||
1069 | (p10) mov r8=-EINVAL | 1031 | (p10) mov r8=-EINVAL |
1070 | br.ret.sptk.many b7 | 1032 | br.ret.sptk.many b7 |
1071 | END(ia64_syscall_setup) | 1033 | END(ia64_syscall_setup) |
1034 | #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */ | ||
1072 | 1035 | ||
1073 | .org ia64_ivt+0x3c00 | 1036 | .org ia64_ivt+0x3c00 |
1074 | ///////////////////////////////////////////////////////////////////////////////////////// | 1037 | ///////////////////////////////////////////////////////////////////////////////////////// |
@@ -1082,7 +1045,7 @@ END(ia64_syscall_setup) | |||
1082 | DBG_FAULT(16) | 1045 | DBG_FAULT(16) |
1083 | FAULT(16) | 1046 | FAULT(16) |
1084 | 1047 | ||
1085 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 1048 | #if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(__IA64_ASM_PARAVIRTUALIZED_NATIVE) |
1086 | /* | 1049 | /* |
1087 | * There is no particular reason for this code to be here, other than | 1050 | * There is no particular reason for this code to be here, other than |
1088 | * that there happens to be space here that would go unused otherwise. | 1051 | * that there happens to be space here that would go unused otherwise. |
@@ -1092,7 +1055,7 @@ END(ia64_syscall_setup) | |||
1092 | * account_sys_enter is called from SAVE_MIN* macros if accounting is | 1055 | * account_sys_enter is called from SAVE_MIN* macros if accounting is |
1093 | * enabled and if the macro is entered from user mode. | 1056 | * enabled and if the macro is entered from user mode. |
1094 | */ | 1057 | */ |
1095 | ENTRY(account_sys_enter) | 1058 | GLOBAL_ENTRY(account_sys_enter) |
1096 | // mov.m r20=ar.itc is called in advance, and r13 is current | 1059 | // mov.m r20=ar.itc is called in advance, and r13 is current |
1097 | add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13 | 1060 | add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13 |
1098 | add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13 | 1061 | add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13 |
@@ -1123,110 +1086,18 @@ END(account_sys_enter) | |||
1123 | DBG_FAULT(17) | 1086 | DBG_FAULT(17) |
1124 | FAULT(17) | 1087 | FAULT(17) |
1125 | 1088 | ||
1126 | ENTRY(non_syscall) | ||
1127 | mov ar.rsc=r27 // restore ar.rsc before SAVE_MIN_WITH_COVER | ||
1128 | ;; | ||
1129 | SAVE_MIN_WITH_COVER | ||
1130 | |||
1131 | // There is no particular reason for this code to be here, other than that | ||
1132 | // there happens to be space here that would go unused otherwise. If this | ||
1133 | // fault ever gets "unreserved", simply moved the following code to a more | ||
1134 | // suitable spot... | ||
1135 | |||
1136 | alloc r14=ar.pfs,0,0,2,0 | ||
1137 | mov out0=cr.iim | ||
1138 | add out1=16,sp | ||
1139 | adds r3=8,r2 // set up second base pointer for SAVE_REST | ||
1140 | |||
1141 | ssm psr.ic | PSR_DEFAULT_BITS | ||
1142 | ;; | ||
1143 | srlz.i // guarantee that interruption collection is on | ||
1144 | ;; | ||
1145 | (p15) ssm psr.i // restore psr.i | ||
1146 | movl r15=ia64_leave_kernel | ||
1147 | ;; | ||
1148 | SAVE_REST | ||
1149 | mov rp=r15 | ||
1150 | ;; | ||
1151 | br.call.sptk.many b6=ia64_bad_break // avoid WAW on CFM and ignore return addr | ||
1152 | END(non_syscall) | ||
1153 | |||
1154 | .org ia64_ivt+0x4800 | 1089 | .org ia64_ivt+0x4800 |
1155 | ///////////////////////////////////////////////////////////////////////////////////////// | 1090 | ///////////////////////////////////////////////////////////////////////////////////////// |
1156 | // 0x4800 Entry 18 (size 64 bundles) Reserved | 1091 | // 0x4800 Entry 18 (size 64 bundles) Reserved |
1157 | DBG_FAULT(18) | 1092 | DBG_FAULT(18) |
1158 | FAULT(18) | 1093 | FAULT(18) |
1159 | 1094 | ||
1160 | /* | ||
1161 | * There is no particular reason for this code to be here, other than that | ||
1162 | * there happens to be space here that would go unused otherwise. If this | ||
1163 | * fault ever gets "unreserved", simply moved the following code to a more | ||
1164 | * suitable spot... | ||
1165 | */ | ||
1166 | |||
1167 | ENTRY(dispatch_unaligned_handler) | ||
1168 | SAVE_MIN_WITH_COVER | ||
1169 | ;; | ||
1170 | alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!) | ||
1171 | mov out0=cr.ifa | ||
1172 | adds out1=16,sp | ||
1173 | |||
1174 | ssm psr.ic | PSR_DEFAULT_BITS | ||
1175 | ;; | ||
1176 | srlz.i // guarantee that interruption collection is on | ||
1177 | ;; | ||
1178 | (p15) ssm psr.i // restore psr.i | ||
1179 | adds r3=8,r2 // set up second base pointer | ||
1180 | ;; | ||
1181 | SAVE_REST | ||
1182 | movl r14=ia64_leave_kernel | ||
1183 | ;; | ||
1184 | mov rp=r14 | ||
1185 | br.sptk.many ia64_prepare_handle_unaligned | ||
1186 | END(dispatch_unaligned_handler) | ||
1187 | |||
1188 | .org ia64_ivt+0x4c00 | 1095 | .org ia64_ivt+0x4c00 |
1189 | ///////////////////////////////////////////////////////////////////////////////////////// | 1096 | ///////////////////////////////////////////////////////////////////////////////////////// |
1190 | // 0x4c00 Entry 19 (size 64 bundles) Reserved | 1097 | // 0x4c00 Entry 19 (size 64 bundles) Reserved |
1191 | DBG_FAULT(19) | 1098 | DBG_FAULT(19) |
1192 | FAULT(19) | 1099 | FAULT(19) |
1193 | 1100 | ||
1194 | /* | ||
1195 | * There is no particular reason for this code to be here, other than that | ||
1196 | * there happens to be space here that would go unused otherwise. If this | ||
1197 | * fault ever gets "unreserved", simply moved the following code to a more | ||
1198 | * suitable spot... | ||
1199 | */ | ||
1200 | |||
1201 | ENTRY(dispatch_to_fault_handler) | ||
1202 | /* | ||
1203 | * Input: | ||
1204 | * psr.ic: off | ||
1205 | * r19: fault vector number (e.g., 24 for General Exception) | ||
1206 | * r31: contains saved predicates (pr) | ||
1207 | */ | ||
1208 | SAVE_MIN_WITH_COVER_R19 | ||
1209 | alloc r14=ar.pfs,0,0,5,0 | ||
1210 | mov out0=r15 | ||
1211 | mov out1=cr.isr | ||
1212 | mov out2=cr.ifa | ||
1213 | mov out3=cr.iim | ||
1214 | mov out4=cr.itir | ||
1215 | ;; | ||
1216 | ssm psr.ic | PSR_DEFAULT_BITS | ||
1217 | ;; | ||
1218 | srlz.i // guarantee that interruption collection is on | ||
1219 | ;; | ||
1220 | (p15) ssm psr.i // restore psr.i | ||
1221 | adds r3=8,r2 // set up second base pointer for SAVE_REST | ||
1222 | ;; | ||
1223 | SAVE_REST | ||
1224 | movl r14=ia64_leave_kernel | ||
1225 | ;; | ||
1226 | mov rp=r14 | ||
1227 | br.call.sptk.many b6=ia64_fault | ||
1228 | END(dispatch_to_fault_handler) | ||
1229 | |||
1230 | // | 1101 | // |
1231 | // --- End of long entries, Beginning of short entries | 1102 | // --- End of long entries, Beginning of short entries |
1232 | // | 1103 | // |
@@ -1236,8 +1107,8 @@ END(dispatch_to_fault_handler) | |||
1236 | // 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49) | 1107 | // 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49) |
1237 | ENTRY(page_not_present) | 1108 | ENTRY(page_not_present) |
1238 | DBG_FAULT(20) | 1109 | DBG_FAULT(20) |
1239 | mov r16=cr.ifa | 1110 | MOV_FROM_IFA(r16) |
1240 | rsm psr.dt | 1111 | RSM_PSR_DT |
1241 | /* | 1112 | /* |
1242 | * The Linux page fault handler doesn't expect non-present pages to be in | 1113 | * The Linux page fault handler doesn't expect non-present pages to be in |
1243 | * the TLB. Flush the existing entry now, so we meet that expectation. | 1114 | * the TLB. Flush the existing entry now, so we meet that expectation. |
@@ -1256,8 +1127,8 @@ END(page_not_present) | |||
1256 | // 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52) | 1127 | // 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52) |
1257 | ENTRY(key_permission) | 1128 | ENTRY(key_permission) |
1258 | DBG_FAULT(21) | 1129 | DBG_FAULT(21) |
1259 | mov r16=cr.ifa | 1130 | MOV_FROM_IFA(r16) |
1260 | rsm psr.dt | 1131 | RSM_PSR_DT |
1261 | mov r31=pr | 1132 | mov r31=pr |
1262 | ;; | 1133 | ;; |
1263 | srlz.d | 1134 | srlz.d |
@@ -1269,8 +1140,8 @@ END(key_permission) | |||
1269 | // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26) | 1140 | // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26) |
1270 | ENTRY(iaccess_rights) | 1141 | ENTRY(iaccess_rights) |
1271 | DBG_FAULT(22) | 1142 | DBG_FAULT(22) |
1272 | mov r16=cr.ifa | 1143 | MOV_FROM_IFA(r16) |
1273 | rsm psr.dt | 1144 | RSM_PSR_DT |
1274 | mov r31=pr | 1145 | mov r31=pr |
1275 | ;; | 1146 | ;; |
1276 | srlz.d | 1147 | srlz.d |
@@ -1282,8 +1153,8 @@ END(iaccess_rights) | |||
1282 | // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53) | 1153 | // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53) |
1283 | ENTRY(daccess_rights) | 1154 | ENTRY(daccess_rights) |
1284 | DBG_FAULT(23) | 1155 | DBG_FAULT(23) |
1285 | mov r16=cr.ifa | 1156 | MOV_FROM_IFA(r16) |
1286 | rsm psr.dt | 1157 | RSM_PSR_DT |
1287 | mov r31=pr | 1158 | mov r31=pr |
1288 | ;; | 1159 | ;; |
1289 | srlz.d | 1160 | srlz.d |
@@ -1295,7 +1166,7 @@ END(daccess_rights) | |||
1295 | // 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39) | 1166 | // 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39) |
1296 | ENTRY(general_exception) | 1167 | ENTRY(general_exception) |
1297 | DBG_FAULT(24) | 1168 | DBG_FAULT(24) |
1298 | mov r16=cr.isr | 1169 | MOV_FROM_ISR(r16) |
1299 | mov r31=pr | 1170 | mov r31=pr |
1300 | ;; | 1171 | ;; |
1301 | cmp4.eq p6,p0=0,r16 | 1172 | cmp4.eq p6,p0=0,r16 |
@@ -1324,8 +1195,8 @@ END(disabled_fp_reg) | |||
1324 | ENTRY(nat_consumption) | 1195 | ENTRY(nat_consumption) |
1325 | DBG_FAULT(26) | 1196 | DBG_FAULT(26) |
1326 | 1197 | ||
1327 | mov r16=cr.ipsr | 1198 | MOV_FROM_IPSR(p0, r16) |
1328 | mov r17=cr.isr | 1199 | MOV_FROM_ISR(r17) |
1329 | mov r31=pr // save PR | 1200 | mov r31=pr // save PR |
1330 | ;; | 1201 | ;; |
1331 | and r18=0xf,r17 // r18 = cr.ipsr.code{3:0} | 1202 | and r18=0xf,r17 // r18 = cr.ipsr.code{3:0} |
@@ -1335,10 +1206,10 @@ ENTRY(nat_consumption) | |||
1335 | dep r16=-1,r16,IA64_PSR_ED_BIT,1 | 1206 | dep r16=-1,r16,IA64_PSR_ED_BIT,1 |
1336 | (p6) br.cond.spnt 1f // branch if (cr.ispr.na == 0 || cr.ipsr.code{3:0} != LFETCH) | 1207 | (p6) br.cond.spnt 1f // branch if (cr.ispr.na == 0 || cr.ipsr.code{3:0} != LFETCH) |
1337 | ;; | 1208 | ;; |
1338 | mov cr.ipsr=r16 // set cr.ipsr.na | 1209 | MOV_TO_IPSR(p0, r16, r18) |
1339 | mov pr=r31,-1 | 1210 | mov pr=r31,-1 |
1340 | ;; | 1211 | ;; |
1341 | rfi | 1212 | RFI |
1342 | 1213 | ||
1343 | 1: mov pr=r31,-1 | 1214 | 1: mov pr=r31,-1 |
1344 | ;; | 1215 | ;; |
@@ -1360,26 +1231,26 @@ ENTRY(speculation_vector) | |||
1360 | * | 1231 | * |
1361 | * cr.imm contains zero_ext(imm21) | 1232 | * cr.imm contains zero_ext(imm21) |
1362 | */ | 1233 | */ |
1363 | mov r18=cr.iim | 1234 | MOV_FROM_IIM(r18) |
1364 | ;; | 1235 | ;; |
1365 | mov r17=cr.iip | 1236 | MOV_FROM_IIP(r17) |
1366 | shl r18=r18,43 // put sign bit in position (43=64-21) | 1237 | shl r18=r18,43 // put sign bit in position (43=64-21) |
1367 | ;; | 1238 | ;; |
1368 | 1239 | ||
1369 | mov r16=cr.ipsr | 1240 | MOV_FROM_IPSR(p0, r16) |
1370 | shr r18=r18,39 // sign extend (39=43-4) | 1241 | shr r18=r18,39 // sign extend (39=43-4) |
1371 | ;; | 1242 | ;; |
1372 | 1243 | ||
1373 | add r17=r17,r18 // now add the offset | 1244 | add r17=r17,r18 // now add the offset |
1374 | ;; | 1245 | ;; |
1375 | mov cr.iip=r17 | 1246 | MOV_TO_IIP(r17, r19) |
1376 | dep r16=0,r16,41,2 // clear EI | 1247 | dep r16=0,r16,41,2 // clear EI |
1377 | ;; | 1248 | ;; |
1378 | 1249 | ||
1379 | mov cr.ipsr=r16 | 1250 | MOV_TO_IPSR(p0, r16, r19) |
1380 | ;; | 1251 | ;; |
1381 | 1252 | ||
1382 | rfi // and go back | 1253 | RFI |
1383 | END(speculation_vector) | 1254 | END(speculation_vector) |
1384 | 1255 | ||
1385 | .org ia64_ivt+0x5800 | 1256 | .org ia64_ivt+0x5800 |
@@ -1517,11 +1388,11 @@ ENTRY(ia32_intercept) | |||
1517 | DBG_FAULT(46) | 1388 | DBG_FAULT(46) |
1518 | #ifdef CONFIG_IA32_SUPPORT | 1389 | #ifdef CONFIG_IA32_SUPPORT |
1519 | mov r31=pr | 1390 | mov r31=pr |
1520 | mov r16=cr.isr | 1391 | MOV_FROM_ISR(r16) |
1521 | ;; | 1392 | ;; |
1522 | extr.u r17=r16,16,8 // get ISR.code | 1393 | extr.u r17=r16,16,8 // get ISR.code |
1523 | mov r18=ar.eflag | 1394 | mov r18=ar.eflag |
1524 | mov r19=cr.iim // old eflag value | 1395 | MOV_FROM_IIM(r19) // old eflag value |
1525 | ;; | 1396 | ;; |
1526 | cmp.ne p6,p0=2,r17 | 1397 | cmp.ne p6,p0=2,r17 |
1527 | (p6) br.cond.spnt 1f // not a system flag fault | 1398 | (p6) br.cond.spnt 1f // not a system flag fault |
@@ -1533,7 +1404,7 @@ ENTRY(ia32_intercept) | |||
1533 | (p6) br.cond.spnt 1f // eflags.ac bit didn't change | 1404 | (p6) br.cond.spnt 1f // eflags.ac bit didn't change |
1534 | ;; | 1405 | ;; |
1535 | mov pr=r31,-1 // restore predicate registers | 1406 | mov pr=r31,-1 // restore predicate registers |
1536 | rfi | 1407 | RFI |
1537 | 1408 | ||
1538 | 1: | 1409 | 1: |
1539 | #endif // CONFIG_IA32_SUPPORT | 1410 | #endif // CONFIG_IA32_SUPPORT |
@@ -1673,6 +1544,137 @@ END(ia32_interrupt) | |||
1673 | DBG_FAULT(67) | 1544 | DBG_FAULT(67) |
1674 | FAULT(67) | 1545 | FAULT(67) |
1675 | 1546 | ||
1547 | //----------------------------------------------------------------------------------- | ||
1548 | // call do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address) | ||
1549 | ENTRY(page_fault) | ||
1550 | SSM_PSR_DT_AND_SRLZ_I | ||
1551 | ;; | ||
1552 | SAVE_MIN_WITH_COVER | ||
1553 | alloc r15=ar.pfs,0,0,3,0 | ||
1554 | MOV_FROM_IFA(out0) | ||
1555 | MOV_FROM_ISR(out1) | ||
1556 | SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r14, r3) | ||
1557 | adds r3=8,r2 // set up second base pointer | ||
1558 | SSM_PSR_I(p15, p15, r14) // restore psr.i | ||
1559 | movl r14=ia64_leave_kernel | ||
1560 | ;; | ||
1561 | SAVE_REST | ||
1562 | mov rp=r14 | ||
1563 | ;; | ||
1564 | adds out2=16,r12 // out2 = pointer to pt_regs | ||
1565 | br.call.sptk.many b6=ia64_do_page_fault // ignore return address | ||
1566 | END(page_fault) | ||
1567 | |||
1568 | ENTRY(non_syscall) | ||
1569 | mov ar.rsc=r27 // restore ar.rsc before SAVE_MIN_WITH_COVER | ||
1570 | ;; | ||
1571 | SAVE_MIN_WITH_COVER | ||
1572 | |||
1573 | // There is no particular reason for this code to be here, other than that | ||
1574 | // there happens to be space here that would go unused otherwise. If this | ||
1575 | // fault ever gets "unreserved", simply moved the following code to a more | ||
1576 | // suitable spot... | ||
1577 | |||
1578 | alloc r14=ar.pfs,0,0,2,0 | ||
1579 | MOV_FROM_IIM(out0) | ||
1580 | add out1=16,sp | ||
1581 | adds r3=8,r2 // set up second base pointer for SAVE_REST | ||
1582 | |||
1583 | SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r15, r24) | ||
1584 | // guarantee that interruption collection is on | ||
1585 | SSM_PSR_I(p15, p15, r15) // restore psr.i | ||
1586 | movl r15=ia64_leave_kernel | ||
1587 | ;; | ||
1588 | SAVE_REST | ||
1589 | mov rp=r15 | ||
1590 | ;; | ||
1591 | br.call.sptk.many b6=ia64_bad_break // avoid WAW on CFM and ignore return addr | ||
1592 | END(non_syscall) | ||
1593 | |||
1594 | ENTRY(__interrupt) | ||
1595 | DBG_FAULT(12) | ||
1596 | mov r31=pr // prepare to save predicates | ||
1597 | ;; | ||
1598 | SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3 | ||
1599 | SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r14) | ||
1600 | // ensure everybody knows psr.ic is back on | ||
1601 | adds r3=8,r2 // set up second base pointer for SAVE_REST | ||
1602 | ;; | ||
1603 | SAVE_REST | ||
1604 | ;; | ||
1605 | MCA_RECOVER_RANGE(interrupt) | ||
1606 | alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group | ||
1607 | MOV_FROM_IVR(out0, r8) // pass cr.ivr as first arg | ||
1608 | add out1=16,sp // pass pointer to pt_regs as second arg | ||
1609 | ;; | ||
1610 | srlz.d // make sure we see the effect of cr.ivr | ||
1611 | movl r14=ia64_leave_kernel | ||
1612 | ;; | ||
1613 | mov rp=r14 | ||
1614 | br.call.sptk.many b6=ia64_handle_irq | ||
1615 | END(__interrupt) | ||
1616 | |||
1617 | /* | ||
1618 | * There is no particular reason for this code to be here, other than that | ||
1619 | * there happens to be space here that would go unused otherwise. If this | ||
1620 | * fault ever gets "unreserved", simply moved the following code to a more | ||
1621 | * suitable spot... | ||
1622 | */ | ||
1623 | |||
1624 | ENTRY(dispatch_unaligned_handler) | ||
1625 | SAVE_MIN_WITH_COVER | ||
1626 | ;; | ||
1627 | alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!) | ||
1628 | MOV_FROM_IFA(out0) | ||
1629 | adds out1=16,sp | ||
1630 | |||
1631 | SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24) | ||
1632 | // guarantee that interruption collection is on | ||
1633 | SSM_PSR_I(p15, p15, r3) // restore psr.i | ||
1634 | adds r3=8,r2 // set up second base pointer | ||
1635 | ;; | ||
1636 | SAVE_REST | ||
1637 | movl r14=ia64_leave_kernel | ||
1638 | ;; | ||
1639 | mov rp=r14 | ||
1640 | br.sptk.many ia64_prepare_handle_unaligned | ||
1641 | END(dispatch_unaligned_handler) | ||
1642 | |||
1643 | /* | ||
1644 | * There is no particular reason for this code to be here, other than that | ||
1645 | * there happens to be space here that would go unused otherwise. If this | ||
1646 | * fault ever gets "unreserved", simply moved the following code to a more | ||
1647 | * suitable spot... | ||
1648 | */ | ||
1649 | |||
1650 | ENTRY(dispatch_to_fault_handler) | ||
1651 | /* | ||
1652 | * Input: | ||
1653 | * psr.ic: off | ||
1654 | * r19: fault vector number (e.g., 24 for General Exception) | ||
1655 | * r31: contains saved predicates (pr) | ||
1656 | */ | ||
1657 | SAVE_MIN_WITH_COVER_R19 | ||
1658 | alloc r14=ar.pfs,0,0,5,0 | ||
1659 | MOV_FROM_ISR(out1) | ||
1660 | MOV_FROM_IFA(out2) | ||
1661 | MOV_FROM_IIM(out3) | ||
1662 | MOV_FROM_ITIR(out4) | ||
1663 | ;; | ||
1664 | SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, out0) | ||
1665 | // guarantee that interruption collection is on | ||
1666 | mov out0=r15 | ||
1667 | ;; | ||
1668 | SSM_PSR_I(p15, p15, r3) // restore psr.i | ||
1669 | adds r3=8,r2 // set up second base pointer for SAVE_REST | ||
1670 | ;; | ||
1671 | SAVE_REST | ||
1672 | movl r14=ia64_leave_kernel | ||
1673 | ;; | ||
1674 | mov rp=r14 | ||
1675 | br.call.sptk.many b6=ia64_fault | ||
1676 | END(dispatch_to_fault_handler) | ||
1677 | |||
1676 | /* | 1678 | /* |
1677 | * Squatting in this space ... | 1679 | * Squatting in this space ... |
1678 | * | 1680 | * |
@@ -1686,11 +1688,10 @@ ENTRY(dispatch_illegal_op_fault) | |||
1686 | .prologue | 1688 | .prologue |
1687 | .body | 1689 | .body |
1688 | SAVE_MIN_WITH_COVER | 1690 | SAVE_MIN_WITH_COVER |
1689 | ssm psr.ic | PSR_DEFAULT_BITS | 1691 | SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24) |
1690 | ;; | 1692 | // guarantee that interruption collection is on |
1691 | srlz.i // guarantee that interruption collection is on | ||
1692 | ;; | 1693 | ;; |
1693 | (p15) ssm psr.i // restore psr.i | 1694 | SSM_PSR_I(p15, p15, r3) // restore psr.i |
1694 | adds r3=8,r2 // set up second base pointer for SAVE_REST | 1695 | adds r3=8,r2 // set up second base pointer for SAVE_REST |
1695 | ;; | 1696 | ;; |
1696 | alloc r14=ar.pfs,0,0,1,0 // must be first in insn group | 1697 | alloc r14=ar.pfs,0,0,1,0 // must be first in insn group |
@@ -1729,12 +1730,11 @@ END(dispatch_illegal_op_fault) | |||
1729 | ENTRY(dispatch_to_ia32_handler) | 1730 | ENTRY(dispatch_to_ia32_handler) |
1730 | SAVE_MIN | 1731 | SAVE_MIN |
1731 | ;; | 1732 | ;; |
1732 | mov r14=cr.isr | 1733 | MOV_FROM_ISR(r14) |
1733 | ssm psr.ic | PSR_DEFAULT_BITS | 1734 | SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24) |
1734 | ;; | 1735 | // guarantee that interruption collection is on |
1735 | srlz.i // guarantee that interruption collection is on | ||
1736 | ;; | 1736 | ;; |
1737 | (p15) ssm psr.i | 1737 | SSM_PSR_I(p15, p15, r3) |
1738 | adds r3=8,r2 // Base pointer for SAVE_REST | 1738 | adds r3=8,r2 // Base pointer for SAVE_REST |
1739 | ;; | 1739 | ;; |
1740 | SAVE_REST | 1740 | SAVE_REST |