diff options
author | Isaku Yamahata <yamahata@valinux.co.jp> | 2008-05-19 09:13:38 -0400 |
---|---|---|
committer | Tony Luck <tony.luck@intel.com> | 2008-05-27 18:03:29 -0400 |
commit | 498c5170472ff0c03a29d22dbd33225a0be038f4 (patch) | |
tree | e1f972fcbf3dc96219736723a1ff78452c5747e1 | |
parent | 02e32e36f42f8ea7ee6060d02f2d69ad5bad6d50 (diff) |
[IA64] pvops: paravirtualize ivt.S
Paravirtualize ivt.S, which implements the fault handlers in hand-written
assembly code.
These handlers include sensitive or performance-critical privileged
instructions, so they need paravirtualization.
Cc: Keith Owens <kaos@ocs.com.au>
Cc: tgingold@free.fr
Cc: Akio Takebe <takebe_akio@jp.fujitsu.com>
Signed-off-by: Yaozu (Eddie) Dong <eddie.dong@intel.com>
Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
Signed-off-by: Tony Luck <tony.luck@intel.com>
-rw-r--r-- | arch/ia64/kernel/ivt.S | 249 |
1 files changed, 122 insertions, 127 deletions
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S index 80b44ea052d7..23749ed3cf08 100644 --- a/arch/ia64/kernel/ivt.S +++ b/arch/ia64/kernel/ivt.S | |||
@@ -12,6 +12,14 @@ | |||
12 | * | 12 | * |
13 | * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP | 13 | * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP |
14 | * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT. | 14 | * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT. |
15 | * | ||
16 | * Copyright (C) 2005 Hewlett-Packard Co | ||
17 | * Dan Magenheimer <dan.magenheimer@hp.com> | ||
18 | * Xen paravirtualization | ||
19 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
20 | * VA Linux Systems Japan K.K. | ||
21 | * pv_ops. | ||
22 | * Yaozu (Eddie) Dong <eddie.dong@intel.com> | ||
15 | */ | 23 | */ |
16 | /* | 24 | /* |
17 | * This file defines the interruption vector table used by the CPU. | 25 | * This file defines the interruption vector table used by the CPU. |
@@ -102,13 +110,13 @@ ENTRY(vhpt_miss) | |||
102 | * - the faulting virtual address uses unimplemented address bits | 110 | * - the faulting virtual address uses unimplemented address bits |
103 | * - the faulting virtual address has no valid page table mapping | 111 | * - the faulting virtual address has no valid page table mapping |
104 | */ | 112 | */ |
105 | mov r16=cr.ifa // get address that caused the TLB miss | 113 | MOV_FROM_IFA(r16) // get address that caused the TLB miss |
106 | #ifdef CONFIG_HUGETLB_PAGE | 114 | #ifdef CONFIG_HUGETLB_PAGE |
107 | movl r18=PAGE_SHIFT | 115 | movl r18=PAGE_SHIFT |
108 | mov r25=cr.itir | 116 | MOV_FROM_ITIR(r25) |
109 | #endif | 117 | #endif |
110 | ;; | 118 | ;; |
111 | rsm psr.dt // use physical addressing for data | 119 | RSM_PSR_DT // use physical addressing for data |
112 | mov r31=pr // save the predicate registers | 120 | mov r31=pr // save the predicate registers |
113 | mov r19=IA64_KR(PT_BASE) // get page table base address | 121 | mov r19=IA64_KR(PT_BASE) // get page table base address |
114 | shl r21=r16,3 // shift bit 60 into sign bit | 122 | shl r21=r16,3 // shift bit 60 into sign bit |
@@ -168,21 +176,21 @@ ENTRY(vhpt_miss) | |||
168 | dep r21=r19,r20,3,(PAGE_SHIFT-3) // r21=pte_offset(pmd,addr) | 176 | dep r21=r19,r20,3,(PAGE_SHIFT-3) // r21=pte_offset(pmd,addr) |
169 | ;; | 177 | ;; |
170 | (p7) ld8 r18=[r21] // read *pte | 178 | (p7) ld8 r18=[r21] // read *pte |
171 | mov r19=cr.isr // cr.isr bit 32 tells us if this is an insn miss | 179 | MOV_FROM_ISR(r19) // cr.isr bit 32 tells us if this is an insn miss |
172 | ;; | 180 | ;; |
173 | (p7) tbit.z p6,p7=r18,_PAGE_P_BIT // page present bit cleared? | 181 | (p7) tbit.z p6,p7=r18,_PAGE_P_BIT // page present bit cleared? |
174 | mov r22=cr.iha // get the VHPT address that caused the TLB miss | 182 | MOV_FROM_IHA(r22) // get the VHPT address that caused the TLB miss |
175 | ;; // avoid RAW on p7 | 183 | ;; // avoid RAW on p7 |
176 | (p7) tbit.nz.unc p10,p11=r19,32 // is it an instruction TLB miss? | 184 | (p7) tbit.nz.unc p10,p11=r19,32 // is it an instruction TLB miss? |
177 | dep r23=0,r20,0,PAGE_SHIFT // clear low bits to get page address | 185 | dep r23=0,r20,0,PAGE_SHIFT // clear low bits to get page address |
178 | ;; | 186 | ;; |
179 | (p10) itc.i r18 // insert the instruction TLB entry | 187 | ITC_I_AND_D(p10, p11, r18, r24) // insert the instruction TLB entry and |
180 | (p11) itc.d r18 // insert the data TLB entry | 188 | // insert the data TLB entry |
181 | (p6) br.cond.spnt.many page_fault // handle bad address/page not present (page fault) | 189 | (p6) br.cond.spnt.many page_fault // handle bad address/page not present (page fault) |
182 | mov cr.ifa=r22 | 190 | MOV_TO_IFA(r22, r24) |
183 | 191 | ||
184 | #ifdef CONFIG_HUGETLB_PAGE | 192 | #ifdef CONFIG_HUGETLB_PAGE |
185 | (p8) mov cr.itir=r25 // change to default page-size for VHPT | 193 | MOV_TO_ITIR(p8, r25, r24) // change to default page-size for VHPT |
186 | #endif | 194 | #endif |
187 | 195 | ||
188 | /* | 196 | /* |
@@ -192,7 +200,7 @@ ENTRY(vhpt_miss) | |||
192 | */ | 200 | */ |
193 | adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23 | 201 | adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23 |
194 | ;; | 202 | ;; |
195 | (p7) itc.d r24 | 203 | ITC_D(p7, r24, r25) |
196 | ;; | 204 | ;; |
197 | #ifdef CONFIG_SMP | 205 | #ifdef CONFIG_SMP |
198 | /* | 206 | /* |
@@ -234,7 +242,7 @@ ENTRY(vhpt_miss) | |||
234 | #endif | 242 | #endif |
235 | 243 | ||
236 | mov pr=r31,-1 // restore predicate registers | 244 | mov pr=r31,-1 // restore predicate registers |
237 | rfi | 245 | RFI |
238 | END(vhpt_miss) | 246 | END(vhpt_miss) |
239 | 247 | ||
240 | .org ia64_ivt+0x400 | 248 | .org ia64_ivt+0x400 |
@@ -248,11 +256,11 @@ ENTRY(itlb_miss) | |||
248 | * mode, walk the page table, and then re-execute the PTE read and | 256 | * mode, walk the page table, and then re-execute the PTE read and |
249 | * go on normally after that. | 257 | * go on normally after that. |
250 | */ | 258 | */ |
251 | mov r16=cr.ifa // get virtual address | 259 | MOV_FROM_IFA(r16) // get virtual address |
252 | mov r29=b0 // save b0 | 260 | mov r29=b0 // save b0 |
253 | mov r31=pr // save predicates | 261 | mov r31=pr // save predicates |
254 | .itlb_fault: | 262 | .itlb_fault: |
255 | mov r17=cr.iha // get virtual address of PTE | 263 | MOV_FROM_IHA(r17) // get virtual address of PTE |
256 | movl r30=1f // load nested fault continuation point | 264 | movl r30=1f // load nested fault continuation point |
257 | ;; | 265 | ;; |
258 | 1: ld8 r18=[r17] // read *pte | 266 | 1: ld8 r18=[r17] // read *pte |
@@ -261,7 +269,7 @@ ENTRY(itlb_miss) | |||
261 | tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared? | 269 | tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared? |
262 | (p6) br.cond.spnt page_fault | 270 | (p6) br.cond.spnt page_fault |
263 | ;; | 271 | ;; |
264 | itc.i r18 | 272 | ITC_I(p0, r18, r19) |
265 | ;; | 273 | ;; |
266 | #ifdef CONFIG_SMP | 274 | #ifdef CONFIG_SMP |
267 | /* | 275 | /* |
@@ -278,7 +286,7 @@ ENTRY(itlb_miss) | |||
278 | (p7) ptc.l r16,r20 | 286 | (p7) ptc.l r16,r20 |
279 | #endif | 287 | #endif |
280 | mov pr=r31,-1 | 288 | mov pr=r31,-1 |
281 | rfi | 289 | RFI |
282 | END(itlb_miss) | 290 | END(itlb_miss) |
283 | 291 | ||
284 | .org ia64_ivt+0x0800 | 292 | .org ia64_ivt+0x0800 |
@@ -292,11 +300,11 @@ ENTRY(dtlb_miss) | |||
292 | * mode, walk the page table, and then re-execute the PTE read and | 300 | * mode, walk the page table, and then re-execute the PTE read and |
293 | * go on normally after that. | 301 | * go on normally after that. |
294 | */ | 302 | */ |
295 | mov r16=cr.ifa // get virtual address | 303 | MOV_FROM_IFA(r16) // get virtual address |
296 | mov r29=b0 // save b0 | 304 | mov r29=b0 // save b0 |
297 | mov r31=pr // save predicates | 305 | mov r31=pr // save predicates |
298 | dtlb_fault: | 306 | dtlb_fault: |
299 | mov r17=cr.iha // get virtual address of PTE | 307 | MOV_FROM_IHA(r17) // get virtual address of PTE |
300 | movl r30=1f // load nested fault continuation point | 308 | movl r30=1f // load nested fault continuation point |
301 | ;; | 309 | ;; |
302 | 1: ld8 r18=[r17] // read *pte | 310 | 1: ld8 r18=[r17] // read *pte |
@@ -305,7 +313,7 @@ dtlb_fault: | |||
305 | tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared? | 313 | tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared? |
306 | (p6) br.cond.spnt page_fault | 314 | (p6) br.cond.spnt page_fault |
307 | ;; | 315 | ;; |
308 | itc.d r18 | 316 | ITC_D(p0, r18, r19) |
309 | ;; | 317 | ;; |
310 | #ifdef CONFIG_SMP | 318 | #ifdef CONFIG_SMP |
311 | /* | 319 | /* |
@@ -322,7 +330,7 @@ dtlb_fault: | |||
322 | (p7) ptc.l r16,r20 | 330 | (p7) ptc.l r16,r20 |
323 | #endif | 331 | #endif |
324 | mov pr=r31,-1 | 332 | mov pr=r31,-1 |
325 | rfi | 333 | RFI |
326 | END(dtlb_miss) | 334 | END(dtlb_miss) |
327 | 335 | ||
328 | .org ia64_ivt+0x0c00 | 336 | .org ia64_ivt+0x0c00 |
@@ -330,9 +338,9 @@ END(dtlb_miss) | |||
330 | // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19) | 338 | // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19) |
331 | ENTRY(alt_itlb_miss) | 339 | ENTRY(alt_itlb_miss) |
332 | DBG_FAULT(3) | 340 | DBG_FAULT(3) |
333 | mov r16=cr.ifa // get address that caused the TLB miss | 341 | MOV_FROM_IFA(r16) // get address that caused the TLB miss |
334 | movl r17=PAGE_KERNEL | 342 | movl r17=PAGE_KERNEL |
335 | mov r21=cr.ipsr | 343 | MOV_FROM_IPSR(p0, r21) |
336 | movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) | 344 | movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) |
337 | mov r31=pr | 345 | mov r31=pr |
338 | ;; | 346 | ;; |
@@ -341,9 +349,9 @@ ENTRY(alt_itlb_miss) | |||
341 | ;; | 349 | ;; |
342 | cmp.gt p8,p0=6,r22 // user mode | 350 | cmp.gt p8,p0=6,r22 // user mode |
343 | ;; | 351 | ;; |
344 | (p8) thash r17=r16 | 352 | THASH(p8, r17, r16, r23) |
345 | ;; | 353 | ;; |
346 | (p8) mov cr.iha=r17 | 354 | MOV_TO_IHA(p8, r17, r23) |
347 | (p8) mov r29=b0 // save b0 | 355 | (p8) mov r29=b0 // save b0 |
348 | (p8) br.cond.dptk .itlb_fault | 356 | (p8) br.cond.dptk .itlb_fault |
349 | #endif | 357 | #endif |
@@ -358,9 +366,9 @@ ENTRY(alt_itlb_miss) | |||
358 | or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6 | 366 | or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6 |
359 | (p8) br.cond.spnt page_fault | 367 | (p8) br.cond.spnt page_fault |
360 | ;; | 368 | ;; |
361 | itc.i r19 // insert the TLB entry | 369 | ITC_I(p0, r19, r18) // insert the TLB entry |
362 | mov pr=r31,-1 | 370 | mov pr=r31,-1 |
363 | rfi | 371 | RFI |
364 | END(alt_itlb_miss) | 372 | END(alt_itlb_miss) |
365 | 373 | ||
366 | .org ia64_ivt+0x1000 | 374 | .org ia64_ivt+0x1000 |
@@ -368,11 +376,11 @@ END(alt_itlb_miss) | |||
368 | // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46) | 376 | // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46) |
369 | ENTRY(alt_dtlb_miss) | 377 | ENTRY(alt_dtlb_miss) |
370 | DBG_FAULT(4) | 378 | DBG_FAULT(4) |
371 | mov r16=cr.ifa // get address that caused the TLB miss | 379 | MOV_FROM_IFA(r16) // get address that caused the TLB miss |
372 | movl r17=PAGE_KERNEL | 380 | movl r17=PAGE_KERNEL |
373 | mov r20=cr.isr | 381 | MOV_FROM_ISR(r20) |
374 | movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) | 382 | movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) |
375 | mov r21=cr.ipsr | 383 | MOV_FROM_IPSR(p0, r21) |
376 | mov r31=pr | 384 | mov r31=pr |
377 | mov r24=PERCPU_ADDR | 385 | mov r24=PERCPU_ADDR |
378 | ;; | 386 | ;; |
@@ -381,9 +389,9 @@ ENTRY(alt_dtlb_miss) | |||
381 | ;; | 389 | ;; |
382 | cmp.gt p8,p0=6,r22 // access to region 0-5 | 390 | cmp.gt p8,p0=6,r22 // access to region 0-5 |
383 | ;; | 391 | ;; |
384 | (p8) thash r17=r16 | 392 | THASH(p8, r17, r16, r25) |
385 | ;; | 393 | ;; |
386 | (p8) mov cr.iha=r17 | 394 | MOV_TO_IHA(p8, r17, r25) |
387 | (p8) mov r29=b0 // save b0 | 395 | (p8) mov r29=b0 // save b0 |
388 | (p8) br.cond.dptk dtlb_fault | 396 | (p8) br.cond.dptk dtlb_fault |
389 | #endif | 397 | #endif |
@@ -402,7 +410,7 @@ ENTRY(alt_dtlb_miss) | |||
402 | tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on? | 410 | tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on? |
403 | ;; | 411 | ;; |
404 | (p10) sub r19=r19,r26 | 412 | (p10) sub r19=r19,r26 |
405 | (p10) mov cr.itir=r25 | 413 | MOV_TO_ITIR(p10, r25, r24) |
406 | cmp.ne p8,p0=r0,r23 | 414 | cmp.ne p8,p0=r0,r23 |
407 | (p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field | 415 | (p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field |
408 | (p12) dep r17=-1,r17,4,1 // set ma=UC for region 6 addr | 416 | (p12) dep r17=-1,r17,4,1 // set ma=UC for region 6 addr |
@@ -411,11 +419,11 @@ ENTRY(alt_dtlb_miss) | |||
411 | dep r21=-1,r21,IA64_PSR_ED_BIT,1 | 419 | dep r21=-1,r21,IA64_PSR_ED_BIT,1 |
412 | ;; | 420 | ;; |
413 | or r19=r19,r17 // insert PTE control bits into r19 | 421 | or r19=r19,r17 // insert PTE control bits into r19 |
414 | (p6) mov cr.ipsr=r21 | 422 | MOV_TO_IPSR(p6, r21, r24) |
415 | ;; | 423 | ;; |
416 | (p7) itc.d r19 // insert the TLB entry | 424 | ITC_D(p7, r19, r18) // insert the TLB entry |
417 | mov pr=r31,-1 | 425 | mov pr=r31,-1 |
418 | rfi | 426 | RFI |
419 | END(alt_dtlb_miss) | 427 | END(alt_dtlb_miss) |
420 | 428 | ||
421 | .org ia64_ivt+0x1400 | 429 | .org ia64_ivt+0x1400 |
@@ -444,10 +452,10 @@ ENTRY(nested_dtlb_miss) | |||
444 | * | 452 | * |
445 | * Clobbered: b0, r18, r19, r21, r22, psr.dt (cleared) | 453 | * Clobbered: b0, r18, r19, r21, r22, psr.dt (cleared) |
446 | */ | 454 | */ |
447 | rsm psr.dt // switch to using physical data addressing | 455 | RSM_PSR_DT // switch to using physical data addressing |
448 | mov r19=IA64_KR(PT_BASE) // get the page table base address | 456 | mov r19=IA64_KR(PT_BASE) // get the page table base address |
449 | shl r21=r16,3 // shift bit 60 into sign bit | 457 | shl r21=r16,3 // shift bit 60 into sign bit |
450 | mov r18=cr.itir | 458 | MOV_FROM_ITIR(r18) |
451 | ;; | 459 | ;; |
452 | shr.u r17=r16,61 // get the region number into r17 | 460 | shr.u r17=r16,61 // get the region number into r17 |
453 | extr.u r18=r18,2,6 // get the faulting page size | 461 | extr.u r18=r18,2,6 // get the faulting page size |
@@ -510,21 +518,15 @@ END(ikey_miss) | |||
510 | //----------------------------------------------------------------------------------- | 518 | //----------------------------------------------------------------------------------- |
511 | // call do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address) | 519 | // call do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address) |
512 | ENTRY(page_fault) | 520 | ENTRY(page_fault) |
513 | ssm psr.dt | 521 | SSM_PSR_DT_AND_SRLZ_I |
514 | ;; | ||
515 | srlz.i | ||
516 | ;; | 522 | ;; |
517 | SAVE_MIN_WITH_COVER | 523 | SAVE_MIN_WITH_COVER |
518 | alloc r15=ar.pfs,0,0,3,0 | 524 | alloc r15=ar.pfs,0,0,3,0 |
519 | mov out0=cr.ifa | 525 | MOV_FROM_IFA(out0) |
520 | mov out1=cr.isr | 526 | MOV_FROM_ISR(out1) |
527 | SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r14, r3) | ||
521 | adds r3=8,r2 // set up second base pointer | 528 | adds r3=8,r2 // set up second base pointer |
522 | ;; | 529 | SSM_PSR_I(p15, p15, r14) // restore psr.i |
523 | ssm psr.ic | PSR_DEFAULT_BITS | ||
524 | ;; | ||
525 | srlz.i // guarantee that interruption collectin is on | ||
526 | ;; | ||
527 | (p15) ssm psr.i // restore psr.i | ||
528 | movl r14=ia64_leave_kernel | 530 | movl r14=ia64_leave_kernel |
529 | ;; | 531 | ;; |
530 | SAVE_REST | 532 | SAVE_REST |
@@ -556,10 +558,10 @@ ENTRY(dirty_bit) | |||
556 | * page table TLB entry isn't present, we take a nested TLB miss hit where we look | 558 | * page table TLB entry isn't present, we take a nested TLB miss hit where we look |
557 | * up the physical address of the L3 PTE and then continue at label 1 below. | 559 | * up the physical address of the L3 PTE and then continue at label 1 below. |
558 | */ | 560 | */ |
559 | mov r16=cr.ifa // get the address that caused the fault | 561 | MOV_FROM_IFA(r16) // get the address that caused the fault |
560 | movl r30=1f // load continuation point in case of nested fault | 562 | movl r30=1f // load continuation point in case of nested fault |
561 | ;; | 563 | ;; |
562 | thash r17=r16 // compute virtual address of L3 PTE | 564 | THASH(p0, r17, r16, r18) // compute virtual address of L3 PTE |
563 | mov r29=b0 // save b0 in case of nested fault | 565 | mov r29=b0 // save b0 in case of nested fault |
564 | mov r31=pr // save pr | 566 | mov r31=pr // save pr |
565 | #ifdef CONFIG_SMP | 567 | #ifdef CONFIG_SMP |
@@ -576,7 +578,7 @@ ENTRY(dirty_bit) | |||
576 | ;; | 578 | ;; |
577 | (p6) cmp.eq p6,p7=r26,r18 // Only compare if page is present | 579 | (p6) cmp.eq p6,p7=r26,r18 // Only compare if page is present |
578 | ;; | 580 | ;; |
579 | (p6) itc.d r25 // install updated PTE | 581 | ITC_D(p6, r25, r18) // install updated PTE |
580 | ;; | 582 | ;; |
581 | /* | 583 | /* |
582 | * Tell the assemblers dependency-violation checker that the above "itc" instructions | 584 | * Tell the assemblers dependency-violation checker that the above "itc" instructions |
@@ -602,7 +604,7 @@ ENTRY(dirty_bit) | |||
602 | itc.d r18 // install updated PTE | 604 | itc.d r18 // install updated PTE |
603 | #endif | 605 | #endif |
604 | mov pr=r31,-1 // restore pr | 606 | mov pr=r31,-1 // restore pr |
605 | rfi | 607 | RFI |
606 | END(dirty_bit) | 608 | END(dirty_bit) |
607 | 609 | ||
608 | .org ia64_ivt+0x2400 | 610 | .org ia64_ivt+0x2400 |
@@ -611,22 +613,22 @@ END(dirty_bit) | |||
611 | ENTRY(iaccess_bit) | 613 | ENTRY(iaccess_bit) |
612 | DBG_FAULT(9) | 614 | DBG_FAULT(9) |
613 | // Like Entry 8, except for instruction access | 615 | // Like Entry 8, except for instruction access |
614 | mov r16=cr.ifa // get the address that caused the fault | 616 | MOV_FROM_IFA(r16) // get the address that caused the fault |
615 | movl r30=1f // load continuation point in case of nested fault | 617 | movl r30=1f // load continuation point in case of nested fault |
616 | mov r31=pr // save predicates | 618 | mov r31=pr // save predicates |
617 | #ifdef CONFIG_ITANIUM | 619 | #ifdef CONFIG_ITANIUM |
618 | /* | 620 | /* |
619 | * Erratum 10 (IFA may contain incorrect address) has "NoFix" status. | 621 | * Erratum 10 (IFA may contain incorrect address) has "NoFix" status. |
620 | */ | 622 | */ |
621 | mov r17=cr.ipsr | 623 | MOV_FROM_IPSR(p0, r17) |
622 | ;; | 624 | ;; |
623 | mov r18=cr.iip | 625 | MOV_FROM_IIP(r18) |
624 | tbit.z p6,p0=r17,IA64_PSR_IS_BIT // IA64 instruction set? | 626 | tbit.z p6,p0=r17,IA64_PSR_IS_BIT // IA64 instruction set? |
625 | ;; | 627 | ;; |
626 | (p6) mov r16=r18 // if so, use cr.iip instead of cr.ifa | 628 | (p6) mov r16=r18 // if so, use cr.iip instead of cr.ifa |
627 | #endif /* CONFIG_ITANIUM */ | 629 | #endif /* CONFIG_ITANIUM */ |
628 | ;; | 630 | ;; |
629 | thash r17=r16 // compute virtual address of L3 PTE | 631 | THASH(p0, r17, r16, r18) // compute virtual address of L3 PTE |
630 | mov r29=b0 // save b0 in case of nested fault) | 632 | mov r29=b0 // save b0 in case of nested fault) |
631 | #ifdef CONFIG_SMP | 633 | #ifdef CONFIG_SMP |
632 | mov r28=ar.ccv // save ar.ccv | 634 | mov r28=ar.ccv // save ar.ccv |
@@ -642,7 +644,7 @@ ENTRY(iaccess_bit) | |||
642 | ;; | 644 | ;; |
643 | (p6) cmp.eq p6,p7=r26,r18 // Only if page present | 645 | (p6) cmp.eq p6,p7=r26,r18 // Only if page present |
644 | ;; | 646 | ;; |
645 | (p6) itc.i r25 // install updated PTE | 647 | ITC_I(p6, r25, r26) // install updated PTE |
646 | ;; | 648 | ;; |
647 | /* | 649 | /* |
648 | * Tell the assemblers dependency-violation checker that the above "itc" instructions | 650 | * Tell the assemblers dependency-violation checker that the above "itc" instructions |
@@ -668,7 +670,7 @@ ENTRY(iaccess_bit) | |||
668 | itc.i r18 // install updated PTE | 670 | itc.i r18 // install updated PTE |
669 | #endif /* !CONFIG_SMP */ | 671 | #endif /* !CONFIG_SMP */ |
670 | mov pr=r31,-1 | 672 | mov pr=r31,-1 |
671 | rfi | 673 | RFI |
672 | END(iaccess_bit) | 674 | END(iaccess_bit) |
673 | 675 | ||
674 | .org ia64_ivt+0x2800 | 676 | .org ia64_ivt+0x2800 |
@@ -677,10 +679,10 @@ END(iaccess_bit) | |||
677 | ENTRY(daccess_bit) | 679 | ENTRY(daccess_bit) |
678 | DBG_FAULT(10) | 680 | DBG_FAULT(10) |
679 | // Like Entry 8, except for data access | 681 | // Like Entry 8, except for data access |
680 | mov r16=cr.ifa // get the address that caused the fault | 682 | MOV_FROM_IFA(r16) // get the address that caused the fault |
681 | movl r30=1f // load continuation point in case of nested fault | 683 | movl r30=1f // load continuation point in case of nested fault |
682 | ;; | 684 | ;; |
683 | thash r17=r16 // compute virtual address of L3 PTE | 685 | THASH(p0, r17, r16, r18) // compute virtual address of L3 PTE |
684 | mov r31=pr | 686 | mov r31=pr |
685 | mov r29=b0 // save b0 in case of nested fault) | 687 | mov r29=b0 // save b0 in case of nested fault) |
686 | #ifdef CONFIG_SMP | 688 | #ifdef CONFIG_SMP |
@@ -697,7 +699,7 @@ ENTRY(daccess_bit) | |||
697 | ;; | 699 | ;; |
698 | (p6) cmp.eq p6,p7=r26,r18 // Only if page is present | 700 | (p6) cmp.eq p6,p7=r26,r18 // Only if page is present |
699 | ;; | 701 | ;; |
700 | (p6) itc.d r25 // install updated PTE | 702 | ITC_D(p6, r25, r26) // install updated PTE |
701 | /* | 703 | /* |
702 | * Tell the assemblers dependency-violation checker that the above "itc" instructions | 704 | * Tell the assemblers dependency-violation checker that the above "itc" instructions |
703 | * cannot possibly affect the following loads: | 705 | * cannot possibly affect the following loads: |
@@ -721,7 +723,7 @@ ENTRY(daccess_bit) | |||
721 | #endif | 723 | #endif |
722 | mov b0=r29 // restore b0 | 724 | mov b0=r29 // restore b0 |
723 | mov pr=r31,-1 | 725 | mov pr=r31,-1 |
724 | rfi | 726 | RFI |
725 | END(daccess_bit) | 727 | END(daccess_bit) |
726 | 728 | ||
727 | .org ia64_ivt+0x2c00 | 729 | .org ia64_ivt+0x2c00 |
@@ -745,10 +747,10 @@ ENTRY(break_fault) | |||
745 | */ | 747 | */ |
746 | DBG_FAULT(11) | 748 | DBG_FAULT(11) |
747 | mov.m r16=IA64_KR(CURRENT) // M2 r16 <- current task (12 cyc) | 749 | mov.m r16=IA64_KR(CURRENT) // M2 r16 <- current task (12 cyc) |
748 | mov r29=cr.ipsr // M2 (12 cyc) | 750 | MOV_FROM_IPSR(p0, r29) // M2 (12 cyc) |
749 | mov r31=pr // I0 (2 cyc) | 751 | mov r31=pr // I0 (2 cyc) |
750 | 752 | ||
751 | mov r17=cr.iim // M2 (2 cyc) | 753 | MOV_FROM_IIM(r17) // M2 (2 cyc) |
752 | mov.m r27=ar.rsc // M2 (12 cyc) | 754 | mov.m r27=ar.rsc // M2 (12 cyc) |
753 | mov r18=__IA64_BREAK_SYSCALL // A | 755 | mov r18=__IA64_BREAK_SYSCALL // A |
754 | 756 | ||
@@ -767,7 +769,7 @@ ENTRY(break_fault) | |||
767 | nop.m 0 | 769 | nop.m 0 |
768 | movl r30=sys_call_table // X | 770 | movl r30=sys_call_table // X |
769 | 771 | ||
770 | mov r28=cr.iip // M2 (2 cyc) | 772 | MOV_FROM_IIP(r28) // M2 (2 cyc) |
771 | cmp.eq p0,p7=r18,r17 // I0 is this a system call? | 773 | cmp.eq p0,p7=r18,r17 // I0 is this a system call? |
772 | (p7) br.cond.spnt non_syscall // B no -> | 774 | (p7) br.cond.spnt non_syscall // B no -> |
773 | // | 775 | // |
@@ -864,18 +866,17 @@ ENTRY(break_fault) | |||
864 | #endif | 866 | #endif |
865 | mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0 | 867 | mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0 |
866 | nop 0 | 868 | nop 0 |
867 | bsw.1 // B (6 cyc) regs are saved, switch to bank 1 | 869 | BSW_1(r2, r14) // B (6 cyc) regs are saved, switch to bank 1 |
868 | ;; | 870 | ;; |
869 | 871 | ||
870 | ssm psr.ic | PSR_DEFAULT_BITS // M2 now it's safe to re-enable intr.-collection | 872 | SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r16) // M2 now it's safe to re-enable intr.-collection |
873 | // M0 ensure interruption collection is on | ||
871 | movl r3=ia64_ret_from_syscall // X | 874 | movl r3=ia64_ret_from_syscall // X |
872 | ;; | 875 | ;; |
873 | |||
874 | srlz.i // M0 ensure interruption collection is on | ||
875 | mov rp=r3 // I0 set the real return addr | 876 | mov rp=r3 // I0 set the real return addr |
876 | (p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT | 877 | (p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT |
877 | 878 | ||
878 | (p15) ssm psr.i // M2 restore psr.i | 879 | SSM_PSR_I(p15, p15, r16) // M2 restore psr.i |
879 | (p14) br.call.sptk.many b6=b6 // B invoke syscall-handker (ignore return addr) | 880 | (p14) br.call.sptk.many b6=b6 // B invoke syscall-handker (ignore return addr) |
880 | br.cond.spnt.many ia64_trace_syscall // B do syscall-tracing thingamagic | 881 | br.cond.spnt.many ia64_trace_syscall // B do syscall-tracing thingamagic |
881 | // NOT REACHED | 882 | // NOT REACHED |
@@ -899,16 +900,15 @@ ENTRY(interrupt) | |||
899 | mov r31=pr // prepare to save predicates | 900 | mov r31=pr // prepare to save predicates |
900 | ;; | 901 | ;; |
901 | SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3 | 902 | SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3 |
902 | ssm psr.ic | PSR_DEFAULT_BITS | 903 | SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r14) |
903 | ;; | 904 | // ensure everybody knows psr.ic is back on |
904 | adds r3=8,r2 // set up second base pointer for SAVE_REST | 905 | adds r3=8,r2 // set up second base pointer for SAVE_REST |
905 | srlz.i // ensure everybody knows psr.ic is back on | ||
906 | ;; | 906 | ;; |
907 | SAVE_REST | 907 | SAVE_REST |
908 | ;; | 908 | ;; |
909 | MCA_RECOVER_RANGE(interrupt) | 909 | MCA_RECOVER_RANGE(interrupt) |
910 | alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group | 910 | alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group |
911 | mov out0=cr.ivr // pass cr.ivr as first arg | 911 | MOV_FROM_IVR(out0, r8) // pass cr.ivr as first arg |
912 | add out1=16,sp // pass pointer to pt_regs as second arg | 912 | add out1=16,sp // pass pointer to pt_regs as second arg |
913 | ;; | 913 | ;; |
914 | srlz.d // make sure we see the effect of cr.ivr | 914 | srlz.d // make sure we see the effect of cr.ivr |
@@ -978,6 +978,7 @@ END(interrupt) | |||
978 | * - ar.fpsr: set to kernel settings | 978 | * - ar.fpsr: set to kernel settings |
979 | * - b6: preserved (same as on entry) | 979 | * - b6: preserved (same as on entry) |
980 | */ | 980 | */ |
981 | #ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE | ||
981 | GLOBAL_ENTRY(ia64_syscall_setup) | 982 | GLOBAL_ENTRY(ia64_syscall_setup) |
982 | #if PT(B6) != 0 | 983 | #if PT(B6) != 0 |
983 | # error This code assumes that b6 is the first field in pt_regs. | 984 | # error This code assumes that b6 is the first field in pt_regs. |
@@ -1069,6 +1070,7 @@ GLOBAL_ENTRY(ia64_syscall_setup) | |||
1069 | (p10) mov r8=-EINVAL | 1070 | (p10) mov r8=-EINVAL |
1070 | br.ret.sptk.many b7 | 1071 | br.ret.sptk.many b7 |
1071 | END(ia64_syscall_setup) | 1072 | END(ia64_syscall_setup) |
1073 | #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */ | ||
1072 | 1074 | ||
1073 | .org ia64_ivt+0x3c00 | 1075 | .org ia64_ivt+0x3c00 |
1074 | ///////////////////////////////////////////////////////////////////////////////////////// | 1076 | ///////////////////////////////////////////////////////////////////////////////////////// |
@@ -1082,7 +1084,7 @@ END(ia64_syscall_setup) | |||
1082 | DBG_FAULT(16) | 1084 | DBG_FAULT(16) |
1083 | FAULT(16) | 1085 | FAULT(16) |
1084 | 1086 | ||
1085 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 1087 | #if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(__IA64_ASM_PARAVIRTUALIZED_NATIVE) |
1086 | /* | 1088 | /* |
1087 | * There is no particular reason for this code to be here, other than | 1089 | * There is no particular reason for this code to be here, other than |
1088 | * that there happens to be space here that would go unused otherwise. | 1090 | * that there happens to be space here that would go unused otherwise. |
@@ -1092,7 +1094,7 @@ END(ia64_syscall_setup) | |||
1092 | * account_sys_enter is called from SAVE_MIN* macros if accounting is | 1094 | * account_sys_enter is called from SAVE_MIN* macros if accounting is |
1093 | * enabled and if the macro is entered from user mode. | 1095 | * enabled and if the macro is entered from user mode. |
1094 | */ | 1096 | */ |
1095 | ENTRY(account_sys_enter) | 1097 | GLOBAL_ENTRY(account_sys_enter) |
1096 | // mov.m r20=ar.itc is called in advance, and r13 is current | 1098 | // mov.m r20=ar.itc is called in advance, and r13 is current |
1097 | add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13 | 1099 | add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13 |
1098 | add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13 | 1100 | add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13 |
@@ -1134,15 +1136,13 @@ ENTRY(non_syscall) | |||
1134 | // suitable spot... | 1136 | // suitable spot... |
1135 | 1137 | ||
1136 | alloc r14=ar.pfs,0,0,2,0 | 1138 | alloc r14=ar.pfs,0,0,2,0 |
1137 | mov out0=cr.iim | 1139 | MOV_FROM_IIM(out0) |
1138 | add out1=16,sp | 1140 | add out1=16,sp |
1139 | adds r3=8,r2 // set up second base pointer for SAVE_REST | 1141 | adds r3=8,r2 // set up second base pointer for SAVE_REST |
1140 | 1142 | ||
1141 | ssm psr.ic | PSR_DEFAULT_BITS | 1143 | SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r15, r24) |
1142 | ;; | 1144 | // guarantee that interruption collection is on |
1143 | srlz.i // guarantee that interruption collection is on | 1145 | SSM_PSR_I(p15, p15, r15) // restore psr.i |
1144 | ;; | ||
1145 | (p15) ssm psr.i // restore psr.i | ||
1146 | movl r15=ia64_leave_kernel | 1146 | movl r15=ia64_leave_kernel |
1147 | ;; | 1147 | ;; |
1148 | SAVE_REST | 1148 | SAVE_REST |
@@ -1168,14 +1168,12 @@ ENTRY(dispatch_unaligned_handler) | |||
1168 | SAVE_MIN_WITH_COVER | 1168 | SAVE_MIN_WITH_COVER |
1169 | ;; | 1169 | ;; |
1170 | alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!) | 1170 | alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!) |
1171 | mov out0=cr.ifa | 1171 | MOV_FROM_IFA(out0) |
1172 | adds out1=16,sp | 1172 | adds out1=16,sp |
1173 | 1173 | ||
1174 | ssm psr.ic | PSR_DEFAULT_BITS | 1174 | SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24) |
1175 | ;; | 1175 | // guarantee that interruption collection is on |
1176 | srlz.i // guarantee that interruption collection is on | 1176 | SSM_PSR_I(p15, p15, r3) // restore psr.i |
1177 | ;; | ||
1178 | (p15) ssm psr.i // restore psr.i | ||
1179 | adds r3=8,r2 // set up second base pointer | 1177 | adds r3=8,r2 // set up second base pointer |
1180 | ;; | 1178 | ;; |
1181 | SAVE_REST | 1179 | SAVE_REST |
@@ -1207,17 +1205,16 @@ ENTRY(dispatch_to_fault_handler) | |||
1207 | */ | 1205 | */ |
1208 | SAVE_MIN_WITH_COVER_R19 | 1206 | SAVE_MIN_WITH_COVER_R19 |
1209 | alloc r14=ar.pfs,0,0,5,0 | 1207 | alloc r14=ar.pfs,0,0,5,0 |
1210 | mov out0=r15 | 1208 | MOV_FROM_ISR(out1) |
1211 | mov out1=cr.isr | 1209 | MOV_FROM_IFA(out2) |
1212 | mov out2=cr.ifa | 1210 | MOV_FROM_IIM(out3) |
1213 | mov out3=cr.iim | 1211 | MOV_FROM_ITIR(out4) |
1214 | mov out4=cr.itir | ||
1215 | ;; | 1212 | ;; |
1216 | ssm psr.ic | PSR_DEFAULT_BITS | 1213 | SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, out0) |
1217 | ;; | 1214 | // guarantee that interruption collection is on |
1218 | srlz.i // guarantee that interruption collection is on | 1215 | mov out0=r15 |
1219 | ;; | 1216 | ;; |
1220 | (p15) ssm psr.i // restore psr.i | 1217 | SSM_PSR_I(p15, p15, r3) // restore psr.i |
1221 | adds r3=8,r2 // set up second base pointer for SAVE_REST | 1218 | adds r3=8,r2 // set up second base pointer for SAVE_REST |
1222 | ;; | 1219 | ;; |
1223 | SAVE_REST | 1220 | SAVE_REST |
@@ -1236,8 +1233,8 @@ END(dispatch_to_fault_handler) | |||
1236 | // 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49) | 1233 | // 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49) |
1237 | ENTRY(page_not_present) | 1234 | ENTRY(page_not_present) |
1238 | DBG_FAULT(20) | 1235 | DBG_FAULT(20) |
1239 | mov r16=cr.ifa | 1236 | MOV_FROM_IFA(r16) |
1240 | rsm psr.dt | 1237 | RSM_PSR_DT |
1241 | /* | 1238 | /* |
1242 | * The Linux page fault handler doesn't expect non-present pages to be in | 1239 | * The Linux page fault handler doesn't expect non-present pages to be in |
1243 | * the TLB. Flush the existing entry now, so we meet that expectation. | 1240 | * the TLB. Flush the existing entry now, so we meet that expectation. |
@@ -1256,8 +1253,8 @@ END(page_not_present) | |||
1256 | // 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52) | 1253 | // 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52) |
1257 | ENTRY(key_permission) | 1254 | ENTRY(key_permission) |
1258 | DBG_FAULT(21) | 1255 | DBG_FAULT(21) |
1259 | mov r16=cr.ifa | 1256 | MOV_FROM_IFA(r16) |
1260 | rsm psr.dt | 1257 | RSM_PSR_DT |
1261 | mov r31=pr | 1258 | mov r31=pr |
1262 | ;; | 1259 | ;; |
1263 | srlz.d | 1260 | srlz.d |
@@ -1269,8 +1266,8 @@ END(key_permission) | |||
1269 | // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26) | 1266 | // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26) |
1270 | ENTRY(iaccess_rights) | 1267 | ENTRY(iaccess_rights) |
1271 | DBG_FAULT(22) | 1268 | DBG_FAULT(22) |
1272 | mov r16=cr.ifa | 1269 | MOV_FROM_IFA(r16) |
1273 | rsm psr.dt | 1270 | RSM_PSR_DT |
1274 | mov r31=pr | 1271 | mov r31=pr |
1275 | ;; | 1272 | ;; |
1276 | srlz.d | 1273 | srlz.d |
@@ -1282,8 +1279,8 @@ END(iaccess_rights) | |||
1282 | // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53) | 1279 | // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53) |
1283 | ENTRY(daccess_rights) | 1280 | ENTRY(daccess_rights) |
1284 | DBG_FAULT(23) | 1281 | DBG_FAULT(23) |
1285 | mov r16=cr.ifa | 1282 | MOV_FROM_IFA(r16) |
1286 | rsm psr.dt | 1283 | RSM_PSR_DT |
1287 | mov r31=pr | 1284 | mov r31=pr |
1288 | ;; | 1285 | ;; |
1289 | srlz.d | 1286 | srlz.d |
@@ -1295,7 +1292,7 @@ END(daccess_rights) | |||
1295 | // 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39) | 1292 | // 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39) |
1296 | ENTRY(general_exception) | 1293 | ENTRY(general_exception) |
1297 | DBG_FAULT(24) | 1294 | DBG_FAULT(24) |
1298 | mov r16=cr.isr | 1295 | MOV_FROM_ISR(r16) |
1299 | mov r31=pr | 1296 | mov r31=pr |
1300 | ;; | 1297 | ;; |
1301 | cmp4.eq p6,p0=0,r16 | 1298 | cmp4.eq p6,p0=0,r16 |
@@ -1324,8 +1321,8 @@ END(disabled_fp_reg) | |||
1324 | ENTRY(nat_consumption) | 1321 | ENTRY(nat_consumption) |
1325 | DBG_FAULT(26) | 1322 | DBG_FAULT(26) |
1326 | 1323 | ||
1327 | mov r16=cr.ipsr | 1324 | MOV_FROM_IPSR(p0, r16) |
1328 | mov r17=cr.isr | 1325 | MOV_FROM_ISR(r17) |
1329 | mov r31=pr // save PR | 1326 | mov r31=pr // save PR |
1330 | ;; | 1327 | ;; |
1331 | and r18=0xf,r17 // r18 = cr.isr.code{3:0} | 1328 | and r18=0xf,r17 // r18 = cr.isr.code{3:0} |
@@ -1335,10 +1332,10 @@ ENTRY(nat_consumption) | |||
1335 | dep r16=-1,r16,IA64_PSR_ED_BIT,1 | 1332 | dep r16=-1,r16,IA64_PSR_ED_BIT,1 |
1336 | (p6) br.cond.spnt 1f // branch if (cr.isr.na == 0 || cr.isr.code{3:0} != LFETCH) | 1333 | (p6) br.cond.spnt 1f // branch if (cr.isr.na == 0 || cr.isr.code{3:0} != LFETCH) |
1337 | ;; | 1334 | ;; |
1338 | mov cr.ipsr=r16 // set cr.ipsr.na | 1335 | MOV_TO_IPSR(p0, r16, r18) |
1339 | mov pr=r31,-1 | 1336 | mov pr=r31,-1 |
1340 | ;; | 1337 | ;; |
1341 | rfi | 1338 | RFI |
1342 | 1339 | ||
1343 | 1: mov pr=r31,-1 | 1340 | 1: mov pr=r31,-1 |
1344 | ;; | 1341 | ;; |
@@ -1360,26 +1357,26 @@ ENTRY(speculation_vector) | |||
1360 | * | 1357 | * |
1361 | * cr.imm contains zero_ext(imm21) | 1358 | * cr.imm contains zero_ext(imm21) |
1362 | */ | 1359 | */ |
1363 | mov r18=cr.iim | 1360 | MOV_FROM_IIM(r18) |
1364 | ;; | 1361 | ;; |
1365 | mov r17=cr.iip | 1362 | MOV_FROM_IIP(r17) |
1366 | shl r18=r18,43 // put sign bit in position (43=64-21) | 1363 | shl r18=r18,43 // put sign bit in position (43=64-21) |
1367 | ;; | 1364 | ;; |
1368 | 1365 | ||
1369 | mov r16=cr.ipsr | 1366 | MOV_FROM_IPSR(p0, r16) |
1370 | shr r18=r18,39 // sign extend (39=43-4) | 1367 | shr r18=r18,39 // sign extend (39=43-4) |
1371 | ;; | 1368 | ;; |
1372 | 1369 | ||
1373 | add r17=r17,r18 // now add the offset | 1370 | add r17=r17,r18 // now add the offset |
1374 | ;; | 1371 | ;; |
1375 | mov cr.iip=r17 | 1372 | MOV_TO_IIP(r17, r19) |
1376 | dep r16=0,r16,41,2 // clear EI | 1373 | dep r16=0,r16,41,2 // clear EI |
1377 | ;; | 1374 | ;; |
1378 | 1375 | ||
1379 | mov cr.ipsr=r16 | 1376 | MOV_TO_IPSR(p0, r16, r19) |
1380 | ;; | 1377 | ;; |
1381 | 1378 | ||
1382 | rfi // and go back | 1379 | RFI |
1383 | END(speculation_vector) | 1380 | END(speculation_vector) |
1384 | 1381 | ||
1385 | .org ia64_ivt+0x5800 | 1382 | .org ia64_ivt+0x5800 |
@@ -1517,11 +1514,11 @@ ENTRY(ia32_intercept) | |||
1517 | DBG_FAULT(46) | 1514 | DBG_FAULT(46) |
1518 | #ifdef CONFIG_IA32_SUPPORT | 1515 | #ifdef CONFIG_IA32_SUPPORT |
1519 | mov r31=pr | 1516 | mov r31=pr |
1520 | mov r16=cr.isr | 1517 | MOV_FROM_ISR(r16) |
1521 | ;; | 1518 | ;; |
1522 | extr.u r17=r16,16,8 // get ISR.code | 1519 | extr.u r17=r16,16,8 // get ISR.code |
1523 | mov r18=ar.eflag | 1520 | mov r18=ar.eflag |
1524 | mov r19=cr.iim // old eflag value | 1521 | MOV_FROM_IIM(r19) // old eflag value |
1525 | ;; | 1522 | ;; |
1526 | cmp.ne p6,p0=2,r17 | 1523 | cmp.ne p6,p0=2,r17 |
1527 | (p6) br.cond.spnt 1f // not a system flag fault | 1524 | (p6) br.cond.spnt 1f // not a system flag fault |
@@ -1533,7 +1530,7 @@ ENTRY(ia32_intercept) | |||
1533 | (p6) br.cond.spnt 1f // eflags.ac bit didn't change | 1530 | (p6) br.cond.spnt 1f // eflags.ac bit didn't change |
1534 | ;; | 1531 | ;; |
1535 | mov pr=r31,-1 // restore predicate registers | 1532 | mov pr=r31,-1 // restore predicate registers |
1536 | rfi | 1533 | RFI |
1537 | 1534 | ||
1538 | 1: | 1535 | 1: |
1539 | #endif // CONFIG_IA32_SUPPORT | 1536 | #endif // CONFIG_IA32_SUPPORT |
@@ -1686,11 +1683,10 @@ ENTRY(dispatch_illegal_op_fault) | |||
1686 | .prologue | 1683 | .prologue |
1687 | .body | 1684 | .body |
1688 | SAVE_MIN_WITH_COVER | 1685 | SAVE_MIN_WITH_COVER |
1689 | ssm psr.ic | PSR_DEFAULT_BITS | 1686 | SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24) |
1687 | // guarantee that interruption collection is on | ||
1690 | ;; | 1688 | ;; |
1691 | srlz.i // guarantee that interruption collection is on | 1689 | SSM_PSR_I(p15, p15, r3) // restore psr.i |
1692 | ;; | ||
1693 | (p15) ssm psr.i // restore psr.i | ||
1694 | adds r3=8,r2 // set up second base pointer for SAVE_REST | 1690 | adds r3=8,r2 // set up second base pointer for SAVE_REST |
1695 | ;; | 1691 | ;; |
1696 | alloc r14=ar.pfs,0,0,1,0 // must be first in insn group | 1692 | alloc r14=ar.pfs,0,0,1,0 // must be first in insn group |
@@ -1729,12 +1725,11 @@ END(dispatch_illegal_op_fault) | |||
1729 | ENTRY(dispatch_to_ia32_handler) | 1725 | ENTRY(dispatch_to_ia32_handler) |
1730 | SAVE_MIN | 1726 | SAVE_MIN |
1731 | ;; | 1727 | ;; |
1732 | mov r14=cr.isr | 1728 | MOV_FROM_ISR(r14) |
1733 | ssm psr.ic | PSR_DEFAULT_BITS | 1729 | SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24) |
1734 | ;; | 1730 | // guarantee that interruption collection is on |
1735 | srlz.i // guarantee that interruption collection is on | ||
1736 | ;; | 1731 | ;; |
1737 | (p15) ssm psr.i | 1732 | SSM_PSR_I(p15, p15, r3) |
1738 | adds r3=8,r2 // Base pointer for SAVE_REST | 1733 | adds r3=8,r2 // Base pointer for SAVE_REST |
1739 | ;; | 1734 | ;; |
1740 | SAVE_REST | 1735 | SAVE_REST |