Diffstat (limited to 'arch/ia64/kernel/mca_asm.S')
-rw-r--r-- | arch/ia64/kernel/mca_asm.S | 1358
1 file changed, 718 insertions(+), 640 deletions(-)
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index ef3fd7265b67..499a065f4e60 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -16,6 +16,9 @@
16 | // 04/11/12 Russ Anderson <rja@sgi.com> | 16 | // 04/11/12 Russ Anderson <rja@sgi.com> |
17 | // Added per cpu MCA/INIT stack save areas. | 17 | // Added per cpu MCA/INIT stack save areas. |
18 | // | 18 | // |
19 | // 12/08/05 Keith Owens <kaos@sgi.com> | ||
20 | // Use per cpu MCA/INIT stacks for all data. | ||
21 | // | ||
19 | #include <linux/config.h> | 22 | #include <linux/config.h> |
20 | #include <linux/threads.h> | 23 | #include <linux/threads.h> |
21 | 24 | ||
@@ -25,96 +28,23 @@
25 | #include <asm/mca_asm.h> | 28 | #include <asm/mca_asm.h> |
26 | #include <asm/mca.h> | 29 | #include <asm/mca.h> |
27 | 30 | ||
28 | /* | 31 | #include "entry.h" |
29 | * When we get a machine check, the kernel stack pointer is no longer | ||
30 | * valid, so we need to set a new stack pointer. | ||
31 | */ | ||
32 | #define MINSTATE_PHYS /* Make sure stack access is physical for MINSTATE */ | ||
33 | |||
34 | /* | ||
35 | * Needed for return context to SAL | ||
36 | */ | ||
37 | #define IA64_MCA_SAME_CONTEXT 0 | ||
38 | #define IA64_MCA_COLD_BOOT -2 | ||
39 | |||
40 | #include "minstate.h" | ||
41 | |||
42 | /* | ||
43 | * SAL_TO_OS_MCA_HANDOFF_STATE (SAL 3.0 spec) | ||
44 | * 1. GR1 = OS GP | ||
45 | * 2. GR8 = PAL_PROC physical address | ||
46 | * 3. GR9 = SAL_PROC physical address | ||
47 | * 4. GR10 = SAL GP (physical) | ||
48 | * 5. GR11 = Rendez state | ||
49 | * 6. GR12 = Return address to location within SAL_CHECK | ||
50 | */ | ||
51 | #define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp) \ | ||
52 | LOAD_PHYSICAL(p0, _tmp, ia64_sal_to_os_handoff_state);; \ | ||
53 | st8 [_tmp]=r1,0x08;; \ | ||
54 | st8 [_tmp]=r8,0x08;; \ | ||
55 | st8 [_tmp]=r9,0x08;; \ | ||
56 | st8 [_tmp]=r10,0x08;; \ | ||
57 | st8 [_tmp]=r11,0x08;; \ | ||
58 | st8 [_tmp]=r12,0x08;; \ | ||
59 | st8 [_tmp]=r17,0x08;; \ | ||
60 | st8 [_tmp]=r18,0x08 | ||
61 | |||
62 | /* | ||
63 | * OS_MCA_TO_SAL_HANDOFF_STATE (SAL 3.0 spec) | ||
64 | * (p6) is executed if we never entered virtual mode (TLB error) | ||
65 | * (p7) is executed if we entered virtual mode as expected (normal case) | ||
66 | * 1. GR8 = OS_MCA return status | ||
67 | * 2. GR9 = SAL GP (physical) | ||
68 | * 3. GR10 = 0/1 returning same/new context | ||
69 | * 4. GR22 = New min state save area pointer | ||
70 | * returns ptr to SAL rtn save loc in _tmp | ||
71 | */ | ||
72 | #define OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(_tmp) \ | ||
73 | movl _tmp=ia64_os_to_sal_handoff_state;; \ | ||
74 | DATA_VA_TO_PA(_tmp);; \ | ||
75 | ld8 r8=[_tmp],0x08;; \ | ||
76 | ld8 r9=[_tmp],0x08;; \ | ||
77 | ld8 r10=[_tmp],0x08;; \ | ||
78 | ld8 r22=[_tmp],0x08;; | ||
79 | // now _tmp is pointing to SAL rtn save location | ||
80 | |||
81 | /* | ||
82 | * COLD_BOOT_HANDOFF_STATE() sets ia64_mca_os_to_sal_state | ||
83 | * imots_os_status=IA64_MCA_COLD_BOOT | ||
84 | * imots_sal_gp=SAL GP | ||
85 | * imots_context=IA64_MCA_SAME_CONTEXT | ||
86 | * imots_new_min_state=Min state save area pointer | ||
87 | * imots_sal_check_ra=Return address to location within SAL_CHECK | ||
88 | * | ||
89 | */ | ||
90 | #define COLD_BOOT_HANDOFF_STATE(sal_to_os_handoff,os_to_sal_handoff,tmp)\ | ||
91 | movl tmp=IA64_MCA_COLD_BOOT; \ | ||
92 | movl sal_to_os_handoff=__pa(ia64_sal_to_os_handoff_state); \ | ||
93 | movl os_to_sal_handoff=__pa(ia64_os_to_sal_handoff_state);; \ | ||
94 | st8 [os_to_sal_handoff]=tmp,8;; \ | ||
95 | ld8 tmp=[sal_to_os_handoff],48;; \ | ||
96 | st8 [os_to_sal_handoff]=tmp,8;; \ | ||
97 | movl tmp=IA64_MCA_SAME_CONTEXT;; \ | ||
98 | st8 [os_to_sal_handoff]=tmp,8;; \ | ||
99 | ld8 tmp=[sal_to_os_handoff],-8;; \ | ||
100 | st8 [os_to_sal_handoff]=tmp,8;; \ | ||
101 | ld8 tmp=[sal_to_os_handoff];; \ | ||
102 | st8 [os_to_sal_handoff]=tmp;; | ||
103 | 32 | ||
104 | #define GET_IA64_MCA_DATA(reg) \ | 33 | #define GET_IA64_MCA_DATA(reg) \ |
105 | GET_THIS_PADDR(reg, ia64_mca_data) \ | 34 | GET_THIS_PADDR(reg, ia64_mca_data) \ |
106 | ;; \ | 35 | ;; \ |
107 | ld8 reg=[reg] | 36 | ld8 reg=[reg] |
108 | 37 | ||
109 | .global ia64_os_mca_dispatch | ||
110 | .global ia64_os_mca_dispatch_end | ||
111 | .global ia64_sal_to_os_handoff_state | ||
112 | .global ia64_os_to_sal_handoff_state | ||
113 | .global ia64_do_tlb_purge | 38 | .global ia64_do_tlb_purge |
39 | .global ia64_os_mca_dispatch | ||
40 | .global ia64_os_init_dispatch_monarch | ||
41 | .global ia64_os_init_dispatch_slave | ||
114 | 42 | ||
115 | .text | 43 | .text |
116 | .align 16 | 44 | .align 16 |
117 | 45 | ||
46 | //StartMain//////////////////////////////////////////////////////////////////// | ||
47 | |||
118 | /* | 48 | /* |
119 | * Just the TLB purge part is moved to a separate function | 49 | * Just the TLB purge part is moved to a separate function |
120 | * so we can re-use the code for cpu hotplug code as well | 50 | * so we can re-use the code for cpu hotplug code as well |
@@ -207,34 +137,31 @@ ia64_do_tlb_purge:
207 | br.sptk.many b1 | 137 | br.sptk.many b1 |
208 | ;; | 138 | ;; |
209 | 139 | ||
210 | ia64_os_mca_dispatch: | 140 | //EndMain////////////////////////////////////////////////////////////////////// |
141 | |||
142 | //StartMain//////////////////////////////////////////////////////////////////// | ||
211 | 143 | ||
144 | ia64_os_mca_dispatch: | ||
212 | // Serialize all MCA processing | 145 | // Serialize all MCA processing |
213 | mov r3=1;; | 146 | mov r3=1;; |
214 | LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);; | 147 | LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);; |
215 | ia64_os_mca_spin: | 148 | ia64_os_mca_spin: |
216 | xchg8 r4=[r2],r3;; | 149 | xchg4 r4=[r2],r3;; |
217 | cmp.ne p6,p0=r4,r0 | 150 | cmp.ne p6,p0=r4,r0 |
218 | (p6) br ia64_os_mca_spin | 151 | (p6) br ia64_os_mca_spin |
219 | 152 | ||
220 | // Save the SAL to OS MCA handoff state as defined | 153 | mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack |
221 | // by SAL SPEC 3.0 | 154 | LOAD_PHYSICAL(p0,r2,1f) // return address |
222 | // NOTE : The order in which the state gets saved | 155 | mov r19=1 // All MCA events are treated as monarch (for now) |
223 | // is dependent on the way the C-structure | 156 | br.sptk ia64_state_save // save the state that is not in minstate |
224 | // for ia64_mca_sal_to_os_state_t has been | 157 | 1: |
225 | // defined in include/asm/mca.h | ||
226 | SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2) | ||
227 | ;; | ||
228 | |||
229 | // LOG PROCESSOR STATE INFO FROM HERE ON.. | ||
230 | begin_os_mca_dump: | ||
231 | br ia64_os_mca_proc_state_dump;; | ||
232 | 158 | ||
233 | ia64_os_mca_done_dump: | 159 | GET_IA64_MCA_DATA(r2) |
234 | 160 | // Using MCA stack, struct ia64_sal_os_state, variable proc_state_param | |
235 | LOAD_PHYSICAL(p0,r16,ia64_sal_to_os_handoff_state+56) | 161 | ;; |
162 | add r3=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET+IA64_SAL_OS_STATE_PROC_STATE_PARAM_OFFSET, r2 | ||
236 | ;; | 163 | ;; |
237 | ld8 r18=[r16] // Get processor state parameter on existing PALE_CHECK. | 164 | ld8 r18=[r3] // Get processor state parameter on existing PALE_CHECK. |
238 | ;; | 165 | ;; |
239 | tbit.nz p6,p7=r18,60 | 166 | tbit.nz p6,p7=r18,60 |
240 | (p7) br.spnt done_tlb_purge_and_reload | 167 | (p7) br.spnt done_tlb_purge_and_reload |
@@ -323,624 +250,775 @@ ia64_reload_tr:
323 | itr.d dtr[r20]=r16 | 250 | itr.d dtr[r20]=r16 |
324 | ;; | 251 | ;; |
325 | srlz.d | 252 | srlz.d |
326 | ;; | ||
327 | br.sptk.many done_tlb_purge_and_reload | ||
328 | err: | ||
329 | COLD_BOOT_HANDOFF_STATE(r20,r21,r22) | ||
330 | br.sptk.many ia64_os_mca_done_restore | ||
331 | 253 | ||
332 | done_tlb_purge_and_reload: | 254 | done_tlb_purge_and_reload: |
333 | 255 | ||
334 | // Setup new stack frame for OS_MCA handling | 256 | // switch to per cpu MCA stack |
335 | GET_IA64_MCA_DATA(r2) | 257 | mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack |
336 | ;; | 258 | LOAD_PHYSICAL(p0,r2,1f) // return address |
337 | add r3 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2 | 259 | br.sptk ia64_new_stack |
338 | add r2 = IA64_MCA_CPU_RBSTORE_OFFSET, r2 | 260 | 1: |
339 | ;; | 261 | |
340 | rse_switch_context(r6,r3,r2);; // RSC management in this new context | 262 | // everything saved, now we can set the kernel registers |
263 | mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack | ||
264 | LOAD_PHYSICAL(p0,r2,1f) // return address | ||
265 | br.sptk ia64_set_kernel_registers | ||
266 | 1: | ||
341 | 267 | ||
268 | // This must be done in physical mode | ||
342 | GET_IA64_MCA_DATA(r2) | 269 | GET_IA64_MCA_DATA(r2) |
343 | ;; | 270 | ;; |
344 | add r2 = IA64_MCA_CPU_STACK_OFFSET+IA64_MCA_STACK_SIZE-16, r2 | 271 | mov r7=r2 |
345 | ;; | ||
346 | mov r12=r2 // establish new stack-pointer | ||
347 | 272 | ||
348 | // Enter virtual mode from physical mode | 273 | // Enter virtual mode from physical mode |
349 | VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4) | 274 | VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4) |
350 | ia64_os_mca_virtual_begin: | 275 | |
276 | // This code returns to SAL via SOS r2, in general SAL has no unwind | ||
277 | // data. To get a clean termination when backtracing the C MCA/INIT | ||
278 | // handler, set a dummy return address of 0 in this routine. That | ||
279 | // requires that ia64_os_mca_virtual_begin be a global function. | ||
280 | ENTRY(ia64_os_mca_virtual_begin) | ||
281 | .prologue | ||
282 | .save rp,r0 | ||
283 | .body | ||
284 | |||
285 | mov ar.rsc=3 // set eager mode for C handler | ||
286 | mov r2=r7 // see GET_IA64_MCA_DATA above | ||
287 | ;; | ||
351 | 288 | ||
352 | // Call virtual mode handler | 289 | // Call virtual mode handler |
353 | movl r2=ia64_mca_ucmc_handler;; | 290 | alloc r14=ar.pfs,0,0,3,0 |
354 | mov b6=r2;; | 291 | ;; |
355 | br.call.sptk.many b0=b6;; | 292 | DATA_PA_TO_VA(r2,r7) |
356 | .ret0: | 293 | ;; |
294 | add out0=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_PT_REGS_OFFSET, r2 | ||
295 | add out1=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SWITCH_STACK_OFFSET, r2 | ||
296 | add out2=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET, r2 | ||
297 | br.call.sptk.many b0=ia64_mca_handler | ||
298 | |||
357 | // Revert back to physical mode before going back to SAL | 299 | // Revert back to physical mode before going back to SAL |
358 | PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4) | 300 | PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4) |
359 | ia64_os_mca_virtual_end: | 301 | ia64_os_mca_virtual_end: |
360 | 302 | ||
361 | // restore the original stack frame here | 303 | END(ia64_os_mca_virtual_begin) |
304 | |||
305 | // switch back to previous stack | ||
306 | alloc r14=ar.pfs,0,0,0,0 // remove the MCA handler frame | ||
307 | mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack | ||
308 | LOAD_PHYSICAL(p0,r2,1f) // return address | ||
309 | br.sptk ia64_old_stack | ||
310 | 1: | ||
311 | |||
312 | mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack | ||
313 | LOAD_PHYSICAL(p0,r2,1f) // return address | ||
314 | br.sptk ia64_state_restore // restore the SAL state | ||
315 | 1: | ||
316 | |||
317 | mov b0=r12 // SAL_CHECK return address | ||
318 | |||
319 | // release lock | ||
320 | LOAD_PHYSICAL(p0,r3,ia64_mca_serialize);; | ||
321 | st4.rel [r3]=r0 | ||
322 | |||
323 | br b0 | ||
324 | |||
325 | //EndMain////////////////////////////////////////////////////////////////////// | ||
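The MCA dispatcher above and the INIT dispatcher below both end up in virtual mode and call a C handler with three pointers built from MCA_PT_REGS_OFFSET, MCA_SWITCH_STACK_OFFSET and MCA_SOS_OFFSET. As a rough sketch of what the C side receives (argument types are inferred from those offsets and the stub comments further down, return types are assumed; the real prototypes live in mca.c/mca.h):

	/* Sketch only: types and return values are inferred, not quoted from the tree. */
	struct pt_regs;			/* partial pt_regs filled in by ia64_state_save */
	struct switch_stack;		/* partial switch_stack filled in by ia64_state_save */
	struct ia64_sal_os_state;	/* SAL/OS handoff state, see the stub comments below */

	void ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
			      struct ia64_sal_os_state *sos);
	void ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
			       struct ia64_sal_os_state *sos);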
326 | |||
327 | //StartMain//////////////////////////////////////////////////////////////////// | ||
328 | |||
329 | // | ||
330 | // SAL to OS entry point for INIT on all processors. This has been defined for | ||
331 | // registration purposes with SAL as a part of ia64_mca_init. Monarch and | ||
332 | // slave INIT have identical processing, except for the value of the | ||
333 | // sos->monarch flag in r19. | ||
334 | // | ||
335 | |||
336 | ia64_os_init_dispatch_monarch: | ||
337 | mov r19=1 // Bow, bow, ye lower middle classes! | ||
338 | br.sptk ia64_os_init_dispatch | ||
339 | |||
340 | ia64_os_init_dispatch_slave: | ||
341 | mov r19=0 // <igor>yeth, mathter</igor> | ||
342 | |||
343 | ia64_os_init_dispatch: | ||
344 | |||
345 | mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack | ||
346 | LOAD_PHYSICAL(p0,r2,1f) // return address | ||
347 | br.sptk ia64_state_save // save the state that is not in minstate | ||
348 | 1: | ||
349 | |||
350 | // switch to per cpu INIT stack | ||
351 | mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack | ||
352 | LOAD_PHYSICAL(p0,r2,1f) // return address | ||
353 | br.sptk ia64_new_stack | ||
354 | 1: | ||
355 | |||
356 | // everything saved, now we can set the kernel registers | ||
357 | mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack | ||
358 | LOAD_PHYSICAL(p0,r2,1f) // return address | ||
359 | br.sptk ia64_set_kernel_registers | ||
360 | 1: | ||
361 | |||
362 | // This must be done in physical mode | ||
362 | GET_IA64_MCA_DATA(r2) | 363 | GET_IA64_MCA_DATA(r2) |
363 | ;; | 364 | ;; |
364 | add r2 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2 | 365 | mov r7=r2 |
365 | ;; | 366 | |
366 | movl r4=IA64_PSR_MC | 367 | // Enter virtual mode from physical mode |
368 | VIRTUAL_MODE_ENTER(r2, r3, ia64_os_init_virtual_begin, r4) | ||
369 | |||
370 | // This code returns to SAL via SOS r2, in general SAL has no unwind | ||
371 | // data. To get a clean termination when backtracing the C MCA/INIT | ||
372 | // handler, set a dummy return address of 0 in this routine. That | ||
373 | // requires that ia64_os_init_virtual_begin be a global function. | ||
374 | ENTRY(ia64_os_init_virtual_begin) | ||
375 | .prologue | ||
376 | .save rp,r0 | ||
377 | .body | ||
378 | |||
379 | mov ar.rsc=3 // set eager mode for C handler | ||
380 | mov r2=r7 // see GET_IA64_MCA_DATA above | ||
367 | ;; | 381 | ;; |
368 | rse_return_context(r4,r3,r2) // switch from interrupt context for RSE | ||
369 | 382 | ||
370 | // let us restore all the registers from our PSI structure | 383 | // Call virtual mode handler |
371 | mov r8=gp | 384 | alloc r14=ar.pfs,0,0,3,0 |
385 | ;; | ||
386 | DATA_PA_TO_VA(r2,r7) | ||
372 | ;; | 387 | ;; |
373 | begin_os_mca_restore: | 388 | add out0=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_PT_REGS_OFFSET, r2 |
374 | br ia64_os_mca_proc_state_restore;; | 389 | add out1=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_SWITCH_STACK_OFFSET, r2 |
390 | add out2=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_SOS_OFFSET, r2 | ||
391 | br.call.sptk.many b0=ia64_init_handler | ||
375 | 392 | ||
376 | ia64_os_mca_done_restore: | 393 | // Revert back to physical mode before going back to SAL |
377 | OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(r2);; | 394 | PHYSICAL_MODE_ENTER(r2, r3, ia64_os_init_virtual_end, r4) |
378 | // branch back to SALE_CHECK | 395 | ia64_os_init_virtual_end: |
379 | ld8 r3=[r2];; | ||
380 | mov b0=r3;; // SAL_CHECK return address | ||
381 | 396 | ||
382 | // release lock | 397 | END(ia64_os_init_virtual_begin) |
383 | movl r3=ia64_mca_serialize;; | 398 | |
384 | DATA_VA_TO_PA(r3);; | 399 | mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack |
385 | st8.rel [r3]=r0 | 400 | LOAD_PHYSICAL(p0,r2,1f) // return address |
401 | br.sptk ia64_state_restore // restore the SAL state | ||
402 | 1: | ||
386 | 403 | ||
404 | // switch back to previous stack | ||
405 | alloc r14=ar.pfs,0,0,0,0 // remove the INIT handler frame | ||
406 | mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack | ||
407 | LOAD_PHYSICAL(p0,r2,1f) // return address | ||
408 | br.sptk ia64_old_stack | ||
409 | 1: | ||
410 | |||
411 | mov b0=r12 // SAL_CHECK return address | ||
387 | br b0 | 412 | br b0 |
388 | ;; | 413 | |
389 | ia64_os_mca_dispatch_end: | ||
390 | //EndMain////////////////////////////////////////////////////////////////////// | 414 | //EndMain////////////////////////////////////////////////////////////////////// |
391 | 415 | ||
416 | // common defines for the stubs | ||
417 | #define ms r4 | ||
418 | #define regs r5 | ||
419 | #define temp1 r2 /* careful, it overlaps with input registers */ | ||
420 | #define temp2 r3 /* careful, it overlaps with input registers */ | ||
421 | #define temp3 r7 | ||
422 | #define temp4 r14 | ||
423 | |||
392 | 424 | ||
393 | //++ | 425 | //++ |
394 | // Name: | 426 | // Name: |
395 | // ia64_os_mca_proc_state_dump() | 427 | // ia64_state_save() |
396 | // | 428 | // |
397 | // Stub Description: | 429 | // Stub Description: |
398 | // | 430 | // |
399 | // This stub dumps the processor state during MCHK to a data area | 431 | // Save the state that is not in minstate. This is sensitive to the layout of |
432 | // struct ia64_sal_os_state in mca.h. | ||
433 | // | ||
434 | // r2 contains the return address, r3 contains either | ||
435 | // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET. | ||
436 | // | ||
437 | // The OS to SAL section of struct ia64_sal_os_state is set to a default | ||
438 | // value of cold boot (MCA) or warm boot (INIT) and return to the same | ||
439 | // context. ia64_sal_os_state is also used to hold some registers that | ||
440 | // need to be saved and restored across the stack switches. | ||
441 | // | ||
442 | // Most input registers to this stub come from PAL/SAL | ||
443 | // r1 os gp, physical | ||
444 | // r8 pal_proc entry point | ||
445 | // r9 sal_proc entry point | ||
446 | // r10 sal gp | ||
447 | // r11 MCA - rendevzous state, INIT - reason code | ||
448 | // r12 sal return address | ||
449 | // r17 pal min_state | ||
450 | // r18 processor state parameter | ||
451 | // r19 monarch flag, set by the caller of this routine | ||
452 | // | ||
453 | // In addition to the SAL to OS state, this routine saves all the | ||
454 | // registers that appear in struct pt_regs and struct switch_stack, | ||
455 | // excluding those that are already in the PAL minstate area. This | ||
456 | // results in a partial pt_regs and switch_stack, the C code copies the | ||
457 | // remaining registers from PAL minstate to pt_regs and switch_stack. The | ||
458 | // resulting structures contain all the state of the original process when | ||
459 | // MCA/INIT occurred. | ||
400 | // | 460 | // |
401 | //-- | 461 | //-- |
402 | 462 | ||
403 | ia64_os_mca_proc_state_dump: | 463 | ia64_state_save: |
404 | // Save bank 1 GRs 16-31 which will be used by c-language code when we switch | 464 | add regs=MCA_SOS_OFFSET, r3 |
405 | // to virtual addressing mode. | 465 | add ms=MCA_SOS_OFFSET+8, r3 |
406 | GET_IA64_MCA_DATA(r2) | 466 | mov b0=r2 // save return address |
467 | cmp.eq p1,p2=IA64_MCA_CPU_MCA_STACK_OFFSET, r3 | ||
468 | ;; | ||
469 | GET_IA64_MCA_DATA(temp2) | ||
470 | ;; | ||
471 | add temp1=temp2, regs // struct ia64_sal_os_state on MCA or INIT stack | ||
472 | add temp2=temp2, ms // struct ia64_sal_os_state+8 on MCA or INIT stack | ||
473 | ;; | ||
474 | mov regs=temp1 // save the start of sos | ||
475 | st8 [temp1]=r1,16 // os_gp | ||
476 | st8 [temp2]=r8,16 // pal_proc | ||
477 | ;; | ||
478 | st8 [temp1]=r9,16 // sal_proc | ||
479 | st8 [temp2]=r11,16 // rv_rc | ||
480 | mov r11=cr.iipa | ||
481 | ;; | ||
482 | st8 [temp1]=r18,16 // proc_state_param | ||
483 | st8 [temp2]=r19,16 // monarch | ||
484 | mov r6=IA64_KR(CURRENT) | ||
485 | ;; | ||
486 | st8 [temp1]=r12,16 // sal_ra | ||
487 | st8 [temp2]=r10,16 // sal_gp | ||
488 | mov r12=cr.isr | ||
489 | ;; | ||
490 | st8 [temp1]=r17,16 // pal_min_state | ||
491 | st8 [temp2]=r6,16 // prev_IA64_KR_CURRENT | ||
492 | mov r6=cr.ifa | ||
493 | ;; | ||
494 | st8 [temp1]=r0,16 // prev_task, starts off as NULL | ||
495 | st8 [temp2]=r12,16 // cr.isr | ||
496 | mov r12=cr.itir | ||
497 | ;; | ||
498 | st8 [temp1]=r6,16 // cr.ifa | ||
499 | st8 [temp2]=r12,16 // cr.itir | ||
500 | mov r12=cr.iim | ||
501 | ;; | ||
502 | st8 [temp1]=r11,16 // cr.iipa | ||
503 | st8 [temp2]=r12,16 // cr.iim | ||
504 | mov r6=cr.iha | ||
505 | (p1) mov r12=IA64_MCA_COLD_BOOT | ||
506 | (p2) mov r12=IA64_INIT_WARM_BOOT | ||
507 | ;; | ||
508 | st8 [temp1]=r6,16 // cr.iha | ||
509 | st8 [temp2]=r12 // os_status, default is cold boot | ||
510 | mov r6=IA64_MCA_SAME_CONTEXT | ||
511 | ;; | ||
512 | st8 [temp1]=r6 // context, default is same context | ||
513 | |||
514 | // Save the pt_regs data that is not in minstate. The previous code | ||
515 | // left regs at sos. | ||
516 | add regs=MCA_PT_REGS_OFFSET-MCA_SOS_OFFSET, regs | ||
517 | ;; | ||
518 | add temp1=PT(B6), regs | ||
519 | mov temp3=b6 | ||
520 | mov temp4=b7 | ||
521 | add temp2=PT(B7), regs | ||
522 | ;; | ||
523 | st8 [temp1]=temp3,PT(AR_CSD)-PT(B6) // save b6 | ||
524 | st8 [temp2]=temp4,PT(AR_SSD)-PT(B7) // save b7 | ||
525 | mov temp3=ar.csd | ||
526 | mov temp4=ar.ssd | ||
527 | cover // must be last in group | ||
407 | ;; | 528 | ;; |
408 | add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2 | 529 | st8 [temp1]=temp3,PT(AR_UNAT)-PT(AR_CSD) // save ar.csd |
409 | ;; | 530 | st8 [temp2]=temp4,PT(AR_PFS)-PT(AR_SSD) // save ar.ssd |
410 | // save ar.NaT | 531 | mov temp3=ar.unat |
411 | mov r5=ar.unat // ar.unat | 532 | mov temp4=ar.pfs |
412 | 533 | ;; | |
413 | // save banked GRs 16-31 along with NaT bits | 534 | st8 [temp1]=temp3,PT(AR_RNAT)-PT(AR_UNAT) // save ar.unat |
414 | bsw.1;; | 535 | st8 [temp2]=temp4,PT(AR_BSPSTORE)-PT(AR_PFS) // save ar.pfs |
415 | st8.spill [r2]=r16,8;; | 536 | mov temp3=ar.rnat |
416 | st8.spill [r2]=r17,8;; | 537 | mov temp4=ar.bspstore |
417 | st8.spill [r2]=r18,8;; | 538 | ;; |
418 | st8.spill [r2]=r19,8;; | 539 | st8 [temp1]=temp3,PT(LOADRS)-PT(AR_RNAT) // save ar.rnat |
419 | st8.spill [r2]=r20,8;; | 540 | st8 [temp2]=temp4,PT(AR_FPSR)-PT(AR_BSPSTORE) // save ar.bspstore |
420 | st8.spill [r2]=r21,8;; | 541 | mov temp3=ar.bsp |
421 | st8.spill [r2]=r22,8;; | 542 | ;; |
422 | st8.spill [r2]=r23,8;; | 543 | sub temp3=temp3, temp4 // ar.bsp - ar.bspstore |
423 | st8.spill [r2]=r24,8;; | 544 | mov temp4=ar.fpsr |
424 | st8.spill [r2]=r25,8;; | 545 | ;; |
425 | st8.spill [r2]=r26,8;; | 546 | shl temp3=temp3,16 // compute ar.rsc to be used for "loadrs" |
426 | st8.spill [r2]=r27,8;; | 547 | ;; |
427 | st8.spill [r2]=r28,8;; | 548 | st8 [temp1]=temp3,PT(AR_CCV)-PT(LOADRS) // save loadrs |
428 | st8.spill [r2]=r29,8;; | 549 | st8 [temp2]=temp4,PT(F6)-PT(AR_FPSR) // save ar.fpsr |
429 | st8.spill [r2]=r30,8;; | 550 | mov temp3=ar.ccv |
430 | st8.spill [r2]=r31,8;; | 551 | ;; |
431 | 552 | st8 [temp1]=temp3,PT(F7)-PT(AR_CCV) // save ar.ccv | |
432 | mov r4=ar.unat;; | 553 | stf.spill [temp2]=f6,PT(F8)-PT(F6) |
433 | st8 [r2]=r4,8 // save User NaT bits for r16-r31 | 554 | ;; |
434 | mov ar.unat=r5 // restore original unat | 555 | stf.spill [temp1]=f7,PT(F9)-PT(F7) |
435 | bsw.0;; | 556 | stf.spill [temp2]=f8,PT(F10)-PT(F8) |
436 | 557 | ;; | |
437 | //save BRs | 558 | stf.spill [temp1]=f9,PT(F11)-PT(F9) |
438 | add r4=8,r2 // duplicate r2 in r4 | 559 | stf.spill [temp2]=f10 |
439 | add r6=2*8,r2 // duplicate r2 in r4 | 560 | ;; |
440 | 561 | stf.spill [temp1]=f11 | |
441 | mov r3=b0 | 562 | |
442 | mov r5=b1 | 563 | // Save the switch_stack data that is not in minstate nor pt_regs. The |
443 | mov r7=b2;; | 564 | // previous code left regs at pt_regs. |
444 | st8 [r2]=r3,3*8 | 565 | add regs=MCA_SWITCH_STACK_OFFSET-MCA_PT_REGS_OFFSET, regs |
445 | st8 [r4]=r5,3*8 | 566 | ;; |
446 | st8 [r6]=r7,3*8;; | 567 | add temp1=SW(F2), regs |
447 | 568 | add temp2=SW(F3), regs | |
448 | mov r3=b3 | 569 | ;; |
449 | mov r5=b4 | 570 | stf.spill [temp1]=f2,32 |
450 | mov r7=b5;; | 571 | stf.spill [temp2]=f3,32 |
451 | st8 [r2]=r3,3*8 | 572 | ;; |
452 | st8 [r4]=r5,3*8 | 573 | stf.spill [temp1]=f4,32 |
453 | st8 [r6]=r7,3*8;; | 574 | stf.spill [temp2]=f5,32 |
454 | 575 | ;; | |
455 | mov r3=b6 | 576 | stf.spill [temp1]=f12,32 |
456 | mov r5=b7;; | 577 | stf.spill [temp2]=f13,32 |
457 | st8 [r2]=r3,2*8 | 578 | ;; |
458 | st8 [r4]=r5,2*8;; | 579 | stf.spill [temp1]=f14,32 |
459 | 580 | stf.spill [temp2]=f15,32 | |
460 | cSaveCRs: | 581 | ;; |
461 | // save CRs | 582 | stf.spill [temp1]=f16,32 |
462 | add r4=8,r2 // duplicate r2 in r4 | 583 | stf.spill [temp2]=f17,32 |
463 | add r6=2*8,r2 // duplicate r2 in r4 | 584 | ;; |
464 | 585 | stf.spill [temp1]=f18,32 | |
465 | mov r3=cr.dcr | 586 | stf.spill [temp2]=f19,32 |
466 | mov r5=cr.itm | 587 | ;; |
467 | mov r7=cr.iva;; | 588 | stf.spill [temp1]=f20,32 |
468 | 589 | stf.spill [temp2]=f21,32 | |
469 | st8 [r2]=r3,8*8 | 590 | ;; |
470 | st8 [r4]=r5,3*8 | 591 | stf.spill [temp1]=f22,32 |
471 | st8 [r6]=r7,3*8;; // 48 byte rements | 592 | stf.spill [temp2]=f23,32 |
472 | 593 | ;; | |
473 | mov r3=cr.pta;; | 594 | stf.spill [temp1]=f24,32 |
474 | st8 [r2]=r3,8*8;; // 64 byte rements | 595 | stf.spill [temp2]=f25,32 |
475 | 596 | ;; | |
476 | // if PSR.ic=0, reading interruption registers causes an illegal operation fault | 597 | stf.spill [temp1]=f26,32 |
477 | mov r3=psr;; | 598 | stf.spill [temp2]=f27,32 |
478 | tbit.nz.unc p6,p0=r3,PSR_IC;; // PSI Valid Log bit pos. test | 599 | ;; |
479 | (p6) st8 [r2]=r0,9*8+160 // increment by 232 byte inc. | 600 | stf.spill [temp1]=f28,32 |
480 | begin_skip_intr_regs: | 601 | stf.spill [temp2]=f29,32 |
481 | (p6) br SkipIntrRegs;; | 602 | ;; |
482 | 603 | stf.spill [temp1]=f30,SW(B2)-SW(F30) | |
483 | add r4=8,r2 // duplicate r2 in r4 | 604 | stf.spill [temp2]=f31,SW(B3)-SW(F31) |
484 | add r6=2*8,r2 // duplicate r2 in r6 | 605 | mov temp3=b2 |
485 | 606 | mov temp4=b3 | |
486 | mov r3=cr.ipsr | 607 | ;; |
487 | mov r5=cr.isr | 608 | st8 [temp1]=temp3,16 // save b2 |
488 | mov r7=r0;; | 609 | st8 [temp2]=temp4,16 // save b3 |
489 | st8 [r2]=r3,3*8 | 610 | mov temp3=b4 |
490 | st8 [r4]=r5,3*8 | 611 | mov temp4=b5 |
491 | st8 [r6]=r7,3*8;; | 612 | ;; |
492 | 613 | st8 [temp1]=temp3,SW(AR_LC)-SW(B4) // save b4 | |
493 | mov r3=cr.iip | 614 | st8 [temp2]=temp4 // save b5 |
494 | mov r5=cr.ifa | 615 | mov temp3=ar.lc |
495 | mov r7=cr.itir;; | 616 | ;; |
496 | st8 [r2]=r3,3*8 | 617 | st8 [temp1]=temp3 // save ar.lc |
497 | st8 [r4]=r5,3*8 | 618 | |
498 | st8 [r6]=r7,3*8;; | 619 | // FIXME: Some proms are incorrectly accessing the minstate area as |
499 | 620 | // cached data. The C code uses region 6, uncached virtual. Ensure | |
500 | mov r3=cr.iipa | 621 | // that there is no cache data lying around for the first 1K of the |
501 | mov r5=cr.ifs | 622 | // minstate area. |
502 | mov r7=cr.iim;; | 623 | // Remove this code in September 2006, that gives platforms a year to |
503 | st8 [r2]=r3,3*8 | 624 | // fix their proms and get their customers updated. |
504 | st8 [r4]=r5,3*8 | 625 | |
505 | st8 [r6]=r7,3*8;; | 626 | add r1=32*1,r17 |
506 | 627 | add r2=32*2,r17 | |
507 | mov r3=cr25;; // cr.iha | 628 | add r3=32*3,r17 |
508 | st8 [r2]=r3,160;; // 160 byte rement | 629 | add r4=32*4,r17 |
509 | 630 | add r5=32*5,r17 | |
510 | SkipIntrRegs: | 631 | add r6=32*6,r17 |
511 | st8 [r2]=r0,152;; // another 152 byte . | 632 | add r7=32*7,r17 |
512 | 633 | ;; | |
513 | add r4=8,r2 // duplicate r2 in r4 | 634 | fc r17 |
514 | add r6=2*8,r2 // duplicate r2 in r6 | 635 | fc r1 |
515 | 636 | fc r2 | |
516 | mov r3=cr.lid | 637 | fc r3 |
517 | // mov r5=cr.ivr // cr.ivr, don't read it | 638 | fc r4 |
518 | mov r7=cr.tpr;; | 639 | fc r5 |
519 | st8 [r2]=r3,3*8 | 640 | fc r6 |
520 | st8 [r4]=r5,3*8 | 641 | fc r7 |
521 | st8 [r6]=r7,3*8;; | 642 | add r17=32*8,r17 |
522 | 643 | add r1=32*8,r1 | |
523 | mov r3=r0 // cr.eoi => cr67 | 644 | add r2=32*8,r2 |
524 | mov r5=r0 // cr.irr0 => cr68 | 645 | add r3=32*8,r3 |
525 | mov r7=r0;; // cr.irr1 => cr69 | 646 | add r4=32*8,r4 |
526 | st8 [r2]=r3,3*8 | 647 | add r5=32*8,r5 |
527 | st8 [r4]=r5,3*8 | 648 | add r6=32*8,r6 |
528 | st8 [r6]=r7,3*8;; | 649 | add r7=32*8,r7 |
529 | 650 | ;; | |
530 | mov r3=r0 // cr.irr2 => cr70 | 651 | fc r17 |
531 | mov r5=r0 // cr.irr3 => cr71 | 652 | fc r1 |
532 | mov r7=cr.itv;; | 653 | fc r2 |
533 | st8 [r2]=r3,3*8 | 654 | fc r3 |
534 | st8 [r4]=r5,3*8 | 655 | fc r4 |
535 | st8 [r6]=r7,3*8;; | 656 | fc r5 |
536 | 657 | fc r6 | |
537 | mov r3=cr.pmv | 658 | fc r7 |
538 | mov r5=cr.cmcv;; | 659 | add r17=32*8,r17 |
539 | st8 [r2]=r3,7*8 | 660 | add r1=32*8,r1 |
540 | st8 [r4]=r5,7*8;; | 661 | add r2=32*8,r2 |
541 | 662 | add r3=32*8,r3 | |
542 | mov r3=r0 // cr.lrr0 => cr80 | 663 | add r4=32*8,r4 |
543 | mov r5=r0;; // cr.lrr1 => cr81 | 664 | add r5=32*8,r5 |
544 | st8 [r2]=r3,23*8 | 665 | add r6=32*8,r6 |
545 | st8 [r4]=r5,23*8;; | 666 | add r7=32*8,r7 |
546 | 667 | ;; | |
547 | adds r2=25*8,r2;; | 668 | fc r17 |
548 | 669 | fc r1 | |
549 | cSaveARs: | 670 | fc r2 |
550 | // save ARs | 671 | fc r3 |
551 | add r4=8,r2 // duplicate r2 in r4 | 672 | fc r4 |
552 | add r6=2*8,r2 // duplicate r2 in r6 | 673 | fc r5 |
553 | 674 | fc r6 | |
554 | mov r3=ar.k0 | 675 | fc r7 |
555 | mov r5=ar.k1 | 676 | add r17=32*8,r17 |
556 | mov r7=ar.k2;; | 677 | add r1=32*8,r1 |
557 | st8 [r2]=r3,3*8 | 678 | add r2=32*8,r2 |
558 | st8 [r4]=r5,3*8 | 679 | add r3=32*8,r3 |
559 | st8 [r6]=r7,3*8;; | 680 | add r4=32*8,r4 |
560 | 681 | add r5=32*8,r5 | |
561 | mov r3=ar.k3 | 682 | add r6=32*8,r6 |
562 | mov r5=ar.k4 | 683 | add r7=32*8,r7 |
563 | mov r7=ar.k5;; | 684 | ;; |
564 | st8 [r2]=r3,3*8 | 685 | fc r17 |
565 | st8 [r4]=r5,3*8 | 686 | fc r1 |
566 | st8 [r6]=r7,3*8;; | 687 | fc r2 |
567 | 688 | fc r3 | |
568 | mov r3=ar.k6 | 689 | fc r4 |
569 | mov r5=ar.k7 | 690 | fc r5 |
570 | mov r7=r0;; // ar.kr8 | 691 | fc r6 |
571 | st8 [r2]=r3,10*8 | 692 | fc r7 |
572 | st8 [r4]=r5,10*8 | 693 | |
573 | st8 [r6]=r7,10*8;; // rement by 72 bytes | 694 | br.sptk b0 |
574 | |||
575 | mov r3=ar.rsc | ||
576 | mov ar.rsc=r0 // put RSE in enforced lazy mode | ||
577 | mov r5=ar.bsp | ||
578 | ;; | ||
579 | mov r7=ar.bspstore;; | ||
580 | st8 [r2]=r3,3*8 | ||
581 | st8 [r4]=r5,3*8 | ||
582 | st8 [r6]=r7,3*8;; | ||
583 | |||
584 | mov r3=ar.rnat;; | ||
585 | st8 [r2]=r3,8*13 // increment by 13x8 bytes | ||
586 | |||
587 | mov r3=ar.ccv;; | ||
588 | st8 [r2]=r3,8*4 | ||
589 | |||
590 | mov r3=ar.unat;; | ||
591 | st8 [r2]=r3,8*4 | ||
592 | |||
593 | mov r3=ar.fpsr;; | ||
594 | st8 [r2]=r3,8*4 | ||
595 | |||
596 | mov r3=ar.itc;; | ||
597 | st8 [r2]=r3,160 // 160 | ||
598 | |||
599 | mov r3=ar.pfs;; | ||
600 | st8 [r2]=r3,8 | ||
601 | |||
602 | mov r3=ar.lc;; | ||
603 | st8 [r2]=r3,8 | ||
604 | |||
605 | mov r3=ar.ec;; | ||
606 | st8 [r2]=r3 | ||
607 | add r2=8*62,r2 //padding | ||
608 | |||
609 | // save RRs | ||
610 | mov ar.lc=0x08-1 | ||
611 | movl r4=0x00;; | ||
612 | |||
613 | cStRR: | ||
614 | dep.z r5=r4,61,3;; | ||
615 | mov r3=rr[r5];; | ||
616 | st8 [r2]=r3,8 | ||
617 | add r4=1,r4 | ||
618 | br.cloop.sptk.few cStRR | ||
619 | ;; | ||
620 | end_os_mca_dump: | ||
621 | br ia64_os_mca_done_dump;; | ||
622 | 695 | ||
623 | //EndStub////////////////////////////////////////////////////////////////////// | 696 | //EndStub////////////////////////////////////////////////////////////////////// |
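The loadrs value stored by ia64_state_save is plain arithmetic: the number of dirty bytes sitting on the register backing store, ar.bsp - ar.bspstore, shifted up into the loadrs field of ar.rsc (bits 16 and above), so that the later "loadrs" in ia64_old_stack pulls exactly those bytes back before bspstore is switched. A minimal sketch of that computation, assuming 64-bit unsigned values for the two registers:

	/* Sketch of the loadrs computation in ia64_state_save, not kernel code. */
	static unsigned long mca_loadrs(unsigned long bsp, unsigned long bspstore)
	{
		unsigned long dirty = bsp - bspstore;	/* dirty bytes (incl. NaT slots) on the RBS */
		return dirty << 16;			/* position for the ar.rsc.loadrs field */
	}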
624 | 697 | ||
625 | 698 | ||
626 | //++ | 699 | //++ |
627 | // Name: | 700 | // Name: |
628 | // ia64_os_mca_proc_state_restore() | 701 | // ia64_state_restore() |
629 | // | 702 | // |
630 | // Stub Description: | 703 | // Stub Description: |
631 | // | 704 | // |
632 | // This is a stub to restore the saved processor state during MCHK | 705 | // Restore the SAL/OS state. This is sensitive to the layout of struct |
706 | // ia64_sal_os_state in mca.h. | ||
707 | // | ||
708 | // r2 contains the return address, r3 contains either | ||
709 | // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET. | ||
710 | // | ||
711 | // In addition to the SAL to OS state, this routine restores all the | ||
712 | // registers that appear in struct pt_regs and struct switch_stack, | ||
713 | // excluding those in the PAL minstate area. | ||
633 | // | 714 | // |
634 | //-- | 715 | //-- |
635 | 716 | ||
636 | ia64_os_mca_proc_state_restore: | 717 | ia64_state_restore: |
718 | // Restore the switch_stack data that is not in minstate nor pt_regs. | ||
719 | add regs=MCA_SWITCH_STACK_OFFSET, r3 | ||
720 | mov b0=r2 // save return address | ||
721 | ;; | ||
722 | GET_IA64_MCA_DATA(temp2) | ||
723 | ;; | ||
724 | add regs=temp2, regs | ||
725 | ;; | ||
726 | add temp1=SW(F2), regs | ||
727 | add temp2=SW(F3), regs | ||
728 | ;; | ||
729 | ldf.fill f2=[temp1],32 | ||
730 | ldf.fill f3=[temp2],32 | ||
731 | ;; | ||
732 | ldf.fill f4=[temp1],32 | ||
733 | ldf.fill f5=[temp2],32 | ||
734 | ;; | ||
735 | ldf.fill f12=[temp1],32 | ||
736 | ldf.fill f13=[temp2],32 | ||
737 | ;; | ||
738 | ldf.fill f14=[temp1],32 | ||
739 | ldf.fill f15=[temp2],32 | ||
740 | ;; | ||
741 | ldf.fill f16=[temp1],32 | ||
742 | ldf.fill f17=[temp2],32 | ||
743 | ;; | ||
744 | ldf.fill f18=[temp1],32 | ||
745 | ldf.fill f19=[temp2],32 | ||
746 | ;; | ||
747 | ldf.fill f20=[temp1],32 | ||
748 | ldf.fill f21=[temp2],32 | ||
749 | ;; | ||
750 | ldf.fill f22=[temp1],32 | ||
751 | ldf.fill f23=[temp2],32 | ||
752 | ;; | ||
753 | ldf.fill f24=[temp1],32 | ||
754 | ldf.fill f25=[temp2],32 | ||
755 | ;; | ||
756 | ldf.fill f26=[temp1],32 | ||
757 | ldf.fill f27=[temp2],32 | ||
758 | ;; | ||
759 | ldf.fill f28=[temp1],32 | ||
760 | ldf.fill f29=[temp2],32 | ||
761 | ;; | ||
762 | ldf.fill f30=[temp1],SW(B2)-SW(F30) | ||
763 | ldf.fill f31=[temp2],SW(B3)-SW(F31) | ||
764 | ;; | ||
765 | ld8 temp3=[temp1],16 // restore b2 | ||
766 | ld8 temp4=[temp2],16 // restore b3 | ||
767 | ;; | ||
768 | mov b2=temp3 | ||
769 | mov b3=temp4 | ||
770 | ld8 temp3=[temp1],SW(AR_LC)-SW(B4) // restore b4 | ||
771 | ld8 temp4=[temp2] // restore b5 | ||
772 | ;; | ||
773 | mov b4=temp3 | ||
774 | mov b5=temp4 | ||
775 | ld8 temp3=[temp1] // restore ar.lc | ||
776 | ;; | ||
777 | mov ar.lc=temp3 | ||
637 | 778 | ||
638 | // Restore bank1 GR16-31 | 779 | // Restore the pt_regs data that is not in minstate. The previous code |
639 | GET_IA64_MCA_DATA(r2) | 780 | // left regs at switch_stack. |
781 | add regs=MCA_PT_REGS_OFFSET-MCA_SWITCH_STACK_OFFSET, regs | ||
782 | ;; | ||
783 | add temp1=PT(B6), regs | ||
784 | add temp2=PT(B7), regs | ||
785 | ;; | ||
786 | ld8 temp3=[temp1],PT(AR_CSD)-PT(B6) // restore b6 | ||
787 | ld8 temp4=[temp2],PT(AR_SSD)-PT(B7) // restore b7 | ||
788 | ;; | ||
789 | mov b6=temp3 | ||
790 | mov b7=temp4 | ||
791 | ld8 temp3=[temp1],PT(AR_UNAT)-PT(AR_CSD) // restore ar.csd | ||
792 | ld8 temp4=[temp2],PT(AR_PFS)-PT(AR_SSD) // restore ar.ssd | ||
793 | ;; | ||
794 | mov ar.csd=temp3 | ||
795 | mov ar.ssd=temp4 | ||
796 | ld8 temp3=[temp1] // restore ar.unat | ||
797 | add temp1=PT(AR_CCV)-PT(AR_UNAT), temp1 | ||
798 | ld8 temp4=[temp2],PT(AR_FPSR)-PT(AR_PFS) // restore ar.pfs | ||
799 | ;; | ||
800 | mov ar.unat=temp3 | ||
801 | mov ar.pfs=temp4 | ||
802 | // ar.rnat, ar.bspstore, loadrs are restore in ia64_old_stack. | ||
803 | ld8 temp3=[temp1],PT(F6)-PT(AR_CCV) // restore ar.ccv | ||
804 | ld8 temp4=[temp2],PT(F7)-PT(AR_FPSR) // restore ar.fpsr | ||
805 | ;; | ||
806 | mov ar.ccv=temp3 | ||
807 | mov ar.fpsr=temp4 | ||
808 | ldf.fill f6=[temp1],PT(F8)-PT(F6) | ||
809 | ldf.fill f7=[temp2],PT(F9)-PT(F7) | ||
810 | ;; | ||
811 | ldf.fill f8=[temp1],PT(F10)-PT(F8) | ||
812 | ldf.fill f9=[temp2],PT(F11)-PT(F9) | ||
813 | ;; | ||
814 | ldf.fill f10=[temp1] | ||
815 | ldf.fill f11=[temp2] | ||
816 | |||
817 | // Restore the SAL to OS state. The previous code left regs at pt_regs. | ||
818 | add regs=MCA_SOS_OFFSET-MCA_PT_REGS_OFFSET, regs | ||
640 | ;; | 819 | ;; |
641 | add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2 | 820 | add temp1=IA64_SAL_OS_STATE_COMMON_OFFSET, regs |
642 | 821 | add temp2=IA64_SAL_OS_STATE_COMMON_OFFSET+8, regs | |
643 | restore_GRs: // restore bank-1 GRs 16-31 | 822 | ;; |
644 | bsw.1;; | 823 | ld8 r12=[temp1],16 // sal_ra |
645 | add r3=16*8,r2;; // to get to NaT of GR 16-31 | 824 | ld8 r9=[temp2],16 // sal_gp |
646 | ld8 r3=[r3];; | 825 | ;; |
647 | mov ar.unat=r3;; // first restore NaT | 826 | ld8 r22=[temp1],24 // pal_min_state, virtual. skip prev_task |
648 | 827 | ld8 r21=[temp2],16 // prev_IA64_KR_CURRENT | |
649 | ld8.fill r16=[r2],8;; | 828 | ;; |
650 | ld8.fill r17=[r2],8;; | 829 | ld8 temp3=[temp1],16 // cr.isr |
651 | ld8.fill r18=[r2],8;; | 830 | ld8 temp4=[temp2],16 // cr.ifa |
652 | ld8.fill r19=[r2],8;; | 831 | ;; |
653 | ld8.fill r20=[r2],8;; | 832 | mov cr.isr=temp3 |
654 | ld8.fill r21=[r2],8;; | 833 | mov cr.ifa=temp4 |
655 | ld8.fill r22=[r2],8;; | 834 | ld8 temp3=[temp1],16 // cr.itir |
656 | ld8.fill r23=[r2],8;; | 835 | ld8 temp4=[temp2],16 // cr.iipa |
657 | ld8.fill r24=[r2],8;; | 836 | ;; |
658 | ld8.fill r25=[r2],8;; | 837 | mov cr.itir=temp3 |
659 | ld8.fill r26=[r2],8;; | 838 | mov cr.iipa=temp4 |
660 | ld8.fill r27=[r2],8;; | 839 | ld8 temp3=[temp1],16 // cr.iim |
661 | ld8.fill r28=[r2],8;; | 840 | ld8 temp4=[temp2],16 // cr.iha |
662 | ld8.fill r29=[r2],8;; | 841 | ;; |
663 | ld8.fill r30=[r2],8;; | 842 | mov cr.iim=temp3 |
664 | ld8.fill r31=[r2],8;; | 843 | mov cr.iha=temp4 |
665 | 844 | dep r22=0,r22,62,2 // pal_min_state, physical, uncached | |
666 | ld8 r3=[r2],8;; // increment to skip NaT | 845 | mov IA64_KR(CURRENT)=r21 |
667 | bsw.0;; | 846 | ld8 r8=[temp1] // os_status |
668 | 847 | ld8 r10=[temp2] // context | |
669 | restore_BRs: | 848 | |
670 | add r4=8,r2 // duplicate r2 in r4 | 849 | br.sptk b0 |
671 | add r6=2*8,r2;; // duplicate r2 in r4 | ||
672 | |||
673 | ld8 r3=[r2],3*8 | ||
674 | ld8 r5=[r4],3*8 | ||
675 | ld8 r7=[r6],3*8;; | ||
676 | mov b0=r3 | ||
677 | mov b1=r5 | ||
678 | mov b2=r7;; | ||
679 | |||
680 | ld8 r3=[r2],3*8 | ||
681 | ld8 r5=[r4],3*8 | ||
682 | ld8 r7=[r6],3*8;; | ||
683 | mov b3=r3 | ||
684 | mov b4=r5 | ||
685 | mov b5=r7;; | ||
686 | |||
687 | ld8 r3=[r2],2*8 | ||
688 | ld8 r5=[r4],2*8;; | ||
689 | mov b6=r3 | ||
690 | mov b7=r5;; | ||
691 | |||
692 | restore_CRs: | ||
693 | add r4=8,r2 // duplicate r2 in r4 | ||
694 | add r6=2*8,r2;; // duplicate r2 in r4 | ||
695 | |||
696 | ld8 r3=[r2],8*8 | ||
697 | ld8 r5=[r4],3*8 | ||
698 | ld8 r7=[r6],3*8;; // 48 byte increments | ||
699 | mov cr.dcr=r3 | ||
700 | mov cr.itm=r5 | ||
701 | mov cr.iva=r7;; | ||
702 | |||
703 | ld8 r3=[r2],8*8;; // 64 byte increments | ||
704 | // mov cr.pta=r3 | ||
705 | |||
706 | |||
707 | // if PSR.ic=1, reading interruption registers causes an illegal operation fault | ||
708 | mov r3=psr;; | ||
709 | tbit.nz.unc p6,p0=r3,PSR_IC;; // PSI Valid Log bit pos. test | ||
710 | (p6) st8 [r2]=r0,9*8+160 // increment by 232 byte inc. | ||
711 | |||
712 | begin_rskip_intr_regs: | ||
713 | (p6) br rSkipIntrRegs;; | ||
714 | |||
715 | add r4=8,r2 // duplicate r2 in r4 | ||
716 | add r6=2*8,r2;; // duplicate r2 in r4 | ||
717 | |||
718 | ld8 r3=[r2],3*8 | ||
719 | ld8 r5=[r4],3*8 | ||
720 | ld8 r7=[r6],3*8;; | ||
721 | mov cr.ipsr=r3 | ||
722 | // mov cr.isr=r5 // cr.isr is read only | ||
723 | |||
724 | ld8 r3=[r2],3*8 | ||
725 | ld8 r5=[r4],3*8 | ||
726 | ld8 r7=[r6],3*8;; | ||
727 | mov cr.iip=r3 | ||
728 | mov cr.ifa=r5 | ||
729 | mov cr.itir=r7;; | ||
730 | |||
731 | ld8 r3=[r2],3*8 | ||
732 | ld8 r5=[r4],3*8 | ||
733 | ld8 r7=[r6],3*8;; | ||
734 | mov cr.iipa=r3 | ||
735 | mov cr.ifs=r5 | ||
736 | mov cr.iim=r7 | ||
737 | |||
738 | ld8 r3=[r2],160;; // 160 byte increment | ||
739 | mov cr.iha=r3 | ||
740 | |||
741 | rSkipIntrRegs: | ||
742 | ld8 r3=[r2],152;; // another 152 byte inc. | ||
743 | |||
744 | add r4=8,r2 // duplicate r2 in r4 | ||
745 | add r6=2*8,r2;; // duplicate r2 in r6 | ||
746 | |||
747 | ld8 r3=[r2],8*3 | ||
748 | ld8 r5=[r4],8*3 | ||
749 | ld8 r7=[r6],8*3;; | ||
750 | mov cr.lid=r3 | ||
751 | // mov cr.ivr=r5 // cr.ivr is read only | ||
752 | mov cr.tpr=r7;; | ||
753 | |||
754 | ld8 r3=[r2],8*3 | ||
755 | ld8 r5=[r4],8*3 | ||
756 | ld8 r7=[r6],8*3;; | ||
757 | // mov cr.eoi=r3 | ||
758 | // mov cr.irr0=r5 // cr.irr0 is read only | ||
759 | // mov cr.irr1=r7;; // cr.irr1 is read only | ||
760 | |||
761 | ld8 r3=[r2],8*3 | ||
762 | ld8 r5=[r4],8*3 | ||
763 | ld8 r7=[r6],8*3;; | ||
764 | // mov cr.irr2=r3 // cr.irr2 is read only | ||
765 | // mov cr.irr3=r5 // cr.irr3 is read only | ||
766 | mov cr.itv=r7;; | ||
767 | |||
768 | ld8 r3=[r2],8*7 | ||
769 | ld8 r5=[r4],8*7;; | ||
770 | mov cr.pmv=r3 | ||
771 | mov cr.cmcv=r5;; | ||
772 | |||
773 | ld8 r3=[r2],8*23 | ||
774 | ld8 r5=[r4],8*23;; | ||
775 | adds r2=8*23,r2 | ||
776 | adds r4=8*23,r4;; | ||
777 | // mov cr.lrr0=r3 | ||
778 | // mov cr.lrr1=r5 | ||
779 | |||
780 | adds r2=8*2,r2;; | ||
781 | |||
782 | restore_ARs: | ||
783 | add r4=8,r2 // duplicate r2 in r4 | ||
784 | add r6=2*8,r2;; // duplicate r2 in r4 | ||
785 | |||
786 | ld8 r3=[r2],3*8 | ||
787 | ld8 r5=[r4],3*8 | ||
788 | ld8 r7=[r6],3*8;; | ||
789 | mov ar.k0=r3 | ||
790 | mov ar.k1=r5 | ||
791 | mov ar.k2=r7;; | ||
792 | |||
793 | ld8 r3=[r2],3*8 | ||
794 | ld8 r5=[r4],3*8 | ||
795 | ld8 r7=[r6],3*8;; | ||
796 | mov ar.k3=r3 | ||
797 | mov ar.k4=r5 | ||
798 | mov ar.k5=r7;; | ||
799 | |||
800 | ld8 r3=[r2],10*8 | ||
801 | ld8 r5=[r4],10*8 | ||
802 | ld8 r7=[r6],10*8;; | ||
803 | mov ar.k6=r3 | ||
804 | mov ar.k7=r5 | ||
805 | ;; | ||
806 | |||
807 | ld8 r3=[r2],3*8 | ||
808 | ld8 r5=[r4],3*8 | ||
809 | ld8 r7=[r6],3*8;; | ||
810 | // mov ar.rsc=r3 | ||
811 | // mov ar.bsp=r5 // ar.bsp is read only | ||
812 | mov ar.rsc=r0 // make sure that RSE is in enforced lazy mode | ||
813 | ;; | ||
814 | mov ar.bspstore=r7;; | ||
815 | |||
816 | ld8 r9=[r2],8*13;; | ||
817 | mov ar.rnat=r9 | ||
818 | |||
819 | mov ar.rsc=r3 | ||
820 | ld8 r3=[r2],8*4;; | ||
821 | mov ar.ccv=r3 | ||
822 | |||
823 | ld8 r3=[r2],8*4;; | ||
824 | mov ar.unat=r3 | ||
825 | |||
826 | ld8 r3=[r2],8*4;; | ||
827 | mov ar.fpsr=r3 | ||
828 | |||
829 | ld8 r3=[r2],160;; // 160 | ||
830 | // mov ar.itc=r3 | ||
831 | |||
832 | ld8 r3=[r2],8;; | ||
833 | mov ar.pfs=r3 | ||
834 | |||
835 | ld8 r3=[r2],8;; | ||
836 | mov ar.lc=r3 | ||
837 | |||
838 | ld8 r3=[r2];; | ||
839 | mov ar.ec=r3 | ||
840 | add r2=8*62,r2;; // padding | ||
841 | |||
842 | restore_RRs: | ||
843 | mov r5=ar.lc | ||
844 | mov ar.lc=0x08-1 | ||
845 | movl r4=0x00;; | ||
846 | cStRRr: | ||
847 | dep.z r7=r4,61,3 | ||
848 | ld8 r3=[r2],8;; | ||
849 | mov rr[r7]=r3 // what are its access previledges? | ||
850 | add r4=1,r4 | ||
851 | br.cloop.sptk.few cStRRr | ||
852 | ;; | ||
853 | mov ar.lc=r5 | ||
854 | ;; | ||
855 | end_os_mca_restore: | ||
856 | br ia64_os_mca_done_restore;; | ||
857 | 850 | ||
858 | //EndStub////////////////////////////////////////////////////////////////////// | 851 | //EndStub////////////////////////////////////////////////////////////////////// |
859 | 852 | ||
860 | 853 | ||
861 | // ok, the issue here is that we need to save state information so | 854 | //++ |
862 | // it can be useable by the kernel debugger and show regs routines. | 855 | // Name: |
863 | // In order to do this, our best bet is save the current state (plus | 856 | // ia64_new_stack() |
864 | // the state information obtain from the MIN_STATE_AREA) into a pt_regs | ||
865 | // format. This way we can pass it on in a useable format. | ||
866 | // | 857 | // |
867 | 858 | // Stub Description: | |
868 | // | 859 | // |
869 | // SAL to OS entry point for INIT on the monarch processor | 860 | // Switch to the MCA/INIT stack. |
870 | // This has been defined for registration purposes with SAL | ||
871 | // as a part of ia64_mca_init. | ||
872 | // | 861 | // |
873 | // When we get here, the following registers have been | 862 | // r2 contains the return address, r3 contains either |
874 | // set by the SAL for our use | 863 | // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET. |
875 | // | 864 | // |
876 | // 1. GR1 = OS INIT GP | 865 | // On entry RBS is still on the original stack, this routine switches RBS |
877 | // 2. GR8 = PAL_PROC physical address | 866 | // to use the MCA/INIT stack. |
878 | // 3. GR9 = SAL_PROC physical address | ||
879 | // 4. GR10 = SAL GP (physical) | ||
880 | // 5. GR11 = Init Reason | ||
881 | // 0 = Received INIT for event other than crash dump switch | ||
882 | // 1 = Received wakeup at the end of an OS_MCA corrected machine check | ||
883 | // 2 = Received INIT dude to CrashDump switch assertion | ||
884 | // | 867 | // |
885 | // 6. GR12 = Return address to location within SAL_INIT procedure | 868 | // On entry, sos->pal_min_state is physical, on exit it is virtual. |
886 | 869 | // | |
870 | //-- | ||
887 | 871 | ||
888 | GLOBAL_ENTRY(ia64_monarch_init_handler) | 872 | ia64_new_stack: |
889 | .prologue | 873 | add regs=MCA_PT_REGS_OFFSET, r3 |
890 | // stash the information the SAL passed to os | 874 | add temp2=MCA_SOS_OFFSET+IA64_SAL_OS_STATE_PAL_MIN_STATE_OFFSET, r3 |
891 | SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2) | 875 | mov b0=r2 // save return address |
876 | GET_IA64_MCA_DATA(temp1) | ||
877 | invala | ||
892 | ;; | 878 | ;; |
893 | SAVE_MIN_WITH_COVER | 879 | add temp2=temp2, temp1 // struct ia64_sal_os_state.pal_min_state on MCA or INIT stack |
880 | add regs=regs, temp1 // struct pt_regs on MCA or INIT stack | ||
894 | ;; | 881 | ;; |
895 | mov r8=cr.ifa | 882 | // Address of minstate area provided by PAL is physical, uncacheable. |
896 | mov r9=cr.isr | 883 | // Convert to Linux virtual address in region 6 for C code. |
897 | adds r3=8,r2 // set up second base pointer | 884 | ld8 ms=[temp2] // pal_min_state, physical |
898 | ;; | 885 | ;; |
899 | SAVE_REST | 886 | dep temp1=-1,ms,62,2 // set region 6 |
900 | 887 | mov temp3=IA64_RBS_OFFSET-MCA_PT_REGS_OFFSET | |
901 | // ok, enough should be saved at this point to be dangerous, and supply | 888 | ;; |
902 | // information for a dump | 889 | st8 [temp2]=temp1 // pal_min_state, virtual |
903 | // We need to switch to Virtual mode before hitting the C functions. | ||
904 | 890 | ||
905 | movl r2=IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN | 891 | add temp4=temp3, regs // start of bspstore on new stack |
906 | mov r3=psr // get the current psr, minimum enabled at this point | ||
907 | ;; | 892 | ;; |
908 | or r2=r2,r3 | 893 | mov ar.bspstore=temp4 // switch RBS to MCA/INIT stack |
909 | ;; | 894 | ;; |
910 | movl r3=IVirtual_Switch | 895 | flushrs // must be first in group |
896 | br.sptk b0 | ||
897 | |||
898 | //EndStub////////////////////////////////////////////////////////////////////// | ||
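The "dep temp1=-1,ms,62,2" above is the usual ia64 region-bit trick: the top three bits of an address select the region, and region 6 is the kernel's uncached identity mapping, so setting bits 63:62 on a physical address (bit 61 already clear) yields the uncached virtual alias the C code wants for pal_min_state; ia64_state_restore undoes it with "dep r22=0,r22,62,2". A sketch of the two conversions as plain bit operations (helper names are illustrative only):

	/* Illustrative only: region 6 is the uncached identity mapping on ia64. */
	static unsigned long phys_to_region6(unsigned long pa)	/* dep reg=-1,reg,62,2 */
	{
		return pa | (3UL << 62);	/* bits 63:62 = 11, bit 61 stays 0 => region 6 */
	}

	static unsigned long region6_to_phys(unsigned long va)	/* dep reg=0,reg,62,2 */
	{
		return va & ~(3UL << 62);
	}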
899 | |||
900 | |||
901 | //++ | ||
902 | // Name: | ||
903 | // ia64_old_stack() | ||
904 | // | ||
905 | // Stub Description: | ||
906 | // | ||
907 | // Switch to the old stack. | ||
908 | // | ||
909 | // r2 contains the return address, r3 contains either | ||
910 | // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET. | ||
911 | // | ||
912 | // On entry, pal_min_state is virtual, on exit it is physical. | ||
913 | // | ||
914 | // On entry RBS is on the MCA/INIT stack, this routine switches RBS | ||
915 | // back to the previous stack. | ||
916 | // | ||
917 | // The psr is set to all zeroes. SAL return requires either all zeroes or | ||
918 | // just psr.mc set. Leaving psr.mc off allows INIT to be issued if this | ||
919 | // code does not perform correctly. | ||
920 | // | ||
921 | // The dirty registers at the time of the event were flushed to the | ||
922 | // MCA/INIT stack in ia64_pt_regs_save(). Restore the dirty registers | ||
923 | // before reverting to the previous bspstore. | ||
924 | //-- | ||
925 | |||
926 | ia64_old_stack: | ||
927 | add regs=MCA_PT_REGS_OFFSET, r3 | ||
928 | mov b0=r2 // save return address | ||
929 | GET_IA64_MCA_DATA(temp2) | ||
930 | LOAD_PHYSICAL(p0,temp1,1f) | ||
911 | ;; | 931 | ;; |
912 | mov cr.iip=r3 // short return to set the appropriate bits | 932 | mov cr.ipsr=r0 |
913 | mov cr.ipsr=r2 // need to do an rfi to set appropriate bits | 933 | mov cr.ifs=r0 |
934 | mov cr.iip=temp1 | ||
914 | ;; | 935 | ;; |
936 | invala | ||
915 | rfi | 937 | rfi |
938 | 1: | ||
939 | |||
940 | add regs=regs, temp2 // struct pt_regs on MCA or INIT stack | ||
916 | ;; | 941 | ;; |
917 | IVirtual_Switch: | 942 | add temp1=PT(LOADRS), regs |
918 | // | ||
919 | // We should now be running virtual | ||
920 | // | ||
921 | // Let's call the C handler to get the rest of the state info | ||
922 | // | ||
923 | alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!) | ||
924 | ;; | 943 | ;; |
925 | adds out0=16,sp // out0 = pointer to pt_regs | 944 | ld8 temp2=[temp1],PT(AR_BSPSTORE)-PT(LOADRS) // restore loadrs |
926 | ;; | 945 | ;; |
927 | DO_SAVE_SWITCH_STACK | 946 | ld8 temp3=[temp1],PT(AR_RNAT)-PT(AR_BSPSTORE) // restore ar.bspstore |
928 | .body | 947 | mov ar.rsc=temp2 |
929 | adds out1=16,sp // out0 = pointer to switch_stack | 948 | ;; |
949 | loadrs | ||
950 | ld8 temp4=[temp1] // restore ar.rnat | ||
951 | ;; | ||
952 | mov ar.bspstore=temp3 // back to old stack | ||
953 | ;; | ||
954 | mov ar.rnat=temp4 | ||
955 | ;; | ||
956 | |||
957 | br.sptk b0 | ||
930 | 958 | ||
931 | br.call.sptk.many rp=ia64_init_handler | 959 | //EndStub////////////////////////////////////////////////////////////////////// |
932 | .ret1: | ||
933 | 960 | ||
934 | return_from_init: | ||
935 | br.sptk return_from_init | ||
936 | END(ia64_monarch_init_handler) | ||
937 | 961 | ||
962 | //++ | ||
963 | // Name: | ||
964 | // ia64_set_kernel_registers() | ||
938 | // | 965 | // |
939 | // SAL to OS entry point for INIT on the slave processor | 966 | // Stub Description: |
940 | // This has been defined for registration purposes with SAL | 967 | // |
941 | // as a part of ia64_mca_init. | 968 | // Set the registers that are required by the C code in order to run on an |
969 | // MCA/INIT stack. | ||
970 | // | ||
971 | // r2 contains the return address, r3 contains either | ||
972 | // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET. | ||
942 | // | 973 | // |
974 | //-- | ||
975 | |||
976 | ia64_set_kernel_registers: | ||
977 | add temp3=MCA_SP_OFFSET, r3 | ||
978 | add temp4=MCA_SOS_OFFSET+IA64_SAL_OS_STATE_OS_GP_OFFSET, r3 | ||
979 | mov b0=r2 // save return address | ||
980 | GET_IA64_MCA_DATA(temp1) | ||
981 | ;; | ||
982 | add temp4=temp4, temp1 // &struct ia64_sal_os_state.os_gp | ||
983 | add r12=temp1, temp3 // kernel stack pointer on MCA/INIT stack | ||
984 | add r13=temp1, r3 // set current to start of MCA/INIT stack | ||
985 | ;; | ||
986 | ld8 r1=[temp4] // OS GP from SAL OS state | ||
987 | ;; | ||
988 | DATA_PA_TO_VA(r1,temp1) | ||
989 | DATA_PA_TO_VA(r12,temp2) | ||
990 | DATA_PA_TO_VA(r13,temp3) | ||
991 | ;; | ||
992 | mov IA64_KR(CURRENT)=r13 | ||
993 | |||
994 | // FIXME: do I need to wire IA64_KR_CURRENT_STACK and IA64_TR_CURRENT_STACK? | ||
995 | |||
996 | br.sptk b0 | ||
997 | |||
998 | //EndStub////////////////////////////////////////////////////////////////////// | ||
999 | |||
1000 | #undef ms | ||
1001 | #undef regs | ||
1002 | #undef temp1 | ||
1003 | #undef temp2 | ||
1004 | #undef temp3 | ||
1005 | #undef temp4 | ||
1006 | |||
943 | 1007 | ||
944 | GLOBAL_ENTRY(ia64_slave_init_handler) | 1008 | // Support function for mca.c, it is here to avoid using inline asm. Given the |
945 | 1: br.sptk 1b | 1009 | // address of an rnat slot, if that address is below the current ar.bspstore |
946 | END(ia64_slave_init_handler) | 1010 | // then return the contents of that slot, otherwise return the contents of |
1011 | // ar.rnat. | ||
1012 | GLOBAL_ENTRY(ia64_get_rnat) | ||
1013 | alloc r14=ar.pfs,1,0,0,0 | ||
1014 | mov ar.rsc=0 | ||
1015 | ;; | ||
1016 | mov r14=ar.bspstore | ||
1017 | ;; | ||
1018 | cmp.lt p6,p7=in0,r14 | ||
1019 | ;; | ||
1020 | (p6) ld8 r8=[in0] | ||
1021 | (p7) mov r8=ar.rnat | ||
1022 | mov ar.rsc=3 | ||
1023 | br.ret.sptk.many rp | ||
1024 | END(ia64_get_rnat) | ||
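In C terms the helper above is just a comparison against ar.bspstore: an RNAT slot below the current bspstore has already been spilled to memory, while a slot at or above it is still held in the ar.rnat register. A rough equivalent, with hypothetical accessors standing in for the register reads that the asm performs with the RSE stopped (ar.rsc=0):

	/* Rough C equivalent of ia64_get_rnat; the two accessors below are
	 * hypothetical stand-ins for reading ar.bspstore and ar.rnat. */
	extern unsigned long read_ar_bspstore(void);
	extern unsigned long read_ar_rnat(void);

	static unsigned long get_rnat(unsigned long *rnat_slot)
	{
		if ((unsigned long)rnat_slot < read_ar_bspstore())
			return *rnat_slot;	/* already flushed to the backing store */
		return read_ar_rnat();		/* bits still live in ar.rnat */
	}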