author     Keith Owens <kaos@sgi.com>        2005-09-11 03:22:53 -0400
committer  Tony Luck <tony.luck@intel.com>   2005-09-11 17:08:41 -0400
commit     7f613c7d2203ae137d98fc1c38abc30fd7048637 (patch)
tree       d8155a5cca33e4fe178625396886fcbb81f39e7a /include/asm-ia64/mca_asm.h
parent     289d773ee89ea80dcc364ef97d1be7ad1817387e (diff)
[PATCH] MCA/INIT: use per cpu stacks
The bulk of the change. Use per cpu MCA/INIT stacks. Change the SAL
to OS state (sos) to be per process. Do all the assembler work on the
MCA/INIT stacks, leaving the original stack alone. Pass per cpu state
data to the C handlers for MCA and INIT, which also means changing the
mca_drv interfaces slightly. Lots of verification on whether the
original stack is usable before converting it to a sleeping process.
Signed-off-by: Keith Owens <kaos@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'include/asm-ia64/mca_asm.h')

 -rw-r--r--  include/asm-ia64/mca_asm.h  | 125
 1 file changed, 27 insertions(+), 98 deletions(-)
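Background for the commit message above: the point of per cpu MCA/INIT stacks is that a machine check or INIT event gets a dedicated, known-good stack on its own CPU instead of trusting the stack of whatever task it interrupted. The fragment below is an illustrative sketch only; the struct and field names are invented for the example, and the real per cpu layout (struct ia64_mca_cpu, referenced by the new header comment in the diff) lives elsewhere in the patch, not in this file.

/* Illustrative sketch only -- not the real struct ia64_mca_cpu.  The idea:
 * one dedicated MCA stack and one dedicated INIT stack per CPU, each the
 * size of a normal kernel stack, so the handlers never depend on the
 * interrupted task's stack being usable.
 */
#define EXAMPLE_NR_CPUS		64		/* assumed for the sketch */
#define KERNEL_STACK_SIZE	32768		/* assumed for the sketch */

struct example_mca_cpu {
	unsigned long mca_stack[KERNEL_STACK_SIZE / sizeof(unsigned long)];
	unsigned long init_stack[KERNEL_STACK_SIZE / sizeof(unsigned long)];
};

/* One instance per CPU; the kernel itself would use its per cpu machinery
 * rather than a plain array.
 */
static struct example_mca_cpu example_mca_data[EXAMPLE_NR_CPUS];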
diff --git a/include/asm-ia64/mca_asm.h b/include/asm-ia64/mca_asm.h
index 836953e0f91f..27c9203d8ce3 100644
--- a/include/asm-ia64/mca_asm.h
+++ b/include/asm-ia64/mca_asm.h
@@ -8,6 +8,8 @@
  * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
  * Copyright (C) 2002 Intel Corp.
  * Copyright (C) 2002 Jenna Hall <jenna.s.hall@intel.com>
+ * Copyright (C) 2005 Silicon Graphics, Inc
+ * Copyright (C) 2005 Keith Owens <kaos@sgi.com>
  */
 #ifndef _ASM_IA64_MCA_ASM_H
 #define _ASM_IA64_MCA_ASM_H
@@ -207,106 +209,33 @@
 	;;
 
 /*
- * The following offsets capture the order in which the
- * RSE related registers from the old context are
- * saved onto the new stack frame.
+ * The MCA and INIT stacks in struct ia64_mca_cpu look like normal kernel
+ * stacks, except that the SAL/OS state and a switch_stack are stored near the
+ * top of the MCA/INIT stack.  To support concurrent entry to MCA or INIT, as
+ * well as MCA over INIT, each event needs its own SAL/OS state.  All entries
+ * are 16 byte aligned.
  *
- *	+-----------------------+
- *	|NDIRTY [BSP - BSPSTORE]|
- *	+-----------------------+
- *	|	RNAT		|
- *	+-----------------------+
- *	|	BSPSTORE	|
- *	+-----------------------+
- *	|	IFS		|
- *	+-----------------------+
- *	|	PFS		|
- *	+-----------------------+
- *	|	RSC		|
- *	+-----------------------+ <-------- Bottom of new stack frame
+ *	+---------------------------+
+ *	|          pt_regs          |
+ *	+---------------------------+
+ *	|        switch_stack       |
+ *	+---------------------------+
+ *	|        SAL/OS state       |
+ *	+---------------------------+
+ *	|    16 byte scratch area   |
+ *	+---------------------------+ <-------- SP at start of C MCA handler
+ *	|           .....           |
+ *	+---------------------------+
+ *	| RBS for MCA/INIT handler  |
+ *	+---------------------------+
+ *	| struct task for MCA/INIT  |
+ *	+---------------------------+ <-------- Bottom of MCA/INIT stack
  */
-#define rse_rsc_offset		0
-#define rse_pfs_offset		(rse_rsc_offset+0x08)
-#define rse_ifs_offset		(rse_pfs_offset+0x08)
-#define rse_bspstore_offset	(rse_ifs_offset+0x08)
-#define rse_rnat_offset		(rse_bspstore_offset+0x08)
-#define rse_ndirty_offset	(rse_rnat_offset+0x08)
 
-/*
- * rse_switch_context
- *
- *	1. Save old RSC onto the new stack frame
- *	2. Save PFS onto new stack frame
- *	3. Cover the old frame and start a new frame.
- *	4. Save IFS onto new stack frame
- *	5. Save the old BSPSTORE on the new stack frame
- *	6. Save the old RNAT on the new stack frame
- *	7. Write BSPSTORE with the new backing store pointer
- *	8. Read and save the new BSP to calculate the #dirty registers
- * NOTE: Look at pages 11-10, 11-11 in PRM Vol 2
- */
-#define rse_switch_context(temp,p_stackframe,p_bspstore)	\
-	;;							\
-	mov	temp=ar.rsc;;					\
-	st8	[p_stackframe]=temp,8;;				\
-	mov	temp=ar.pfs;;					\
-	st8	[p_stackframe]=temp,8;				\
-	cover ;;						\
-	mov	temp=cr.ifs;;					\
-	st8	[p_stackframe]=temp,8;;				\
-	mov	temp=ar.bspstore;;				\
-	st8	[p_stackframe]=temp,8;;				\
-	mov	temp=ar.rnat;;					\
-	st8	[p_stackframe]=temp,8;				\
-	mov	ar.bspstore=p_bspstore;;			\
-	mov	temp=ar.bsp;;					\
-	sub	temp=temp,p_bspstore;;				\
-	st8	[p_stackframe]=temp,8;;
-
-/*
- * rse_return_context
- *	1. Allocate a zero-sized frame
- *	2. Store the number of dirty registers RSC.loadrs field
- *	3. Issue a loadrs to insure that any registers from the interrupted
- *	   context which were saved on the new stack frame have been loaded
- *	   back into the stacked registers
- *	4. Restore BSPSTORE
- *	5. Restore RNAT
- *	6. Restore PFS
- *	7. Restore IFS
- *	8. Restore RSC
- *	9. Issue an RFI
- */
-#define rse_return_context(psr_mask_reg,temp,p_stackframe)	\
-	;;							\
-	alloc	temp=ar.pfs,0,0,0,0;				\
-	add	p_stackframe=rse_ndirty_offset,p_stackframe;;	\
-	ld8	temp=[p_stackframe];;				\
-	shl	temp=temp,16;;					\
-	mov	ar.rsc=temp;;					\
-	loadrs;;						\
-	add	p_stackframe=-rse_ndirty_offset+rse_bspstore_offset,p_stackframe;;\
-	ld8	temp=[p_stackframe];;				\
-	mov	ar.bspstore=temp;;				\
-	add	p_stackframe=-rse_bspstore_offset+rse_rnat_offset,p_stackframe;;\
-	ld8	temp=[p_stackframe];;				\
-	mov	ar.rnat=temp;;					\
-	add	p_stackframe=-rse_rnat_offset+rse_pfs_offset,p_stackframe;;	\
-	ld8	temp=[p_stackframe];;				\
-	mov	ar.pfs=temp;;					\
-	add	p_stackframe=-rse_pfs_offset+rse_ifs_offset,p_stackframe;;	\
-	ld8	temp=[p_stackframe];;				\
-	mov	cr.ifs=temp;;					\
-	add	p_stackframe=-rse_ifs_offset+rse_rsc_offset,p_stackframe;;	\
-	ld8	temp=[p_stackframe];;				\
-	mov	ar.rsc=temp ;					\
-	mov	temp=psr;;					\
-	or	temp=temp,psr_mask_reg;;			\
-	mov	cr.ipsr=temp;;					\
-	mov	temp=ip;;					\
-	add	temp=0x30,temp;;				\
-	mov	cr.iip=temp;;					\
-	srlz.i;;						\
-	rfi;;
+#define ALIGN16(x)			((x)&~15)
+#define MCA_PT_REGS_OFFSET		ALIGN16(KERNEL_STACK_SIZE-IA64_PT_REGS_SIZE)
+#define MCA_SWITCH_STACK_OFFSET		ALIGN16(MCA_PT_REGS_OFFSET-IA64_SWITCH_STACK_SIZE)
+#define MCA_SOS_OFFSET			ALIGN16(MCA_SWITCH_STACK_OFFSET-IA64_SAL_OS_STATE_SIZE)
+#define MCA_SP_OFFSET			ALIGN16(MCA_SOS_OFFSET-16)
 
 #endif /* _ASM_IA64_MCA_ASM_H */
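The hunk above replaces the old per-register RSE save area and its rse_switch_context/rse_return_context helpers with a handful of offsets into the new per cpu MCA/INIT stack, carved from the top down: pt_regs at the top, then a switch_stack, then the SAL/OS state, then a 16 byte scratch area just above the stack pointer handed to the C handler. The standalone program below only mirrors that macro arithmetic with made-up sizes so the resulting layout can be printed; the real values of KERNEL_STACK_SIZE, IA64_PT_REGS_SIZE, IA64_SWITCH_STACK_SIZE and IA64_SAL_OS_STATE_SIZE are generated constants and will differ.

#include <stdio.h>

/* Assumed sizes for illustration only; the kernel gets the real values
 * from its generated offset constants.
 */
#define KERNEL_STACK_SIZE	32768
#define IA64_PT_REGS_SIZE	400
#define IA64_SWITCH_STACK_SIZE	560
#define IA64_SAL_OS_STATE_SIZE	136

/* Same arithmetic as the new macros in mca_asm.h: round down to a 16 byte
 * boundary and place each area immediately below the previous one.
 */
#define ALIGN16(x)		((x)&~15)
#define MCA_PT_REGS_OFFSET	ALIGN16(KERNEL_STACK_SIZE - IA64_PT_REGS_SIZE)
#define MCA_SWITCH_STACK_OFFSET	ALIGN16(MCA_PT_REGS_OFFSET - IA64_SWITCH_STACK_SIZE)
#define MCA_SOS_OFFSET		ALIGN16(MCA_SWITCH_STACK_OFFSET - IA64_SAL_OS_STATE_SIZE)
#define MCA_SP_OFFSET		ALIGN16(MCA_SOS_OFFSET - 16)

int main(void)
{
	/* Offsets are byte distances from the bottom of the MCA/INIT stack. */
	printf("pt_regs      at offset 0x%x\n", (unsigned)MCA_PT_REGS_OFFSET);
	printf("switch_stack at offset 0x%x\n", (unsigned)MCA_SWITCH_STACK_OFFSET);
	printf("SAL/OS state at offset 0x%x\n", (unsigned)MCA_SOS_OFFSET);
	printf("initial sp   at offset 0x%x\n", (unsigned)MCA_SP_OFFSET);
	return 0;
}

With the assumed sizes this prints pt_regs at 0x7e70, switch_stack at 0x7c40, SAL/OS state at 0x7bb0 and an initial sp of 0x7ba0, matching the top-down diagram in the new header comment.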