author     Keith Owens <kaos@sgi.com>        2005-09-11 03:22:53 -0400
committer  Tony Luck <tony.luck@intel.com>   2005-09-11 17:08:41 -0400
commit     7f613c7d2203ae137d98fc1c38abc30fd7048637
tree       d8155a5cca33e4fe178625396886fcbb81f39e7a /include
parent     289d773ee89ea80dcc364ef97d1be7ad1817387e
[PATCH] MCA/INIT: use per cpu stacks
The bulk of the change. Use per cpu MCA/INIT stacks. Change the SAL
to OS state (sos) to be per process. Do all the assembler work on the
MCA/INIT stacks, leaving the original stack alone. Pass per cpu state
data to the C handlers for MCA and INIT, which also means changing the
mca_drv interfaces slightly. Lots of verification on whether the
original stack is usable before converting it to a sleeping process.
Signed-off-by: Keith Owens <kaos@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'include')
-rw-r--r--   include/asm-ia64/mca.h      | 102
-rw-r--r--   include/asm-ia64/mca_asm.h  | 125
2 files changed, 91 insertions, 136 deletions
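To make the shape of the change concrete before reading the diff, here is a minimal standalone sketch, not kernel code: it contrasts the old and new per-CPU MCA areas using the struct members from the diff below. KERNEL_STACK_SIZE is assumed to be 32KB purely so the example compiles and runs; the real value comes from the ia64 headers.

/* Illustrative only: mirrors the struct change in include/asm-ia64/mca.h.
 * KERNEL_STACK_SIZE below is an assumption (32KB) for the sake of a
 * runnable example.
 */
#include <stdio.h>

#define IA64_MCA_STACK_SIZE	8192
#define KERNEL_STACK_SIZE	32768	/* assumed for illustration */

/* Old layout: one small MCA stack plus assorted save areas, one INIT stack. */
struct old_ia64_mca_cpu {
	unsigned long stack[IA64_MCA_STACK_SIZE/8];	/* MCA memory-stack */
	unsigned long proc_state_dump[512];
	unsigned long stackframe[32];
	unsigned long rbstore[IA64_MCA_STACK_SIZE/8];	/* MCA reg.-backing store */
	unsigned long init_stack[KERNEL_STACK_SIZE/8];
} __attribute__ ((aligned(16)));

/* New layout: two normal kernel-sized stacks, one per event type.  pt_regs,
 * switch_stack and the SAL/OS handover state live near the top of each.
 */
struct new_ia64_mca_cpu {
	unsigned long mca_stack[KERNEL_STACK_SIZE/8];
	unsigned long init_stack[KERNEL_STACK_SIZE/8];
};

int main(void)
{
	printf("old per-cpu MCA area: %zu bytes\n", sizeof(struct old_ia64_mca_cpu));
	printf("new per-cpu MCA area: %zu bytes\n", sizeof(struct new_ia64_mca_cpu));
	return 0;
}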
diff --git a/include/asm-ia64/mca.h b/include/asm-ia64/mca.h
index 149ad0118455..97a28b8b2ddd 100644
--- a/include/asm-ia64/mca.h
+++ b/include/asm-ia64/mca.h
@@ -11,8 +11,6 @@
 #ifndef _ASM_IA64_MCA_H
 #define _ASM_IA64_MCA_H
 
-#define IA64_MCA_STACK_SIZE	8192
-
 #if !defined(__ASSEMBLY__)
 
 #include <linux/interrupt.h>
@@ -48,7 +46,8 @@ typedef union cmcv_reg_u {
 
 enum {
 	IA64_MCA_RENDEZ_CHECKIN_NOTDONE	=	0x0,
-	IA64_MCA_RENDEZ_CHECKIN_DONE	=	0x1
+	IA64_MCA_RENDEZ_CHECKIN_DONE	=	0x1,
+	IA64_MCA_RENDEZ_CHECKIN_INIT	=	0x2,
 };
 
 /* Information maintained by the MC infrastructure */
@@ -63,18 +62,42 @@ typedef struct ia64_mc_info_s {
 
 } ia64_mc_info_t;
 
-typedef struct ia64_mca_sal_to_os_state_s {
-	u64		imsto_os_gp;		/* GP of the os registered with the SAL */
-	u64		imsto_pal_proc;		/* PAL_PROC entry point - physical addr */
-	u64		imsto_sal_proc;		/* SAL_PROC entry point - physical addr */
-	u64		imsto_sal_gp;		/* GP of the SAL - physical */
-	u64		imsto_rendez_state;	/* Rendez state information */
-	u64		imsto_sal_check_ra;	/* Return address in SAL_CHECK while going
-						 * back to SAL from OS after MCA handling.
-						 */
-	u64		pal_min_state;		/* from PAL in r17 */
-	u64		proc_state_param;	/* from PAL in r18. See SDV 2:268 11.3.2.1 */
-} ia64_mca_sal_to_os_state_t;
+/* Handover state from SAL to OS and vice versa, for both MCA and INIT events.
+ * Besides the handover state, it also contains some saved registers from the
+ * time of the event.
+ * Note: mca_asm.S depends on the precise layout of this structure.
+ */
+
+struct ia64_sal_os_state {
+	/* SAL to OS, must be at offset 0 */
+	u64			os_gp;			/* GP of the os registered with the SAL, physical */
+	u64			pal_proc;		/* PAL_PROC entry point, physical */
+	u64			sal_proc;		/* SAL_PROC entry point, physical */
+	u64			rv_rc;			/* MCA - Rendezvous state, INIT - reason code */
+	u64			proc_state_param;	/* from R18 */
+	u64			monarch;		/* 1 for a monarch event, 0 for a slave */
+	/* common, must follow SAL to OS */
+	u64			sal_ra;			/* Return address in SAL, physical */
+	u64			sal_gp;			/* GP of the SAL - physical */
+	pal_min_state_area_t	*pal_min_state;		/* from R17.  physical in asm, virtual in C */
+	u64			prev_IA64_KR_CURRENT;	/* previous value of IA64_KR(CURRENT) */
+	struct task_struct	*prev_task;		/* previous task, NULL if it is not useful */
+	/* Some interrupt registers are not saved in minstate, pt_regs or
+	 * switch_stack.  Because MCA/INIT can occur when interrupts are
+	 * disabled, we need to save the additional interrupt registers over
+	 * MCA/INIT and resume.
+	 */
+	u64			isr;
+	u64			ifa;
+	u64			itir;
+	u64			iipa;
+	u64			iim;
+	u64			iha;
+	/* OS to SAL, must follow common */
+	u64			os_status;		/* OS status to SAL, enum below */
+	u64			context;		/* 0 if return to same context
+							   1 if return to new context */
+};
 
 enum {
 	IA64_MCA_CORRECTED	=	0x0,	/* Error has been corrected by OS_MCA */
@@ -84,35 +107,21 @@ enum {
 };
 
 enum {
+	IA64_INIT_RESUME	=	0x0,	/* Resume after return from INIT */
+	IA64_INIT_WARM_BOOT	=	-1,	/* Warm boot of the system need from SAL */
+};
+
+enum {
 	IA64_MCA_SAME_CONTEXT	=	0x0,	/* SAL to return to same context */
 	IA64_MCA_NEW_CONTEXT	=	-1	/* SAL to return to new context */
 };
 
-typedef struct ia64_mca_os_to_sal_state_s {
-	u64		imots_os_status;	/*   OS status to SAL as to what happened
-						 *   with the MCA handling.
-						 */
-	u64		imots_sal_gp;		/* GP of the SAL - physical */
-	u64		imots_context;		/* 0 if return to same context
-						   1 if return to new context */
-	u64		*imots_new_min_state;	/* Pointer to structure containing
-						 *   new values of registers in the min state
-						 *   save area.
-						 */
-	u64		imots_sal_check_ra;	/* Return address in SAL_CHECK while going
-						 *   back to SAL from OS after MCA handling.
-						 */
-} ia64_mca_os_to_sal_state_t;
-
 /* Per-CPU MCA state that is too big for normal per-CPU variables.  */
 
 struct ia64_mca_cpu {
-	u64 stack[IA64_MCA_STACK_SIZE/8];	/* MCA memory-stack */
-	u64 proc_state_dump[512];
-	u64 stackframe[32];
-	u64 rbstore[IA64_MCA_STACK_SIZE/8];	/* MCA reg.-backing store */
+	u64 mca_stack[KERNEL_STACK_SIZE/8];
 	u64 init_stack[KERNEL_STACK_SIZE/8];
-} __attribute__ ((aligned(16)));
+};
 
 /* Array of physical addresses of each CPU's MCA area.  */
 extern unsigned long __per_cpu_mca[NR_CPUS];
@@ -121,12 +130,29 @@ extern void ia64_mca_init(void);
 extern void ia64_mca_cpu_init(void *);
 extern void ia64_os_mca_dispatch(void);
 extern void ia64_os_mca_dispatch_end(void);
-extern void ia64_mca_ucmc_handler(void);
+extern void ia64_mca_ucmc_handler(struct pt_regs *, struct ia64_sal_os_state *);
+extern void ia64_init_handler(struct pt_regs *,
+			      struct switch_stack *,
+			      struct ia64_sal_os_state *);
 extern void ia64_monarch_init_handler(void);
 extern void ia64_slave_init_handler(void);
 extern void ia64_mca_cmc_vector_setup(void);
-extern int  ia64_reg_MCA_extension(void*);
+extern int  ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *));
 extern void ia64_unreg_MCA_extension(void);
+extern u64 ia64_get_rnat(u64 *);
+
+#else	/* __ASSEMBLY__ */
+
+#define IA64_MCA_CORRECTED	0x0	/* Error has been corrected by OS_MCA */
+#define IA64_MCA_WARM_BOOT	-1	/* Warm boot of the system need from SAL */
+#define IA64_MCA_COLD_BOOT	-2	/* Cold boot of the system need from SAL */
+#define IA64_MCA_HALT		-3	/* System to be halted by SAL */
+
+#define IA64_INIT_RESUME	0x0	/* Resume after return from INIT */
+#define IA64_INIT_WARM_BOOT	-1	/* Warm boot of the system need from SAL */
+
+#define IA64_MCA_SAME_CONTEXT	0x0	/* SAL to return to same context */
+#define IA64_MCA_NEW_CONTEXT	-1	/* SAL to return to new context */
 
 #endif /* !__ASSEMBLY__ */
 #endif /* _ASM_IA64_MCA_H */
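The enums above feed the OS-to-SAL half of the new handover structure. What follows is a hedged standalone sketch, not the kernel implementation: sal_os_state_sketch and report_to_sal are made-up stand-ins that only illustrate how a handler could fill os_status and context before handing control back to SAL; the pairing of the values is an assumption, and the real logic in the arch/ia64 MCA handlers is more involved.

/* Stand-in sketch of the OS-to-SAL part of the handover.  Field and constant
 * names are taken from the header above; the flow itself is illustrative.
 */
#include <stdio.h>

typedef unsigned long u64;

enum { IA64_MCA_CORRECTED = 0x0, IA64_MCA_WARM_BOOT = -1 };
enum { IA64_MCA_SAME_CONTEXT = 0x0, IA64_MCA_NEW_CONTEXT = -1 };

struct sal_os_state_sketch {		/* cut down: only the OS-to-SAL fields */
	u64 os_status;			/* OS status reported to SAL */
	u64 context;			/* same or new context on return */
};

/* Assumed flow: if the error was handled, report it as corrected and ask SAL
 * to resume the interrupted context; otherwise request a warm boot.
 */
static void report_to_sal(struct sal_os_state_sketch *sos, int recovered)
{
	sos->os_status = recovered ? IA64_MCA_CORRECTED : IA64_MCA_WARM_BOOT;
	sos->context   = recovered ? IA64_MCA_SAME_CONTEXT : IA64_MCA_NEW_CONTEXT;
}

int main(void)
{
	struct sal_os_state_sketch sos;

	report_to_sal(&sos, 1);
	printf("os_status=%ld context=%ld\n", (long)sos.os_status, (long)sos.context);
	return 0;
}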
diff --git a/include/asm-ia64/mca_asm.h b/include/asm-ia64/mca_asm.h
index 836953e0f91f..27c9203d8ce3 100644
--- a/include/asm-ia64/mca_asm.h
+++ b/include/asm-ia64/mca_asm.h
@@ -8,6 +8,8 @@
  * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
  * Copyright (C) 2002 Intel Corp.
  * Copyright (C) 2002 Jenna Hall <jenna.s.hall@intel.com>
+ * Copyright (C) 2005 Silicon Graphics, Inc
+ * Copyright (C) 2005 Keith Owens <kaos@sgi.com>
  */
 #ifndef _ASM_IA64_MCA_ASM_H
 #define _ASM_IA64_MCA_ASM_H
@@ -207,106 +209,33 @@
 	;;
 
 /*
- * The following offsets capture the order in which the
- * RSE related registers from the old context are
- * saved onto the new stack frame.
+ * The MCA and INIT stacks in struct ia64_mca_cpu look like normal kernel
+ * stacks, except that the SAL/OS state and a switch_stack are stored near the
+ * top of the MCA/INIT stack.  To support concurrent entry to MCA or INIT, as
+ * well as MCA over INIT, each event needs its own SAL/OS state.  All entries
+ * are 16 byte aligned.
  *
- *	+-----------------------+
- *	|NDIRTY [BSP - BSPSTORE]|
- *	+-----------------------+
- *	|          RNAT         |
- *	+-----------------------+
- *	|        BSPSTORE       |
- *	+-----------------------+
- *	|          IFS          |
- *	+-----------------------+
- *	|          PFS          |
- *	+-----------------------+
- *	|          RSC          |
- *	+-----------------------+ <-------- Bottom of new stack frame
+ *	+---------------------------+
+ *	|          pt_regs          |
+ *	+---------------------------+
+ *	|        switch_stack       |
+ *	+---------------------------+
+ *	|        SAL/OS state       |
+ *	+---------------------------+
+ *	|    16 byte scratch area   |
+ *	+---------------------------+ <-------- SP at start of C MCA handler
+ *	|           .....           |
+ *	+---------------------------+
+ *	| RBS for MCA/INIT handler  |
+ *	+---------------------------+
+ *	| struct task for MCA/INIT  |
+ *	+---------------------------+ <-------- Bottom of MCA/INIT stack
  */
-#define rse_rsc_offset		0
-#define rse_pfs_offset		(rse_rsc_offset+0x08)
-#define rse_ifs_offset		(rse_pfs_offset+0x08)
-#define rse_bspstore_offset	(rse_ifs_offset+0x08)
-#define rse_rnat_offset		(rse_bspstore_offset+0x08)
-#define rse_ndirty_offset	(rse_rnat_offset+0x08)
 
-/*
- * rse_switch_context
- *
- *	1. Save old RSC onto the new stack frame
- *	2. Save PFS onto new stack frame
- *	3. Cover the old frame and start a new frame.
- *	4. Save IFS onto new stack frame
- *	5. Save the old BSPSTORE on the new stack frame
- *	6. Save the old RNAT on the new stack frame
- *	7. Write BSPSTORE with the new backing store pointer
- *	8. Read and save the new BSP to calculate the #dirty registers
- * NOTE: Look at pages 11-10, 11-11 in PRM Vol 2
- */
-#define rse_switch_context(temp,p_stackframe,p_bspstore)	\
-	;;							\
-	mov	temp=ar.rsc;;					\
-	st8	[p_stackframe]=temp,8;;				\
-	mov	temp=ar.pfs;;					\
-	st8	[p_stackframe]=temp,8;				\
-	cover ;;						\
-	mov	temp=cr.ifs;;					\
-	st8	[p_stackframe]=temp,8;;				\
-	mov	temp=ar.bspstore;;				\
-	st8	[p_stackframe]=temp,8;;				\
-	mov	temp=ar.rnat;;					\
-	st8	[p_stackframe]=temp,8;				\
-	mov	ar.bspstore=p_bspstore;;			\
-	mov	temp=ar.bsp;;					\
-	sub	temp=temp,p_bspstore;;				\
-	st8	[p_stackframe]=temp,8;;
-
-/*
- * rse_return_context
- *	1. Allocate a zero-sized frame
- *	2. Store the number of dirty registers RSC.loadrs field
- *	3. Issue a loadrs to insure that any registers from the interrupted
- *	   context which were saved on the new stack frame have been loaded
- *	   back into the stacked registers
- *	4. Restore BSPSTORE
- *	5. Restore RNAT
- *	6. Restore PFS
- *	7. Restore IFS
- *	8. Restore RSC
- *	9. Issue an RFI
- */
-#define rse_return_context(psr_mask_reg,temp,p_stackframe)	\
-	;;							\
-	alloc	temp=ar.pfs,0,0,0,0;				\
-	add	p_stackframe=rse_ndirty_offset,p_stackframe;;	\
-	ld8	temp=[p_stackframe];;				\
-	shl	temp=temp,16;;					\
-	mov	ar.rsc=temp;;					\
-	loadrs;;						\
-	add	p_stackframe=-rse_ndirty_offset+rse_bspstore_offset,p_stackframe;;\
-	ld8	temp=[p_stackframe];;				\
-	mov	ar.bspstore=temp;;				\
-	add	p_stackframe=-rse_bspstore_offset+rse_rnat_offset,p_stackframe;;\
-	ld8	temp=[p_stackframe];;				\
-	mov	ar.rnat=temp;;					\
-	add	p_stackframe=-rse_rnat_offset+rse_pfs_offset,p_stackframe;;	\
-	ld8	temp=[p_stackframe];;				\
-	mov	ar.pfs=temp;;					\
-	add	p_stackframe=-rse_pfs_offset+rse_ifs_offset,p_stackframe;;	\
-	ld8	temp=[p_stackframe];;				\
-	mov	cr.ifs=temp;;					\
-	add	p_stackframe=-rse_ifs_offset+rse_rsc_offset,p_stackframe;;	\
-	ld8	temp=[p_stackframe];;				\
-	mov	ar.rsc=temp ;					\
-	mov	temp=psr;;					\
-	or	temp=temp,psr_mask_reg;;			\
-	mov	cr.ipsr=temp;;					\
-	mov	temp=ip;;					\
-	add	temp=0x30,temp;;				\
-	mov	cr.iip=temp;;					\
-	srlz.i;;						\
-	rfi;;
+#define ALIGN16(x)			((x)&~15)
+#define MCA_PT_REGS_OFFSET		ALIGN16(KERNEL_STACK_SIZE-IA64_PT_REGS_SIZE)
+#define MCA_SWITCH_STACK_OFFSET		ALIGN16(MCA_PT_REGS_OFFSET-IA64_SWITCH_STACK_SIZE)
+#define MCA_SOS_OFFSET			ALIGN16(MCA_SWITCH_STACK_OFFSET-IA64_SAL_OS_STATE_SIZE)
+#define MCA_SP_OFFSET			ALIGN16(MCA_SOS_OFFSET-16)
 
 #endif /* _ASM_IA64_MCA_ASM_H */
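As a sanity check of the layout computed by the ALIGN16 offsets above, here is a standalone sketch, not kernel code: the structure sizes are invented stand-ins, since the real IA64_PT_REGS_SIZE, IA64_SWITCH_STACK_SIZE and IA64_SAL_OS_STATE_SIZE values come from asm-offsets and KERNEL_STACK_SIZE from the ia64 headers, but the arithmetic is the same, carving each area off the top of the per-event stack from pt_regs downward.

/* Standalone illustration of the MCA/INIT stack offset computation.  All the
 * sizes below are assumed for the example; only the arithmetic mirrors the
 * macros in mca_asm.h.
 */
#include <stdio.h>

#define ALIGN16(x)		((x) & ~15UL)

#define KERNEL_STACK_SIZE	32768UL	/* assumed */
#define IA64_PT_REGS_SIZE	400UL	/* assumed */
#define IA64_SWITCH_STACK_SIZE	560UL	/* assumed */
#define IA64_SAL_OS_STATE_SIZE	136UL	/* assumed */

int main(void)
{
	/* Each area is carved off the top of the per-event stack, highest
	 * first, matching the diagram above: pt_regs, then switch_stack,
	 * then the SAL/OS state, with a 16 byte scratch area and the initial
	 * SP just below it.
	 */
	unsigned long pt_regs_off      = ALIGN16(KERNEL_STACK_SIZE - IA64_PT_REGS_SIZE);
	unsigned long switch_stack_off = ALIGN16(pt_regs_off - IA64_SWITCH_STACK_SIZE);
	unsigned long sos_off          = ALIGN16(switch_stack_off - IA64_SAL_OS_STATE_SIZE);
	unsigned long sp_off           = ALIGN16(sos_off - 16);

	printf("pt_regs      at offset 0x%lx\n", pt_regs_off);
	printf("switch_stack at offset 0x%lx\n", switch_stack_off);
	printf("SAL/OS state at offset 0x%lx\n", sos_off);
	printf("initial SP   at offset 0x%lx\n", sp_off);
	return 0;
}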