author    | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400
commit    | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      | 0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-ia64/mca_asm.h
Linux-2.6.12-rc2  v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'include/asm-ia64/mca_asm.h')
-rw-r--r-- | include/asm-ia64/mca_asm.h | 312
1 file changed, 312 insertions, 0 deletions
diff --git a/include/asm-ia64/mca_asm.h b/include/asm-ia64/mca_asm.h
new file mode 100644
index 000000000000..836953e0f91f
--- /dev/null
+++ b/include/asm-ia64/mca_asm.h
@@ -0,0 +1,312 @@
/*
 * File:	mca_asm.h
 *
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) Vijay Chander (vijay@engr.sgi.com)
 * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
 * Copyright (C) 2000 Hewlett-Packard Co.
 * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2002 Intel Corp.
 * Copyright (C) 2002 Jenna Hall <jenna.s.hall@intel.com>
 */
#ifndef _ASM_IA64_MCA_ASM_H
#define _ASM_IA64_MCA_ASM_H

#define PSR_IC		13
#define PSR_I		14
#define PSR_DT		17
#define PSR_RT		27
#define PSR_MC		35
#define PSR_IT		36
#define PSR_BN		44

/*
 * This macro converts an instruction virtual address to a physical address.
 * Right now for simulation purposes the virtual addresses are
 * direct mapped to physical addresses.
 *	1. Lop off bits 61 thru 63 in the virtual address
 */
#define INST_VA_TO_PA(addr)		\
	dep	addr	= 0, addr, 61, 3
/*
 * This macro converts a data virtual address to a physical address.
 * Right now for simulation purposes the virtual addresses are
 * direct mapped to physical addresses.
 *	1. Use the tpa instruction to let the hardware translate the address
 */
#define DATA_VA_TO_PA(addr)		\
	tpa	addr	= addr
/*
 * This macro converts a data physical address to a virtual address.
 * Right now for simulation purposes the virtual addresses are
 * direct mapped to physical addresses.
 *	1. Put 0x7 in bits 61 thru 63.
 */
#define DATA_PA_TO_VA(addr,temp)	\
	mov	temp	= 0x7	;;	\
	dep	addr	= temp, addr, 61, 3
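
The two dep-based macros above only rewrite the region bits of an address; DATA_VA_TO_PA instead uses tpa to let the hardware translate. A minimal C sketch of the bit manipulation (illustrative only; the function names and stdint.h usage are my assumptions, not part of this header):

#include <stdint.h>

/* dep addr = 0, addr, 61, 3: deposit 0 into bits 61..63 */
static uint64_t inst_va_to_pa(uint64_t addr)
{
	return addr & ~(0x7ULL << 61);
}

/* mov temp = 0x7 ;; dep addr = temp, addr, 61, 3: deposit 0x7 into bits 61..63 */
static uint64_t data_pa_to_va(uint64_t addr)
{
	return (addr & ~(0x7ULL << 61)) | (0x7ULL << 61);
}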

#define GET_THIS_PADDR(reg, var)		\
	mov	reg = IA64_KR(PER_CPU_DATA);;	\
	addl	reg = THIS_CPU(var), reg

/*
 * This macro jumps to the instruction at the given virtual address
 * and starts execution in physical mode with all the address
 * translations turned off.
 *	1. Save the current psr
 *	2. Make sure that all the upper 32 bits are off
 *
 *	3. Clear the interrupt enable and interrupt state collection bits
 *	   in the psr before updating the ipsr and iip.
 *
 *	4. Turn off the instruction, data and rse translation bits of the psr
 *	   and store the new value into ipsr
 *	   Also make sure that the interrupts are disabled.
 *	   Ensure that we are in little endian mode.
 *	   [psr.{rt, it, dt, i, be} = 0]
 *
 *	5. Get the physical address corresponding to the virtual address
 *	   of the next instruction bundle and put it in iip.
 *	   (Using magic numbers 24 and 40 in the deposit instruction since
 *	    the IA64_SDK code directly maps the lower 24 bits as the physical
 *	    address from a virtual address).
 *
 *	6. Do an rfi to move the values from ipsr to psr and iip to ip.
 */
#define PHYSICAL_MODE_ENTER(temp1, temp2, start_addr, old_psr)	\
	mov	old_psr = psr;					\
	;;							\
	dep	old_psr = 0, old_psr, 32, 32;			\
								\
	mov	ar.rsc = 0 ;					\
	;;							\
	srlz.d;							\
	mov	temp2 = ar.bspstore;				\
	;;							\
	DATA_VA_TO_PA(temp2);					\
	;;							\
	mov	temp1 = ar.rnat;				\
	;;							\
	mov	ar.bspstore = temp2;				\
	;;							\
	mov	ar.rnat = temp1;				\
	mov	temp1 = psr;					\
	mov	temp2 = psr;					\
	;;							\
								\
	dep	temp2 = 0, temp2, PSR_IC, 2;			\
	;;							\
	mov	psr.l = temp2;					\
	;;							\
	srlz.d;							\
	dep	temp1 = 0, temp1, 32, 32;			\
	;;							\
	dep	temp1 = 0, temp1, PSR_IT, 1;			\
	;;							\
	dep	temp1 = 0, temp1, PSR_DT, 1;			\
	;;							\
	dep	temp1 = 0, temp1, PSR_RT, 1;			\
	;;							\
	dep	temp1 = 0, temp1, PSR_I, 1;			\
	;;							\
	dep	temp1 = 0, temp1, PSR_IC, 1;			\
	;;							\
	dep	temp1 = -1, temp1, PSR_MC, 1;			\
	;;							\
	mov	cr.ipsr = temp1;				\
	;;							\
	LOAD_PHYSICAL(p0, temp2, start_addr);			\
	;;							\
	mov	cr.iip = temp2;					\
	mov	cr.ifs = r0;					\
	DATA_VA_TO_PA(sp);					\
	DATA_VA_TO_PA(gp);					\
	;;							\
	srlz.i;							\
	;;							\
	nop	1;						\
	nop	2;						\
	nop	1;						\
	nop	2;						\
	rfi;							\
	;;

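The dep sequence in PHYSICAL_MODE_ENTER builds the new ipsr from the saved psr: the upper 32 bits are zeroed, it/dt/rt/i/ic are cleared, and mc is set. As a rough standalone C model of just that computation (an illustrative sketch, assuming the PSR_* defines above are in scope; not kernel code):

#include <stdint.h>

static uint64_t physical_mode_ipsr(uint64_t old_psr)
{
	uint64_t ipsr = old_psr & 0xffffffffULL;	/* dep temp1 = 0, temp1, 32, 32 */

	ipsr &= ~(1ULL << PSR_IT);	/* instruction translation off */
	ipsr &= ~(1ULL << PSR_DT);	/* data translation off */
	ipsr &= ~(1ULL << PSR_RT);	/* RSE translation off */
	ipsr &= ~(1ULL << PSR_I);	/* interrupts disabled */
	ipsr &= ~(1ULL << PSR_IC);	/* interruption collection off */
	ipsr |=  (1ULL << PSR_MC);	/* machine checks masked */
	return ipsr;
}
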
/*
 * This macro jumps to the instruction at the given virtual address
 * and starts execution in virtual mode with all the address
 * translations turned on.
 *	1. Get the old saved psr
 *
 *	2. Clear the interrupt state collection bit in the current psr.
 *
 *	3. Set the instruction translation bit back in the old psr
 *	   Note we have to do this since we are right now saving only the
 *	   lower 32 bits of old psr. (Also the old psr has the data and
 *	   rse translation bits on)
 *
 *	4. Set ipsr to this old_psr with "it" bit set and "bn" = 1.
 *
 *	5. Reset the current thread pointer (r13).
 *
 *	6. Set iip to the virtual address of the next instruction bundle.
 *
 *	7. Do an rfi to move ipsr to psr and iip to ip.
 */

#define VIRTUAL_MODE_ENTER(temp1, temp2, start_addr, old_psr)	\
	mov	temp2 = psr;					\
	;;							\
	mov	old_psr = temp2;				\
	;;							\
	dep	temp2 = 0, temp2, PSR_IC, 2;			\
	;;							\
	mov	psr.l = temp2;					\
	mov	ar.rsc = 0;					\
	;;							\
	srlz.d;							\
	mov	r13 = ar.k6;					\
	mov	temp2 = ar.bspstore;				\
	;;							\
	DATA_PA_TO_VA(temp2,temp1);				\
	;;							\
	mov	temp1 = ar.rnat;				\
	;;							\
	mov	ar.bspstore = temp2;				\
	;;							\
	mov	ar.rnat = temp1;				\
	;;							\
	mov	temp1 = old_psr;				\
	;;							\
	mov	temp2 = 1;					\
	;;							\
	dep	temp1 = temp2, temp1, PSR_IC, 1;		\
	;;							\
	dep	temp1 = temp2, temp1, PSR_IT, 1;		\
	;;							\
	dep	temp1 = temp2, temp1, PSR_DT, 1;		\
	;;							\
	dep	temp1 = temp2, temp1, PSR_RT, 1;		\
	;;							\
	dep	temp1 = temp2, temp1, PSR_BN, 1;		\
	;;							\
								\
	mov	cr.ipsr = temp1;				\
	movl	temp2 = start_addr;				\
	;;							\
	mov	cr.iip = temp2;					\
	;;							\
	DATA_PA_TO_VA(sp, temp1);				\
	DATA_PA_TO_VA(gp, temp2);				\
	srlz.i;							\
	;;							\
	nop	1;						\
	nop	2;						\
	nop	1;						\
	rfi							\
	;;

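VIRTUAL_MODE_ENTER does the inverse with the saved psr before writing cr.ipsr: ic, it, dt, rt and bn all go back to 1. Continuing the same illustrative C model (again assuming the PSR_* defines above are in scope):

static uint64_t virtual_mode_ipsr(uint64_t old_psr)
{
	uint64_t ipsr = old_psr;

	ipsr |= (1ULL << PSR_IC);	/* interruption state collection on */
	ipsr |= (1ULL << PSR_IT);	/* instruction translation on */
	ipsr |= (1ULL << PSR_DT);	/* data translation on */
	ipsr |= (1ULL << PSR_RT);	/* RSE translation on */
	ipsr |= (1ULL << PSR_BN);	/* register bank 1 */
	return ipsr;
}
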
/*
 * The following offsets capture the order in which the
 * RSE related registers from the old context are
 * saved onto the new stack frame.
 *
 *	+-----------------------+
 *	|NDIRTY [BSP - BSPSTORE]|
 *	+-----------------------+
 *	|         RNAT          |
 *	+-----------------------+
 *	|       BSPSTORE        |
 *	+-----------------------+
 *	|          IFS          |
 *	+-----------------------+
 *	|          PFS          |
 *	+-----------------------+
 *	|          RSC          |
 *	+-----------------------+ <-------- Bottom of new stack frame
 */
#define rse_rsc_offset		0
#define rse_pfs_offset		(rse_rsc_offset+0x08)
#define rse_ifs_offset		(rse_pfs_offset+0x08)
#define rse_bspstore_offset	(rse_ifs_offset+0x08)
#define rse_rnat_offset		(rse_bspstore_offset+0x08)
#define rse_ndirty_offset	(rse_rnat_offset+0x08)

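A hypothetical C view of the same save area; the struct and field names are mine, but the order and offsets follow directly from the defines above:

#include <stdint.h>

struct rse_frame {		/* one 8-byte slot per rse_*_offset */
	uint64_t rsc;		/* rse_rsc_offset      = 0x00 */
	uint64_t pfs;		/* rse_pfs_offset      = 0x08 */
	uint64_t ifs;		/* rse_ifs_offset      = 0x10 */
	uint64_t bspstore;	/* rse_bspstore_offset = 0x18 */
	uint64_t rnat;		/* rse_rnat_offset     = 0x20 */
	uint64_t ndirty;	/* rse_ndirty_offset   = 0x28: BSP - BSPSTORE, in bytes */
};
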
/*
 * rse_switch_context
 *
 *	1. Save old RSC onto the new stack frame
 *	2. Save PFS onto new stack frame
 *	3. Cover the old frame and start a new frame.
 *	4. Save IFS onto new stack frame
 *	5. Save the old BSPSTORE on the new stack frame
 *	6. Save the old RNAT on the new stack frame
 *	7. Write BSPSTORE with the new backing store pointer
 *	8. Read and save the new BSP to calculate the #dirty registers
 *	   NOTE: Look at pages 11-10, 11-11 in PRM Vol 2
 */
#define rse_switch_context(temp,p_stackframe,p_bspstore)	\
	;;							\
	mov	temp=ar.rsc;;					\
	st8	[p_stackframe]=temp,8;;				\
	mov	temp=ar.pfs;;					\
	st8	[p_stackframe]=temp,8;				\
	cover ;;						\
	mov	temp=cr.ifs;;					\
	st8	[p_stackframe]=temp,8;;				\
	mov	temp=ar.bspstore;;				\
	st8	[p_stackframe]=temp,8;;				\
	mov	temp=ar.rnat;;					\
	st8	[p_stackframe]=temp,8;				\
	mov	ar.bspstore=p_bspstore;;			\
	mov	temp=ar.bsp;;					\
	sub	temp=temp,p_bspstore;;				\
	st8	[p_stackframe]=temp,8;;

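The tail of rse_switch_context produces the NDIRTY slot in the diagram: once ar.bspstore has been redirected to the new backing store, the distance from the new bspstore to ar.bsp is the number of dirty bytes still held in the stacked registers. Roughly, as a C sketch (function name assumed):

#include <stdint.h>

/* mov temp=ar.bsp ;; sub temp=temp,p_bspstore */
static uint64_t rse_ndirty_bytes(uint64_t bsp, uint64_t new_bspstore)
{
	return bsp - new_bspstore;
}
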
/*
 * rse_return_context
 *	1. Allocate a zero-sized frame
 *	2. Store the number of dirty registers in the RSC.loadrs field
 *	3. Issue a loadrs to ensure that any registers from the interrupted
 *	   context which were saved on the new stack frame have been loaded
 *	   back into the stacked registers
 *	4. Restore BSPSTORE
 *	5. Restore RNAT
 *	6. Restore PFS
 *	7. Restore IFS
 *	8. Restore RSC
 *	9. Issue an RFI
 */
#define rse_return_context(psr_mask_reg,temp,p_stackframe)	\
	;;							\
	alloc	temp=ar.pfs,0,0,0,0;				\
	add	p_stackframe=rse_ndirty_offset,p_stackframe;;	\
	ld8	temp=[p_stackframe];;				\
	shl	temp=temp,16;;					\
	mov	ar.rsc=temp;;					\
	loadrs;;						\
	add	p_stackframe=-rse_ndirty_offset+rse_bspstore_offset,p_stackframe;;	\
	ld8	temp=[p_stackframe];;				\
	mov	ar.bspstore=temp;;				\
	add	p_stackframe=-rse_bspstore_offset+rse_rnat_offset,p_stackframe;;	\
	ld8	temp=[p_stackframe];;				\
	mov	ar.rnat=temp;;					\
	add	p_stackframe=-rse_rnat_offset+rse_pfs_offset,p_stackframe;;	\
	ld8	temp=[p_stackframe];;				\
	mov	ar.pfs=temp;;					\
	add	p_stackframe=-rse_pfs_offset+rse_ifs_offset,p_stackframe;;	\
	ld8	temp=[p_stackframe];;				\
	mov	cr.ifs=temp;;					\
	add	p_stackframe=-rse_ifs_offset+rse_rsc_offset,p_stackframe;;	\
	ld8	temp=[p_stackframe];;				\
	mov	ar.rsc=temp ;					\
	mov	temp=psr;;					\
	or	temp=temp,psr_mask_reg;;			\
	mov	cr.ipsr=temp;;					\
	mov	temp=ip;;					\
	add	temp=0x30,temp;;				\
	mov	cr.iip=temp;;					\
	srlz.i;;						\
	rfi;;

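Two of the constants above, modelled in C for reference (illustrative; the function names are assumptions): the saved ndirty byte count is shifted left by 16 so that it lands in the RSC.loadrs field consumed by loadrs, and the return iip is bumped by 0x30 bytes, i.e. three 16-byte instruction bundles past the current ip.

#include <stdint.h>

/* shl temp=temp,16 ;; mov ar.rsc=temp: ndirty bytes into RSC.loadrs, other RSC fields zero */
static uint64_t rsc_with_loadrs(uint64_t ndirty_bytes)
{
	return ndirty_bytes << 16;
}

/* mov temp=ip ;; add temp=0x30,temp: resume three bundles past this point */
static uint64_t resume_iip(uint64_t ip)
{
	return ip + 0x30;
}
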
#endif /* _ASM_IA64_MCA_ASM_H */