author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/ia64/kernel/gate.S
tag       v2.6.12-rc2 (Linux-2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/ia64/kernel/gate.S')
 arch/ia64/kernel/gate.S | 372 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 372 insertions(+), 0 deletions(-)

diff --git a/arch/ia64/kernel/gate.S b/arch/ia64/kernel/gate.S
new file mode 100644
index 000000000000..facf75acdc85
--- /dev/null
+++ b/arch/ia64/kernel/gate.S
@@ -0,0 +1,372 @@
/*
 * This file contains the code that gets mapped at the upper end of each task's text
 * region.  For now, it contains the signal trampoline code only.
 *
 * Copyright (C) 1999-2003 Hewlett-Packard Co
 * 	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/config.h>

#include <asm/asmmacro.h>
#include <asm/errno.h>
#include <asm/offsets.h>
#include <asm/sigcontext.h>
#include <asm/system.h>
#include <asm/unistd.h>

/*
 * We can't easily refer to symbols inside the kernel.  To avoid full runtime relocation,
 * complications with the linker (which likes to create PLT stubs for branches
 * to targets outside the shared object) and to avoid multi-phase kernel builds, we
 * simply create minimalistic "patch lists" in special ELF sections.
 */
	.section ".data.patch.fsyscall_table", "a"
	.previous
#define LOAD_FSYSCALL_TABLE(reg)			\
[1:]	movl reg=0;					\
	.xdata4 ".data.patch.fsyscall_table", 1b-.

	.section ".data.patch.brl_fsys_bubble_down", "a"
	.previous
#define BRL_COND_FSYS_BUBBLE_DOWN(pr)			\
[1:](pr)brl.cond.sptk 0;				\
	.xdata4 ".data.patch.brl_fsys_bubble_down", 1b-.
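
/*
 * Each entry in these ".data.patch.*" sections records the location of the
 * tagged bundle above; boot-time patch code (see arch/ia64/kernel/patch.c) is
 * expected to rewrite the movl/brl with the real kernel addresses once they
 * are known.
 */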

GLOBAL_ENTRY(__kernel_syscall_via_break)
	.prologue
	.altrp b6
	.body
	/*
	 * Note: for (fast) syscall restart to work, the break instruction must be
	 *	 the first one in the bundle addressed by syscall_via_break.
	 */
	{ .mib
	  break 0x100000
	  nop.i 0
	  br.ret.sptk.many b6
	}
END(__kernel_syscall_via_break)

/*
 * On entry:
 *	r11 = saved ar.pfs
 *	r15 = system call #
 *	b0  = saved return address
 *	b6  = return address
 * On exit:
 *	r11 = saved ar.pfs
 *	r15 = system call #
 *	b0  = saved return address
 *	all other "scratch" registers:	undefined
 *	all "preserved" registers:	same as on entry
 */

GLOBAL_ENTRY(__kernel_syscall_via_epc)
	.prologue
	.altrp b6
	.body
{
	/*
	 * Note: the kernel cannot assume that the first two instructions in this
	 *	 bundle get executed.  The remaining code must be safe even if
	 *	 they do not get executed.
	 */
	adds r17=-1024,r15
	mov r10=0				// default to successful syscall execution
	epc
}
	;;
	rsm psr.be				// note: on McKinley "rsm psr.be/srlz.d" is slightly faster than "rum psr.be"
	LOAD_FSYSCALL_TABLE(r14)

	mov r16=IA64_KR(CURRENT)		// 12 cycle read latency
	tnat.nz p10,p9=r15
	mov r19=NR_syscalls-1
	;;
	shladd r18=r17,3,r14

	srlz.d
	cmp.ne p8,p0=r0,r0			// p8 <- FALSE
	/* Note: if r17 is a NaT, p6 will be set to zero. */
	cmp.geu p6,p7=r19,r17			// (syscall > 0 && syscall < 1024+NR_syscalls)?
	;;
(p6)	ld8 r18=[r18]
	mov r21=ar.fpsr
	add r14=-8,r14				// r14 <- addr of fsys_bubble_down entry
	;;
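	/*
	 * Dispatch on the fsyscall_table entry just loaded into r18: if bit 0
	 * of the entry is clear, it is taken to be the address of a light-weight
	 * handler and we branch to it directly; otherwise we fall through and,
	 * for valid syscall numbers, take the heavyweight fsys_bubble_down path
	 * below.
	 */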
(p6)	mov b7=r18
(p6)	tbit.z p8,p0=r18,0
(p8)	br.dptk.many b7

(p6)	rsm psr.i
	mov r27=ar.rsc
	mov r26=ar.pfs
	;;
	mov r29=psr				// read psr (12 cyc load latency)
	/*
	 * brl.cond doesn't work as intended because the linker would convert this branch
	 * into a branch to a PLT.  Perhaps there will be a way to avoid this with some
	 * future version of the linker.  In the meantime, we just use an indirect branch
	 * instead.
	 */
#ifdef CONFIG_ITANIUM
(p6)	ld8 r14=[r14]				// r14 <- fsys_bubble_down
	;;
(p6)	mov b7=r14
(p6)	br.sptk.many b7
#else
	BRL_COND_FSYS_BUBBLE_DOWN(p6)
#endif

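	// Invalid syscall number: return -1 with r8 holding the error code
	// (EINVAL if the syscall-number register was a NaT, ENOSYS otherwise).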
	mov r10=-1
(p10)	mov r8=EINVAL
(p9)	mov r8=ENOSYS
	FSYS_RETURN
END(__kernel_syscall_via_epc)

#	define ARG0_OFF		(16 + IA64_SIGFRAME_ARG0_OFFSET)
#	define ARG1_OFF		(16 + IA64_SIGFRAME_ARG1_OFFSET)
#	define ARG2_OFF		(16 + IA64_SIGFRAME_ARG2_OFFSET)
#	define SIGHANDLER_OFF	(16 + IA64_SIGFRAME_HANDLER_OFFSET)
#	define SIGCONTEXT_OFF	(16 + IA64_SIGFRAME_SIGCONTEXT_OFFSET)

#	define FLAGS_OFF	IA64_SIGCONTEXT_FLAGS_OFFSET
#	define CFM_OFF		IA64_SIGCONTEXT_CFM_OFFSET
#	define FR6_OFF		IA64_SIGCONTEXT_FR6_OFFSET
#	define BSP_OFF		IA64_SIGCONTEXT_AR_BSP_OFFSET
#	define RNAT_OFF		IA64_SIGCONTEXT_AR_RNAT_OFFSET
#	define UNAT_OFF		IA64_SIGCONTEXT_AR_UNAT_OFFSET
#	define FPSR_OFF		IA64_SIGCONTEXT_AR_FPSR_OFFSET
#	define PR_OFF		IA64_SIGCONTEXT_PR_OFFSET
#	define RP_OFF		IA64_SIGCONTEXT_IP_OFFSET
#	define SP_OFF		IA64_SIGCONTEXT_R12_OFFSET
#	define RBS_BASE_OFF	IA64_SIGCONTEXT_RBS_BASE_OFFSET
#	define LOADRS_OFF	IA64_SIGCONTEXT_LOADRS_OFFSET
#	define base0		r2
#	define base1		r3
/*
 * When we get here, the memory stack looks like this:
 *
 *   +===============================+
 *   |                               |
 *   //       struct sigframe       //
 *   |                               |
 *   +-------------------------------+ <-- sp+16
 *   |      16 byte of scratch       |
 *   |            space              |
 *   +-------------------------------+ <-- sp
 *
 * The register stack looks _exactly_ the way it looked at the time the signal
 * occurred.  In other words, we're treading on a potential mine-field: each
 * incoming general register may be a NaT value (including sp, in which case the
 * process ends up dying with a SIGSEGV).
 *
 * The first thing we need to do is a cover to get the registers onto the backing
 * store.  Once that is done, we invoke the signal handler which may modify some
 * of the machine state.  After returning from the signal handler, we return
 * control to the previous context by executing a sigreturn system call.  A signal
 * handler may call the rt_sigreturn() function to directly return to a given
 * sigcontext.  However, the user-level sigreturn() needs to do much more than
 * calling the rt_sigreturn() system call as it needs to unwind the stack to
 * restore preserved registers that may have been saved on the signal handler's
 * call stack.
 */
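
/*
 * For reference, a rough sketch of the frame these offsets index into (the
 * authoritative layout is the kernel's struct sigframe, reflected in the
 * generated IA64_SIGFRAME_* constants used above):
 *
 *	struct sigframe {
 *		unsigned long arg0;	// signal number
 *		unsigned long arg1;	// pointer to siginfo
 *		unsigned long arg2;	// pointer to sigcontext
 *		void *handler;		// plabel of the signal handler
 *		struct siginfo info;
 *		struct sigcontext sc;
 *	};
 */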

#define SIGTRAMP_SAVES										\
	.unwabi 3, 's';		/* mark this as a sigtramp handler (saves scratch regs) */	\
	.unwabi @svr4, 's';	/* backwards compatibility with old unwinders (remove in v2.7) */ \
	.savesp ar.unat, UNAT_OFF+SIGCONTEXT_OFF;						\
	.savesp ar.fpsr, FPSR_OFF+SIGCONTEXT_OFF;						\
	.savesp pr, PR_OFF+SIGCONTEXT_OFF;							\
	.savesp rp, RP_OFF+SIGCONTEXT_OFF;							\
	.savesp ar.pfs, CFM_OFF+SIGCONTEXT_OFF;							\
	.vframesp SP_OFF+SIGCONTEXT_OFF

GLOBAL_ENTRY(__kernel_sigtramp)
	// describe the state that is active when we get here:
	.prologue
	SIGTRAMP_SAVES
	.body

	.label_state 1

	adds base0=SIGHANDLER_OFF,sp
	adds base1=RBS_BASE_OFF+SIGCONTEXT_OFF,sp
	br.call.sptk.many rp=1f
1:
	ld8 r17=[base0],(ARG0_OFF-SIGHANDLER_OFF)	// get pointer to signal handler's plabel
	ld8 r15=[base1]					// get address of new RBS base (or NULL)
	cover						// push args in interrupted frame onto backing store
	;;
	cmp.ne p1,p0=r15,r0				// do we need to switch rbs? (note: pr is saved by kernel)
	mov.m r9=ar.bsp					// fetch ar.bsp
	.spillsp.p p1, ar.rnat, RNAT_OFF+SIGCONTEXT_OFF
(p1)	br.cond.spnt setup_rbs				// yup -> (clobbers p8, r14-r16, and r18-r20)
back_from_setup_rbs:
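	// Set up a three-output register frame and load the handler's three
	// arguments (signum, siginfo pointer, sigcontext pointer).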
	alloc r8=ar.pfs,0,0,3,0
	ld8 out0=[base0],16			// load arg0 (signum)
	adds base1=(ARG1_OFF-(RBS_BASE_OFF+SIGCONTEXT_OFF)),base1
	;;
	ld8 out1=[base1]			// load arg1 (siginfop)
	ld8 r10=[r17],8				// get signal handler entry point
	;;
	ld8 out2=[base0]			// load arg2 (sigcontextp)
	ld8 gp=[r17]				// get signal handler's global pointer
	adds base0=(BSP_OFF+SIGCONTEXT_OFF),sp
	;;
	.spillsp ar.bsp, BSP_OFF+SIGCONTEXT_OFF
	st8 [base0]=r9				// save sc_ar_bsp
	adds base0=(FR6_OFF+SIGCONTEXT_OFF),sp
	adds base1=(FR6_OFF+16+SIGCONTEXT_OFF),sp
	;;
	stf.spill [base0]=f6,32
	stf.spill [base1]=f7,32
	;;
	stf.spill [base0]=f8,32
	stf.spill [base1]=f9,32
	mov b6=r10
	;;
	stf.spill [base0]=f10,32
	stf.spill [base1]=f11,32
	;;
	stf.spill [base0]=f12,32
	stf.spill [base1]=f13,32
	;;
	stf.spill [base0]=f14,32
	stf.spill [base1]=f15,32
	br.call.sptk.many rp=b6			// call the signal handler
.ret0:	adds base0=(BSP_OFF+SIGCONTEXT_OFF),sp
	;;
	ld8 r15=[base0]				// fetch sc_ar_bsp
	mov r14=ar.bsp
	;;
	cmp.ne p1,p0=r14,r15			// do we need to restore the rbs?
(p1)	br.cond.spnt restore_rbs		// yup -> (clobbers r14-r18, f6 & f7)
	;;
back_from_restore_rbs:
	adds base0=(FR6_OFF+SIGCONTEXT_OFF),sp
	adds base1=(FR6_OFF+16+SIGCONTEXT_OFF),sp
	;;
	ldf.fill f6=[base0],32
	ldf.fill f7=[base1],32
	;;
	ldf.fill f8=[base0],32
	ldf.fill f9=[base1],32
	;;
	ldf.fill f10=[base0],32
	ldf.fill f11=[base1],32
	;;
	ldf.fill f12=[base0],32
	ldf.fill f13=[base1],32
	;;
	ldf.fill f14=[base0],32
	ldf.fill f15=[base1],32
	mov r15=__NR_rt_sigreturn
	.restore sp				// pop .prologue
	break __BREAK_SYSCALL

	.prologue
	SIGTRAMP_SAVES
setup_rbs:
	mov ar.rsc=0				// put RSE into enforced lazy mode
	;;
	.save ar.rnat, r19
	mov r19=ar.rnat				// save RNaT before switching backing store area
	adds r14=(RNAT_OFF+SIGCONTEXT_OFF),sp

	mov r18=ar.bspstore
	mov ar.bspstore=r15			// switch over to new register backing store area
	;;

	.spillsp ar.rnat, RNAT_OFF+SIGCONTEXT_OFF
	st8 [r14]=r19				// save sc_ar_rnat
	.body
	mov.m r16=ar.bsp			// sc_loadrs <- (new bsp - new bspstore) << 16
	adds r14=(LOADRS_OFF+SIGCONTEXT_OFF),sp
	;;
	invala
	sub r15=r16,r15
	extr.u r20=r18,3,6
	;;
	mov ar.rsc=0xf				// set RSE into eager mode, pl 3
	cmp.eq p8,p0=63,r20
	shl r15=r15,16
	;;
	st8 [r14]=r15				// save sc_loadrs
(p8)	st8 [r18]=r19				// if bspstore points at RNaT slot, store RNaT there now
	.restore sp				// pop .prologue
	br.cond.sptk back_from_setup_rbs

	.prologue
	SIGTRAMP_SAVES
	.spillsp ar.rnat, RNAT_OFF+SIGCONTEXT_OFF
	.body
restore_rbs:
	// On input:
	//	r14 = bsp1 (bsp at the time of return from signal handler)
	//	r15 = bsp0 (bsp at the time the signal occurred)
	//
	// Here, we need to calculate bspstore0, the value that ar.bspstore needs
	// to be set to, based on bsp0 and the size of the dirty partition on
	// the alternate stack (sc_loadrs >> 16).  This can be done with the
	// following algorithm:
	//
	//	bspstore0 = rse_skip_regs(bsp0, -rse_num_regs(bsp1 - (loadrs >> 19), bsp1));
	//
	// This is what the code below does.
	//
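	// The divide-by-63 below reflects the RSE backing-store layout: every
	// 64th slot (slot number 63 within a 0x200-byte aligned block) is an
	// RNaT collection slot rather than a stacked register, so each group of
	// 64 backing-store slots holds only 63 registers.
	//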
	alloc r2=ar.pfs,0,0,0,0			// alloc null frame
	adds r16=(LOADRS_OFF+SIGCONTEXT_OFF),sp
	adds r18=(RNAT_OFF+SIGCONTEXT_OFF),sp
	;;
	ld8 r17=[r16]
	ld8 r16=[r18]				// get new rnat
	extr.u r18=r15,3,6			// r18 <- rse_slot_num(bsp0)
	;;
	mov ar.rsc=r17				// put RSE into enforced lazy mode
	shr.u r17=r17,16
	;;
	sub r14=r14,r17				// r14 (bspstore1) <- bsp1 - (sc_loadrs >> 16)
	shr.u r17=r17,3				// r17 <- (sc_loadrs >> 19)
	;;
	loadrs					// restore dirty partition
	extr.u r14=r14,3,6			// r14 <- rse_slot_num(bspstore1)
	;;
	add r14=r14,r17				// r14 <- rse_slot_num(bspstore1) + (sc_loadrs >> 19)
	;;
	shr.u r14=r14,6				// r14 <- (rse_slot_num(bspstore1) + (sc_loadrs >> 19))/0x40
	;;
	sub r14=r14,r17				// r14 <- -rse_num_regs(bspstore1, bsp1)
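	// 0x8208208208208209 is the magic multiplicand for dividing by 63: the
	// xmpy.h/add/shift sequence below computes delta/63 without an integer
	// divide (which the architecture does not provide).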
	movl r17=0x8208208208208209
	;;
	add r18=r18,r14				// r18 (delta) <- rse_slot_num(bsp0) - rse_num_regs(bspstore1,bsp1)
	setf.sig f7=r17
	cmp.lt p7,p0=r14,r0			// p7 <- (r14 < 0)?
	;;
(p7)	adds r18=-62,r18			// delta -= 62
	;;
	setf.sig f6=r18
	;;
	xmpy.h f6=f6,f7
	;;
	getf.sig r17=f6
	;;
	add r17=r17,r18
	shr r18=r18,63
	;;
	shr r17=r17,5
	;;
	sub r17=r17,r18				// r17 = delta/63
	;;
	add r17=r14,r17				// r17 <- delta/63 - rse_num_regs(bspstore1, bsp1)
	;;
	shladd r15=r17,3,r15			// r15 <- bsp0 + 8*(delta/63 - rse_num_regs(bspstore1, bsp1))
	;;
	mov ar.bspstore=r15			// switch back to old register backing store area
	;;
	mov ar.rnat=r16				// restore RNaT
	mov ar.rsc=0xf				// (will be restored later on from sc_ar_rsc)
	// invala not necessary as that will happen when returning to user-mode
	br.cond.sptk back_from_restore_rbs
END(__kernel_sigtramp)