author    | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400
commit    | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      | 0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/v850/kernel/entry.S
tags      | Linux-2.6.12-rc2, v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/v850/kernel/entry.S')
-rw-r--r-- | arch/v850/kernel/entry.S | 1121 |
1 file changed, 1121 insertions, 0 deletions
diff --git a/arch/v850/kernel/entry.S b/arch/v850/kernel/entry.S
new file mode 100644
index 000000000000..895e27b1d839
--- /dev/null
+++ b/arch/v850/kernel/entry.S
@@ -0,0 +1,1121 @@
1 | /* | ||
2 | * arch/v850/kernel/entry.S -- Low-level system-call handling, trap handlers, | ||
3 | * and context-switching | ||
4 | * | ||
5 | * Copyright (C) 2001,02,03 NEC Electronics Corporation | ||
6 | * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org> | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General | ||
9 | * Public License. See the file COPYING in the main directory of this | ||
10 | * archive for more details. | ||
11 | * | ||
12 | * Written by Miles Bader <miles@gnu.org> | ||
13 | */ | ||
14 | |||
15 | #include <linux/sys.h> | ||
16 | |||
17 | #include <asm/entry.h> | ||
18 | #include <asm/current.h> | ||
19 | #include <asm/thread_info.h> | ||
20 | #include <asm/clinkage.h> | ||
21 | #include <asm/processor.h> | ||
22 | #include <asm/irq.h> | ||
23 | #include <asm/errno.h> | ||
24 | |||
25 | #include <asm/asm-consts.h> | ||
26 | |||
27 | |||
28 | /* Make a slightly more convenient alias for C_SYMBOL_NAME. */ | ||
29 | #define CSYM C_SYMBOL_NAME | ||
30 | |||
31 | |||
32 | /* The offset of the struct pt_regs in a state-save-frame on the stack. */ | ||
33 | #define PTO STATE_SAVE_PT_OFFSET | ||
34 | |||
35 | |||
36 | /* Save argument registers to the state-save-frame pointed to by EP. */ | ||
37 | #define SAVE_ARG_REGS \ | ||
38 | sst.w r6, PTO+PT_GPR(6)[ep]; \ | ||
39 | sst.w r7, PTO+PT_GPR(7)[ep]; \ | ||
40 | sst.w r8, PTO+PT_GPR(8)[ep]; \ | ||
41 | sst.w r9, PTO+PT_GPR(9)[ep] | ||
42 | /* Restore argument registers from the state-save-frame pointed to by EP. */ | ||
43 | #define RESTORE_ARG_REGS \ | ||
44 | sld.w PTO+PT_GPR(6)[ep], r6; \ | ||
45 | sld.w PTO+PT_GPR(7)[ep], r7; \ | ||
46 | sld.w PTO+PT_GPR(8)[ep], r8; \ | ||
47 | sld.w PTO+PT_GPR(9)[ep], r9 | ||
48 | |||
49 | /* Save value return registers to the state-save-frame pointed to by EP. */ | ||
50 | #define SAVE_RVAL_REGS \ | ||
51 | sst.w r10, PTO+PT_GPR(10)[ep]; \ | ||
52 | sst.w r11, PTO+PT_GPR(11)[ep] | ||
53 | /* Restore value return registers from the state-save-frame pointed to by EP. */ | ||
54 | #define RESTORE_RVAL_REGS \ | ||
55 | sld.w PTO+PT_GPR(10)[ep], r10; \ | ||
56 | sld.w PTO+PT_GPR(11)[ep], r11 | ||
57 | |||
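/* As a rough illustration of what the save/restore macros above do: the
   sst.w/sld.w forms are the V850's short EP-relative store/load
   instructions, and with EP pointing at the base of the state-save-frame,
   PTO+PT_GPR(n)[ep] addresses the slot for general register n inside the
   embedded struct pt_regs.  In C-like terms (treating the GPR area as a
   plain word array is an assumption about the pt_regs layout, not
   something this file defines):

	unsigned long *gpr = (unsigned long *)(ep + PTO + PT_GPR(0));
	gpr[6] = r6; gpr[7] = r7; gpr[8] = r8; gpr[9] = r9;	// SAVE_ARG_REGS
	gpr[10] = r10; gpr[11] = r11;				// SAVE_RVAL_REGS
*/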
58 | |||
59 | #define SAVE_CALL_CLOBBERED_REGS_BEFORE_ARGS \ | ||
60 | sst.w r1, PTO+PT_GPR(1)[ep]; \ | ||
61 | sst.w r5, PTO+PT_GPR(5)[ep] | ||
62 | #define SAVE_CALL_CLOBBERED_REGS_AFTER_RVAL \ | ||
63 | sst.w r12, PTO+PT_GPR(12)[ep]; \ | ||
64 | sst.w r13, PTO+PT_GPR(13)[ep]; \ | ||
65 | sst.w r14, PTO+PT_GPR(14)[ep]; \ | ||
66 | sst.w r15, PTO+PT_GPR(15)[ep]; \ | ||
67 | sst.w r16, PTO+PT_GPR(16)[ep]; \ | ||
68 | sst.w r17, PTO+PT_GPR(17)[ep]; \ | ||
69 | sst.w r18, PTO+PT_GPR(18)[ep]; \ | ||
70 | sst.w r19, PTO+PT_GPR(19)[ep] | ||
71 | #define RESTORE_CALL_CLOBBERED_REGS_BEFORE_ARGS \ | ||
72 | sld.w PTO+PT_GPR(1)[ep], r1; \ | ||
73 | sld.w PTO+PT_GPR(5)[ep], r5 | ||
74 | #define RESTORE_CALL_CLOBBERED_REGS_AFTER_RVAL \ | ||
75 | sld.w PTO+PT_GPR(12)[ep], r12; \ | ||
76 | sld.w PTO+PT_GPR(13)[ep], r13; \ | ||
77 | sld.w PTO+PT_GPR(14)[ep], r14; \ | ||
78 | sld.w PTO+PT_GPR(15)[ep], r15; \ | ||
79 | sld.w PTO+PT_GPR(16)[ep], r16; \ | ||
80 | sld.w PTO+PT_GPR(17)[ep], r17; \ | ||
81 | sld.w PTO+PT_GPR(18)[ep], r18; \ | ||
82 | sld.w PTO+PT_GPR(19)[ep], r19 | ||
83 | |||
84 | /* Save `call clobbered' registers to the state-save-frame pointed to by EP. */ | ||
85 | #define SAVE_CALL_CLOBBERED_REGS \ | ||
86 | SAVE_CALL_CLOBBERED_REGS_BEFORE_ARGS; \ | ||
87 | SAVE_ARG_REGS; \ | ||
88 | SAVE_RVAL_REGS; \ | ||
89 | SAVE_CALL_CLOBBERED_REGS_AFTER_RVAL | ||
90 | /* Restore `call clobbered' registers from the state-save-frame pointed to | ||
91 | by EP. */ | ||
92 | #define RESTORE_CALL_CLOBBERED_REGS \ | ||
93 | RESTORE_CALL_CLOBBERED_REGS_BEFORE_ARGS; \ | ||
94 | RESTORE_ARG_REGS; \ | ||
95 | RESTORE_RVAL_REGS; \ | ||
96 | RESTORE_CALL_CLOBBERED_REGS_AFTER_RVAL | ||
97 | |||
98 | /* Save `call clobbered' registers except for the return-value registers | ||
99 | to the state-save-frame pointed to by EP. */ | ||
100 | #define SAVE_CALL_CLOBBERED_REGS_NO_RVAL \ | ||
101 | SAVE_CALL_CLOBBERED_REGS_BEFORE_ARGS; \ | ||
102 | SAVE_ARG_REGS; \ | ||
103 | SAVE_CALL_CLOBBERED_REGS_AFTER_RVAL | ||
104 | /* Restore `call clobbered' registers except for the return-value registers | ||
105 | from the state-save-frame pointed to by EP. */ | ||
106 | #define RESTORE_CALL_CLOBBERED_REGS_NO_RVAL \ | ||
107 | RESTORE_CALL_CLOBBERED_REGS_BEFORE_ARGS; \ | ||
108 | RESTORE_ARG_REGS; \ | ||
109 | RESTORE_CALL_CLOBBERED_REGS_AFTER_RVAL | ||
110 | |||
111 | /* Save `call saved' registers to the state-save-frame pointed to by EP. */ | ||
112 | #define SAVE_CALL_SAVED_REGS \ | ||
113 | sst.w r2, PTO+PT_GPR(2)[ep]; \ | ||
114 | sst.w r20, PTO+PT_GPR(20)[ep]; \ | ||
115 | sst.w r21, PTO+PT_GPR(21)[ep]; \ | ||
116 | sst.w r22, PTO+PT_GPR(22)[ep]; \ | ||
117 | sst.w r23, PTO+PT_GPR(23)[ep]; \ | ||
118 | sst.w r24, PTO+PT_GPR(24)[ep]; \ | ||
119 | sst.w r25, PTO+PT_GPR(25)[ep]; \ | ||
120 | sst.w r26, PTO+PT_GPR(26)[ep]; \ | ||
121 | sst.w r27, PTO+PT_GPR(27)[ep]; \ | ||
122 | sst.w r28, PTO+PT_GPR(28)[ep]; \ | ||
123 | sst.w r29, PTO+PT_GPR(29)[ep] | ||
124 | /* Restore `call saved' registers from the state-save-frame pointed to by EP. */ | ||
125 | #define RESTORE_CALL_SAVED_REGS \ | ||
126 | sld.w PTO+PT_GPR(2)[ep], r2; \ | ||
127 | sld.w PTO+PT_GPR(20)[ep], r20; \ | ||
128 | sld.w PTO+PT_GPR(21)[ep], r21; \ | ||
129 | sld.w PTO+PT_GPR(22)[ep], r22; \ | ||
130 | sld.w PTO+PT_GPR(23)[ep], r23; \ | ||
131 | sld.w PTO+PT_GPR(24)[ep], r24; \ | ||
132 | sld.w PTO+PT_GPR(25)[ep], r25; \ | ||
133 | sld.w PTO+PT_GPR(26)[ep], r26; \ | ||
134 | sld.w PTO+PT_GPR(27)[ep], r27; \ | ||
135 | sld.w PTO+PT_GPR(28)[ep], r28; \ | ||
136 | sld.w PTO+PT_GPR(29)[ep], r29 | ||
137 | |||
138 | |||
139 | /* Save the PC stored in the special register SAVEREG to the state-save-frame | ||
140 | pointed to by EP. r19 is clobbered. */ | ||
141 | #define SAVE_PC(savereg) \ | ||
142 | stsr SR_ ## savereg, r19; \ | ||
143 | sst.w r19, PTO+PT_PC[ep] | ||
144 | /* Restore the PC from the state-save-frame pointed to by EP, to the special | ||
145 | register SAVEREG. LP is clobbered (it is used as a scratch register | ||
146 | because the POP_STATE macro restores it, and this macro is usually used | ||
147 | inside POP_STATE). */ | ||
148 | #define RESTORE_PC(savereg) \ | ||
149 | sld.w PTO+PT_PC[ep], lp; \ | ||
150 | ldsr lp, SR_ ## savereg | ||
151 | /* Save the PSW register stored in the special register SAVEREG to the | ||
152 | state-save-frame pointed to by EP. r19 is clobbered. */ | ||
153 | #define SAVE_PSW(savereg) \ | ||
154 | stsr SR_ ## savereg, r19; \ | ||
155 | sst.w r19, PTO+PT_PSW[ep] | ||
156 | /* Restore the PSW register from the state-save-frame pointed to by EP, to | ||
157 | the special register SAVEREG. LP is clobbered (it is used as a scratch | ||
158 | register because the POP_STATE macro restores it, and this macro is | ||
159 | usually used inside POP_STATE). */ | ||
160 | #define RESTORE_PSW(savereg) \ | ||
161 | sld.w PTO+PT_PSW[ep], lp; \ | ||
162 | ldsr lp, SR_ ## savereg | ||
163 | |||
164 | /* Save CTPC/CTPSW/CTBP registers to the state-save-frame pointed to by EP. | ||
165 | r19 is clobbered. */ | ||
166 | #define SAVE_CT_REGS \ | ||
167 | stsr SR_CTPC, r19; \ | ||
168 | sst.w r19, PTO+PT_CTPC[ep]; \ | ||
169 | stsr SR_CTPSW, r19; \ | ||
170 | sst.w r19, PTO+PT_CTPSW[ep]; \ | ||
171 | stsr SR_CTBP, r19; \ | ||
172 | sst.w r19, PTO+PT_CTBP[ep] | ||
173 | /* Restore CTPC/CTPSW/CTBP registers from the state-save-frame pointed to by EP. | ||
174 | LP is clobbered (it is used as a scratch register because the POP_STATE | ||
175 | macro restores it, and this macro is usually used inside POP_STATE). */ | ||
176 | #define RESTORE_CT_REGS \ | ||
177 | sld.w PTO+PT_CTPC[ep], lp; \ | ||
178 | ldsr lp, SR_CTPC; \ | ||
179 | sld.w PTO+PT_CTPSW[ep], lp; \ | ||
180 | ldsr lp, SR_CTPSW; \ | ||
181 | sld.w PTO+PT_CTBP[ep], lp; \ | ||
182 | ldsr lp, SR_CTBP | ||
183 | |||
184 | |||
185 | /* Push register state, except for the stack pointer, on the stack in the | ||
186 | form of a state-save-frame (plus some extra padding), in preparation for | ||
187 | a system call. This macro makes sure that the EP, GP, and LP | ||
188 | registers are saved, and TYPE identifies the set of extra registers to | ||
189 | be saved as well. Also copies (the new value of) SP to EP. */ | ||
190 | #define PUSH_STATE(type) \ | ||
191 | addi -STATE_SAVE_SIZE, sp, sp; /* Make room on the stack. */ \ | ||
192 | st.w ep, PTO+PT_GPR(GPR_EP)[sp]; \ | ||
193 | mov sp, ep; \ | ||
194 | sst.w gp, PTO+PT_GPR(GPR_GP)[ep]; \ | ||
195 | sst.w lp, PTO+PT_GPR(GPR_LP)[ep]; \ | ||
196 | type ## _STATE_SAVER | ||
197 | /* Pop a register state pushed by PUSH_STATE, except for the stack pointer, | ||
198 | from the stack. */ | ||
199 | #define POP_STATE(type) \ | ||
200 | mov sp, ep; \ | ||
201 | type ## _STATE_RESTORER; \ | ||
202 | sld.w PTO+PT_GPR(GPR_GP)[ep], gp; \ | ||
203 | sld.w PTO+PT_GPR(GPR_LP)[ep], lp; \ | ||
204 | sld.w PTO+PT_GPR(GPR_EP)[ep], ep; \ | ||
205 | addi STATE_SAVE_SIZE, sp, sp /* Clean up our stack space. */ | ||
206 | |||
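/* A rough sketch of the frame those two macros manage (the exact padding
   below the pt_regs area is an assumption here; the real offsets come from
   <asm/entry.h>):

	char *frame = (char *)(sp -= STATE_SAVE_SIZE);		// PUSH_STATE
	struct pt_regs *regs = (struct pt_regs *)(frame + PTO);
	// ... EP/GP/LP and the TYPE-specific registers are saved into *regs,
	//     with EP left equal to SP so the short EP-relative stores work ...
	sp += STATE_SAVE_SIZE;					// POP_STATE

   POP_STATE restores EP last, since EP is needed to address the frame for
   the loads themselves.  */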
207 | |||
208 | /* Switch to the kernel stack if necessary, and push register state on the | ||
209 | stack in the form of a state-save-frame. Also load the current task | ||
210 | pointer if switching from user mode. The stack-pointer (r3) should have | ||
211 | already been saved to the memory location SP_SAVE_LOC (the reason for | ||
212 | this is that the interrupt vectors may be beyond a 22-bit signed offset | ||
213 | jump from the actual interrupt handler, and this allows them to save the | ||
214 | stack-pointer and use that register to do an indirect jump). This macro | ||
215 | makes sure that `special' registers, system registers, and the stack | ||
216 | pointer are saved; TYPE identifies the set of extra registers to be | ||
217 | saved as well. SYSCALL_NUM is the register holding the system-call | ||
218 | number that this state is for (r0 if this isn't a system call). | ||
219 | Interrupts should already be disabled when calling this. */ | ||
220 | #define SAVE_STATE(type, syscall_num, sp_save_loc) \ | ||
221 | tst1 0, KM; /* See if already in kernel mode. */ \ | ||
222 | bz 1f; \ | ||
223 | ld.w sp_save_loc, sp; /* ... yes, use saved SP. */ \ | ||
224 | br 2f; \ | ||
225 | 1: ld.w KSP, sp; /* ... no, switch to kernel stack. */ \ | ||
226 | 2: PUSH_STATE(type); \ | ||
227 | ld.b KM, r19; /* Remember old kernel-mode. */ \ | ||
228 | sst.w r19, PTO+PT_KERNEL_MODE[ep]; \ | ||
229 | ld.w sp_save_loc, r19; /* Remember old SP. */ \ | ||
230 | sst.w r19, PTO+PT_GPR(GPR_SP)[ep]; \ | ||
231 | mov 1, r19; /* Now definitely in kernel-mode. */ \ | ||
232 | st.b r19, KM; \ | ||
233 | GET_CURRENT_TASK(CURRENT_TASK); /* Fetch the current task pointer. */ \ | ||
234 | /* Save away the syscall number. */ \ | ||
235 | sst.w syscall_num, PTO+PT_CUR_SYSCALL[ep] | ||
236 | |||
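/* The stack-switch logic at the top of SAVE_STATE, written out as a small
   C sketch (KM and KSP stand for the kernel-mode flag and kernel stack
   pointer locations used above; this is only an illustration):

	if (KM & 1)				// trap taken while in the kernel
		sp = *(unsigned long *)sp_save_loc;	// keep using the saved SP
	else					// coming from user mode
		sp = KSP;			// switch to the kernel stack
	// ... PUSH_STATE, record the old KM value and old SP in the frame,
	//     then set KM = 1 and fetch the current task pointer ...
*/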
237 | |||
238 | /* Save register state not normally saved by PUSH_STATE for TYPE, to the | ||
239 | state-save-frame on the stack; also copies SP to EP. r19 may be trashed. */ | ||
240 | #define SAVE_EXTRA_STATE(type) \ | ||
241 | mov sp, ep; \ | ||
242 | type ## _EXTRA_STATE_SAVER | ||
243 | /* Restore register state not normally restored by POP_STATE for TYPE, | ||
244 | from the state-save-frame on the stack; also copies SP to EP. | ||
245 | r19 may be trashed. */ | ||
246 | #define RESTORE_EXTRA_STATE(type) \ | ||
247 | mov sp, ep; \ | ||
248 | type ## _EXTRA_STATE_RESTORER | ||
249 | |||
250 | /* Save any call-clobbered registers not normally saved by PUSH_STATE for | ||
251 | TYPE, to the state-save-frame on the stack. | ||
252 | EP may be trashed, but is not guaranteed to contain a copy of SP | ||
253 | (unlike after most SAVE_... macros). r19 may be trashed. */ | ||
254 | #define SAVE_EXTRA_STATE_FOR_SCHEDULE(type) \ | ||
255 | type ## _SCHEDULE_EXTRA_STATE_SAVER | ||
256 | /* Restore any call-clobbered registers not normally restored by | ||
257 | POP_STATE for TYPE, to the state-save-frame on the stack. | ||
258 | EP may be trashed, but is not guaranteed to contain a copy of SP | ||
259 | (unlike after most RESTORE_... macros). r19 may be trashed. */ | ||
260 | #define RESTORE_EXTRA_STATE_FOR_SCHEDULE(type) \ | ||
261 | type ## _SCHEDULE_EXTRA_STATE_RESTORER | ||
262 | |||
263 | |||
264 | /* These are extra_state_saver/restorer values for a user trap. Note | ||
265 | that we save the argument registers so that restarted syscalls will | ||
266 | function properly (otherwise it wouldn't be necessary), and we must | ||
267 | _not_ restore the return-value registers (so that traps can return a | ||
268 | value!), but call-clobbered registers are not saved at all, as the | ||
269 | caller of the syscall function should have saved them. */ | ||
270 | |||
271 | #define TRAP_RET reti | ||
272 | /* Traps don't save call-clobbered registers (but do still save arg regs). | ||
273 | We preserve PSW to keep long-term state, namely interrupt status (for traps | ||
274 | from kernel-mode), and the single-step flag (for user traps). */ | ||
275 | #define TRAP_STATE_SAVER \ | ||
276 | SAVE_ARG_REGS; \ | ||
277 | SAVE_PC(EIPC); \ | ||
278 | SAVE_PSW(EIPSW) | ||
279 | /* When traps return, they just leave call-clobbered registers (except for arg | ||
280 | regs) with whatever value they have from the kernel. Traps don't preserve | ||
281 | the PSW, but we zero EIPSW to ensure it doesn't contain anything dangerous | ||
282 | (in particular, the single-step flag). */ | ||
283 | #define TRAP_STATE_RESTORER \ | ||
284 | RESTORE_ARG_REGS; \ | ||
285 | RESTORE_PC(EIPC); \ | ||
286 | RESTORE_PSW(EIPSW) | ||
287 | /* Save registers not normally saved by traps. We need to save r12, even | ||
288 | though it's nominally call-clobbered, because it's used when restarting | ||
289 | a system call (the signal-handling path uses SAVE_EXTRA_STATE, and | ||
290 | expects r12 to be restored when the trap returns). */ | ||
291 | #define TRAP_EXTRA_STATE_SAVER \ | ||
292 | SAVE_RVAL_REGS; \ | ||
293 | sst.w r12, PTO+PT_GPR(12)[ep]; \ | ||
294 | SAVE_CALL_SAVED_REGS; \ | ||
295 | SAVE_CT_REGS | ||
296 | #define TRAP_EXTRA_STATE_RESTORER \ | ||
297 | RESTORE_RVAL_REGS; \ | ||
298 | sld.w PTO+PT_GPR(12)[ep], r12; \ | ||
299 | RESTORE_CALL_SAVED_REGS; \ | ||
300 | RESTORE_CT_REGS | ||
301 | /* Save registers prior to calling scheduler (just before trap returns). | ||
302 | We have to save the return-value registers to preserve the trap's return | ||
303 | value. Note that ..._SCHEDULE_EXTRA_STATE_SAVER, unlike most ..._SAVER | ||
304 | macros, is required to setup EP itself if EP is needed (this is because | ||
305 | in many cases, the macro is empty). */ | ||
306 | #define TRAP_SCHEDULE_EXTRA_STATE_SAVER \ | ||
307 | mov sp, ep; \ | ||
308 | SAVE_RVAL_REGS | ||
309 | /* Note that ..._SCHEDULE_EXTRA_STATE_RESTORER, unlike most ..._RESTORER | ||
310 | macros, is required to setup EP itself if EP is needed (this is because | ||
311 | in many cases, the macro is empty). */ | ||
312 | #define TRAP_SCHEDULE_EXTRA_STATE_RESTORER \ | ||
313 | mov sp, ep; \ | ||
314 | RESTORE_RVAL_REGS | ||
315 | |||
316 | /* Register saving/restoring for maskable interrupts. */ | ||
317 | #define IRQ_RET reti | ||
318 | #define IRQ_STATE_SAVER \ | ||
319 | SAVE_CALL_CLOBBERED_REGS; \ | ||
320 | SAVE_PC(EIPC); \ | ||
321 | SAVE_PSW(EIPSW) | ||
322 | #define IRQ_STATE_RESTORER \ | ||
323 | RESTORE_CALL_CLOBBERED_REGS; \ | ||
324 | RESTORE_PC(EIPC); \ | ||
325 | RESTORE_PSW(EIPSW) | ||
326 | #define IRQ_EXTRA_STATE_SAVER \ | ||
327 | SAVE_CALL_SAVED_REGS; \ | ||
328 | SAVE_CT_REGS | ||
329 | #define IRQ_EXTRA_STATE_RESTORER \ | ||
330 | RESTORE_CALL_SAVED_REGS; \ | ||
331 | RESTORE_CT_REGS | ||
332 | #define IRQ_SCHEDULE_EXTRA_STATE_SAVER /* nothing */ | ||
333 | #define IRQ_SCHEDULE_EXTRA_STATE_RESTORER /* nothing */ | ||
334 | |||
335 | /* Register saving/restoring for non-maskable interrupts. */ | ||
336 | #define NMI_RET reti | ||
337 | #define NMI_STATE_SAVER \ | ||
338 | SAVE_CALL_CLOBBERED_REGS; \ | ||
339 | SAVE_PC(FEPC); \ | ||
340 | SAVE_PSW(FEPSW); | ||
341 | #define NMI_STATE_RESTORER \ | ||
342 | RESTORE_CALL_CLOBBERED_REGS; \ | ||
343 | RESTORE_PC(FEPC); \ | ||
344 | RESTORE_PSW(FEPSW); | ||
345 | #define NMI_EXTRA_STATE_SAVER \ | ||
346 | SAVE_CALL_SAVED_REGS; \ | ||
347 | SAVE_CT_REGS | ||
348 | #define NMI_EXTRA_STATE_RESTORER \ | ||
349 | RESTORE_CALL_SAVED_REGS; \ | ||
350 | RESTORE_CT_REGS | ||
351 | #define NMI_SCHEDULE_EXTRA_STATE_SAVER /* nothing */ | ||
352 | #define NMI_SCHEDULE_EXTRA_STATE_RESTORER /* nothing */ | ||
353 | |||
354 | /* Register saving/restoring for debug traps. */ | ||
355 | #define DBTRAP_RET .long 0x014607E0 /* `dbret', but gas doesn't support it. */ | ||
356 | #define DBTRAP_STATE_SAVER \ | ||
357 | SAVE_CALL_CLOBBERED_REGS; \ | ||
358 | SAVE_PC(DBPC); \ | ||
359 | SAVE_PSW(DBPSW) | ||
360 | #define DBTRAP_STATE_RESTORER \ | ||
361 | RESTORE_CALL_CLOBBERED_REGS; \ | ||
362 | RESTORE_PC(DBPC); \ | ||
363 | RESTORE_PSW(DBPSW) | ||
364 | #define DBTRAP_EXTRA_STATE_SAVER \ | ||
365 | SAVE_CALL_SAVED_REGS; \ | ||
366 | SAVE_CT_REGS | ||
367 | #define DBTRAP_EXTRA_STATE_RESTORER \ | ||
368 | RESTORE_CALL_SAVED_REGS; \ | ||
369 | RESTORE_CT_REGS | ||
370 | #define DBTRAP_SCHEDULE_EXTRA_STATE_SAVER /* nothing */ | ||
371 | #define DBTRAP_SCHEDULE_EXTRA_STATE_RESTORER /* nothing */ | ||
372 | |||
373 | /* Register saving/restoring for a context switch. We don't need to save | ||
374 | too many registers, because context-switching looks like a function call | ||
375 | (via the function `switch_thread'), so callers will save any | ||
376 | call-clobbered registers themselves. We do need to save the CT regs, as | ||
377 | they're normally not saved during kernel entry (the kernel doesn't use | ||
378 | them). We save PSW so that interrupt-status state will correctly follow | ||
379 | each thread (mostly NMI vs. normal-IRQ/trap), though for the most part | ||
380 | it doesn't matter since threads are always in almost exactly the same | ||
381 | processor state during a context switch. The stack pointer and return | ||
382 | value are handled by switch_thread itself. */ | ||
383 | #define SWITCH_STATE_SAVER \ | ||
384 | SAVE_CALL_SAVED_REGS; \ | ||
385 | SAVE_PSW(PSW); \ | ||
386 | SAVE_CT_REGS | ||
387 | #define SWITCH_STATE_RESTORER \ | ||
388 | RESTORE_CALL_SAVED_REGS; \ | ||
389 | RESTORE_PSW(PSW); \ | ||
390 | RESTORE_CT_REGS | ||
391 | |||
392 | |||
393 | /* Restore register state from the state-save-frame on the stack, switch back | ||
394 | to the user stack if necessary, and return from the trap/interrupt. | ||
395 | EXTRA_STATE_RESTORER is a sequence of assembly language statements to | ||
396 | restore anything not restored by this macro. Only registers not saved by | ||
397 | the C compiler are restored (that is, R3(sp), R4(gp), R31(lp), and | ||
398 | anything restored by EXTRA_STATE_RESTORER). */ | ||
399 | #define RETURN(type) \ | ||
400 | ld.b PTO+PT_KERNEL_MODE[sp], r19; \ | ||
401 | di; /* Disable interrupts */ \ | ||
402 | cmp r19, r0; /* See if returning to kernel mode, */\ | ||
403 | bne 2f; /* ... if so, skip resched &c. */ \ | ||
404 | \ | ||
405 | /* We're returning to user mode, so check for various conditions that \ | ||
406 | trigger rescheduling. */ \ | ||
407 | GET_CURRENT_THREAD(r18); \ | ||
408 | ld.w TI_FLAGS[r18], r19; \ | ||
409 | andi _TIF_NEED_RESCHED, r19, r0; \ | ||
410 | bnz 3f; /* Call the scheduler. */ \ | ||
411 | 5: andi _TIF_SIGPENDING, r19, r18; \ | ||
412 | ld.w TASK_PTRACE[CURRENT_TASK], r19; /* ptrace flags */ \ | ||
413 | or r18, r19; /* see if either is non-zero */ \ | ||
414 | bnz 4f; /* if so, handle them */ \ | ||
415 | \ | ||
416 | /* Return to user state. */ \ | ||
417 | 1: st.b r0, KM; /* Now officially in user state. */ \ | ||
418 | \ | ||
419 | /* Final return. The stack-pointer fiddling is not needed when returning \ | ||
420 | to kernel-mode, but they don't hurt, and this way we can share the \ | ||
421 | (sometimes rather lengthy) POP_STATE macro. */ \ | ||
422 | 2: POP_STATE(type); \ | ||
423 | st.w sp, KSP; /* Save the kernel stack pointer. */ \ | ||
424 | ld.w PT_GPR(GPR_SP)-PT_SIZE[sp], sp; /* Restore stack pointer. */ \ | ||
425 | type ## _RET; /* Return from the trap/interrupt. */ \ | ||
426 | \ | ||
427 | /* Call the scheduler before returning from a syscall/trap. */ \ | ||
428 | 3: SAVE_EXTRA_STATE_FOR_SCHEDULE(type); /* Prepare to call scheduler. */ \ | ||
429 | jarl call_scheduler, lp; /* Call scheduler */ \ | ||
430 | di; /* The scheduler enables interrupts */\ | ||
431 | RESTORE_EXTRA_STATE_FOR_SCHEDULE(type); \ | ||
432 | GET_CURRENT_THREAD(r18); \ | ||
433 | ld.w TI_FLAGS[r18], r19; \ | ||
434 | br 5b; /* Continue with return path. */ \ | ||
435 | \ | ||
436 | /* Handle a signal or ptraced process return. \ | ||
437 | r18 should be non-zero if there are pending signals. */ \ | ||
438 | 4: /* Not all registers are saved by the normal trap/interrupt entry \ | ||
439 | points (for instance, call-saved registers (because the normal \ | ||
440 | C-compiler calling sequence in the kernel makes sure they're \ | ||
441 | preserved), and call-clobbered registers in the case of \ | ||
442 | traps), but signal handlers may want to examine or change the \ | ||
443 | complete register state. Here we save anything not saved by \ | ||
444 | the normal entry sequence, so that it may be safely restored \ | ||
445 | (in a possibly modified form) after do_signal returns. */ \ | ||
446 | SAVE_EXTRA_STATE(type); /* Save state not saved by entry. */ \ | ||
447 | jarl handle_signal_or_ptrace_return, lp; \ | ||
448 | RESTORE_EXTRA_STATE(type); /* Restore extra regs. */ \ | ||
449 | br 1b | ||
450 | |||
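/* The work-pending checks RETURN performs before dropping back to user
   mode, as a C sketch.  The field names below are informal stand-ins for
   the PT_*/TI_*/TASK_* offsets used above, and the label flow is
   simplified (illustration only):

	if (!frame->kernel_mode) {			// returning to user space
		if (thread_info->flags & _TIF_NEED_RESCHED)
			call_scheduler();		// then re-check the flags
		if ((thread_info->flags & _TIF_SIGPENDING) || task->ptrace)
			handle_signal_or_ptrace_return();
		KM = 0;					// officially in user mode again
	}
	// POP_STATE, restore the user SP, then reti (or dbret)
*/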
451 | |||
452 | /* Jump to the appropriate function for the system call number in r12 | ||
453 | (r12 is not preserved), or return an error if r12 is not valid. The | ||
454 | LP register should point to the location where the called function | ||
455 | should return. [note that MAKE_SYS_CALL uses label 1] */ | ||
456 | #define MAKE_SYS_CALL \ | ||
457 | /* Figure out which function to use for this system call. */ \ | ||
458 | shl 2, r12; \ | ||
459 | /* See if the system call number is valid. */ \ | ||
460 | addi lo(CSYM(sys_call_table) - sys_call_table_end), r12, r0; \ | ||
461 | bnh 1f; \ | ||
462 | mov hilo(CSYM(sys_call_table)), r19; \ | ||
463 | add r19, r12; \ | ||
464 | ld.w 0[r12], r12; \ | ||
465 | /* Make the system call. */ \ | ||
466 | jmp [r12]; \ | ||
467 | /* The syscall number is invalid, return an error. */ \ | ||
468 | 1: addi -ENOSYS, r0, r10; \ | ||
469 | jmp [lp] | ||
470 | |||
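/* What MAKE_SYS_CALL does, as a C sketch (illustration only; the range
   check really operates on the byte offset nr << 2, and the syscall
   arguments are simply left untouched in r6-r9):

	if (nr >= (sys_call_table_end - sys_call_table) / 4)
		return -ENOSYS;			// invalid syscall number
	return sys_call_table[nr]();		// tail-jump to the handler
*/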
471 | |||
472 | .text | ||
473 | |||
474 | /* | ||
475 | * User trap. | ||
476 | * | ||
477 | * Trap 0 system calls are also handled here. | ||
478 | * | ||
479 | * The stack-pointer (r3) should have already been saved to the memory | ||
480 | * location ENTRY_SP (the reason for this is that the interrupt vectors may be | ||
481 | * beyond a 22-bit signed offset jump from the actual interrupt handler, and | ||
482 | * this allows them to save the stack-pointer and use that register to do an | ||
483 | * indirect jump). | ||
484 | * | ||
485 | * Syscall protocol: | ||
486 | * Syscall number in r12, args in r6-r9 | ||
487 | * Return value in r10 | ||
488 | */ | ||
489 | G_ENTRY(trap): | ||
490 | SAVE_STATE (TRAP, r12, ENTRY_SP) // Save registers. | ||
491 | stsr SR_ECR, r19 // Find out which trap it was. | ||
492 | ei // Enable interrupts. | ||
493 | mov hilo(ret_from_trap), lp // where the trap should return | ||
494 | |||
495 | // The following two shifts (1) clear out extraneous NMI data in the | ||
496 | // upper 16-bits, (2) convert the 0x40 - 0x5f range of trap ECR | ||
497 | // numbers into the (0-31) << 2 range we want, (3) set the flags. | ||
498 | shl 27, r19 // chop off all high bits | ||
499 | shr 25, r19 // scale back down and then << 2 | ||
500 | bnz 2f // See if not trap 0. | ||
501 | |||
502 | // Trap 0 is a `short' system call, skip general trap table. | ||
503 | MAKE_SYS_CALL // Jump to the syscall function. | ||
504 | |||
505 | 2: // For other traps, use a table lookup. | ||
506 | mov hilo(CSYM(trap_table)), r18 | ||
507 | add r19, r18 | ||
508 | ld.w 0[r18], r18 | ||
509 | jmp [r18] // Jump to the trap handler. | ||
510 | END(trap) | ||
511 | |||
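/* The ECR decoding in the trap entry above, as a C sketch: trap ECR codes
   lie in the range 0x40-0x5f, and the two shifts reduce them to a byte
   offset into trap_table (illustration only):

	unsigned offset = (ecr & 0x1f) << 2;	// 0x40..0x5f -> 0, 4, ..., 124
	if (offset == 0)
		;	// trap 0: go straight to MAKE_SYS_CALL
	else
		(*(void (**)(void))((char *)trap_table + offset))();
*/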
512 | /* This is just like ret_from_trap, but first restores extra registers | ||
513 | saved by some wrappers. */ | ||
514 | L_ENTRY(restore_extra_regs_and_ret_from_trap): | ||
515 | RESTORE_EXTRA_STATE(TRAP) | ||
516 | // fall through | ||
517 | END(restore_extra_regs_and_ret_from_trap) | ||
518 | |||
519 | /* Entry point used to return from a syscall/trap. */ | ||
520 | L_ENTRY(ret_from_trap): | ||
521 | RETURN(TRAP) | ||
522 | END(ret_from_trap) | ||
523 | |||
524 | |||
525 | /* This is the initial entry point for a new child thread, with an appropriate | ||
526 | stack in place that makes it look like the child is in the middle of a | ||
527 | syscall. This function is actually `returned to' from switch_thread | ||
528 | (copy_thread makes ret_from_fork the return address in each new thread's | ||
529 | saved context). */ | ||
530 | C_ENTRY(ret_from_fork): | ||
531 | mov r10, r6 // switch_thread returns the prev task. | ||
532 | jarl CSYM(schedule_tail), lp // ...which is schedule_tail's arg | ||
533 | mov r0, r10 // Child's fork call should return 0. | ||
534 | br ret_from_trap // Do normal trap return. | ||
535 | C_END(ret_from_fork) | ||
536 | |||
537 | |||
538 | /* | ||
539 | * Trap 1: `long' system calls | ||
540 | * `Long' syscall protocol: | ||
541 | * Syscall number in r12, args in r6-r9, r13-r14 | ||
542 | * Return value in r10 | ||
543 | */ | ||
544 | L_ENTRY(syscall_long): | ||
545 | // Push extra arguments on the stack. Note that by default, the trap | ||
546 | // handler reserves enough stack space for 6 arguments, so we don't | ||
547 | // have to make any additional room. | ||
548 | st.w r13, 16[sp] // arg 5 | ||
549 | st.w r14, 20[sp] // arg 6 | ||
550 | |||
551 | // Make sure r13 and r14 are preserved, in case we have to restart a | ||
552 | // system call because of a signal (ep has already been set by caller). | ||
553 | st.w r13, PTO+PT_GPR(13)[sp] | ||
554 | st.w r14, PTO+PT_GPR(14)[sp] | ||
555 | mov hilo(ret_from_long_syscall), lp | ||
556 | |||
557 | MAKE_SYS_CALL // Jump to the syscall function. | ||
558 | END(syscall_long) | ||
559 | |||
560 | /* Entry point used to return from a long syscall. Only needed to restore | ||
561 | r13/r14 if the general trap mechanism doesn't do so. */ | ||
562 | L_ENTRY(ret_from_long_syscall): | ||
563 | ld.w PTO+PT_GPR(13)[sp], r13 // Restore the extra registers | ||
564 | ld.w PTO+PT_GPR(14)[sp], r14 | ||
565 | br ret_from_trap // The rest is the same as other traps | ||
566 | END(ret_from_long_syscall) | ||
567 | |||
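/* The `long' syscall argument convention, seen from the C side.  A
   hypothetical six-argument handler (sys_example is not a real syscall,
   just an illustration of where each argument comes from):

	long sys_example(long a1,	// r6
			 long a2,	// r7
			 long a3,	// r8
			 long a4,	// r9
			 long a5,	// copied from r13 to 16[sp] above
			 long a6);	// copied from r14 to 20[sp] above
*/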
568 | |||
569 | /* These syscalls need access to the struct pt_regs on the stack, so we | ||
570 | implement them in assembly (they're basically all wrappers anyway). */ | ||
571 | |||
572 | L_ENTRY(sys_fork_wrapper): | ||
573 | #ifdef CONFIG_MMU | ||
574 | addi SIGCHLD, r0, r6 // Arg 0: flags | ||
575 | ld.w PTO+PT_GPR(GPR_SP)[sp], r7 // Arg 1: child SP (use parent's) | ||
576 | movea PTO, sp, r8 // Arg 2: parent context | ||
577 | mov r0, r9 // Arg 3/4/5: 0 | ||
578 | st.w r0, 16[sp] | ||
579 | st.w r0, 20[sp] | ||
580 | mov hilo(CSYM(do_fork)), r18 // Where the real work gets done | ||
581 | br save_extra_state_tramp // Save state and go there | ||
582 | #else | ||
583 | // fork almost works, enough to trick you into looking elsewhere :-( | ||
584 | addi -EINVAL, r0, r10 | ||
585 | jmp [lp] | ||
586 | #endif | ||
587 | END(sys_fork_wrapper) | ||
588 | |||
589 | L_ENTRY(sys_vfork_wrapper): | ||
590 | addi CLONE_VFORK | CLONE_VM | SIGCHLD, r0, r6 // Arg 0: flags | ||
591 | ld.w PTO+PT_GPR(GPR_SP)[sp], r7 // Arg 1: child SP (use parent's) | ||
592 | movea PTO, sp, r8 // Arg 2: parent context | ||
593 | mov r0, r9 // Arg 3/4/5: 0 | ||
594 | st.w r0, 16[sp] | ||
595 | st.w r0, 20[sp] | ||
596 | mov hilo(CSYM(do_fork)), r18 // Where the real work gets done | ||
597 | br save_extra_state_tramp // Save state and go there | ||
598 | END(sys_vfork_wrapper) | ||
599 | |||
600 | L_ENTRY(sys_clone_wrapper): | ||
601 | ld.w PTO+PT_GPR(GPR_SP)[sp], r19// parent's stack pointer | ||
602 | cmp r7, r0 // See if child SP arg (arg 1) is 0. | ||
603 | cmov z, r19, r7, r7 // ... and use the parent's if so. | ||
604 | movea PTO, sp, r8 // Arg 2: parent context | ||
605 | mov r0, r9 // Arg 3/4/5: 0 | ||
606 | st.w r0, 16[sp] | ||
607 | st.w r0, 20[sp] | ||
608 | mov hilo(CSYM(do_fork)), r18 // Where the real work gets done | ||
609 | br save_extra_state_tramp // Save state and go there | ||
610 | END(sys_clone_wrapper) | ||
611 | |||
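/* What the three wrappers above arrange to call, sketched in C.  The
   argument order shown assumes the 2.6-era do_fork() prototype
   (flags, child stack, regs, stack size, parent tid ptr, child tid ptr);
   treat this as an illustration rather than a definition:

	do_fork(SIGCHLD, parent_sp, regs, 0, NULL, NULL);			// fork
	do_fork(CLONE_VFORK|CLONE_VM|SIGCHLD, parent_sp, regs, 0, NULL, NULL);	// vfork
	do_fork(clone_flags, child_sp ? child_sp : parent_sp, regs, 0, NULL, NULL); // clone
*/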
612 | |||
613 | L_ENTRY(sys_execve_wrapper): | ||
614 | movea PTO, sp, r9 // add user context as 4th arg | ||
615 | jr CSYM(sys_execve) // Do real work (tail-call). | ||
616 | END(sys_execve_wrapper) | ||
617 | |||
618 | |||
619 | L_ENTRY(sys_sigsuspend_wrapper): | ||
620 | movea PTO, sp, r7 // add user context as 2nd arg | ||
621 | mov hilo(CSYM(sys_sigsuspend)), r18 // syscall function | ||
622 | jarl save_extra_state_tramp, lp // Save state and do it | ||
623 | br restore_extra_regs_and_ret_from_trap | ||
624 | END(sys_sigsuspend_wrapper) | ||
625 | L_ENTRY(sys_rt_sigsuspend_wrapper): | ||
626 | movea PTO, sp, r8 // add user context as 3rd arg | ||
627 | mov hilo(CSYM(sys_rt_sigsuspend)), r18 // syscall function | ||
628 | jarl save_extra_state_tramp, lp // Save state and do it | ||
629 | br restore_extra_regs_and_ret_from_trap | ||
630 | END(sys_rt_sigsuspend_wrapper) | ||
631 | |||
632 | L_ENTRY(sys_sigreturn_wrapper): | ||
633 | movea PTO, sp, r6 // add user context as 1st arg | ||
634 | mov hilo(CSYM(sys_sigreturn)), r18 // syscall function | ||
635 | jarl save_extra_state_tramp, lp // Save state and do it | ||
636 | br restore_extra_regs_and_ret_from_trap | ||
637 | END(sys_sigreturn_wrapper) | ||
638 | L_ENTRY(sys_rt_sigreturn_wrapper): | ||
639 | movea PTO, sp, r6 // add user context as 1st arg | ||
640 | mov hilo(CSYM(sys_rt_sigreturn)), r18// syscall function | ||
641 | jarl save_extra_state_tramp, lp // Save state and do it | ||
642 | br restore_extra_regs_and_ret_from_trap | ||
643 | END(sys_rt_sigreturn_wrapper) | ||
644 | |||
645 | |||
646 | /* Save any state not saved by SAVE_STATE(TRAP), and jump to r18. | ||
647 | Its main purpose is to share the rather lengthy code sequence that | ||
648 | SAVE_EXTRA_STATE expands into among the above wrapper functions. */ | ||
649 | L_ENTRY(save_extra_state_tramp): | ||
650 | SAVE_EXTRA_STATE(TRAP) // Save state not saved by entry. | ||
651 | jmp [r18] // Do the work the caller wants | ||
652 | END(save_extra_state_tramp) | ||
653 | |||
654 | |||
655 | /* | ||
656 | * Hardware maskable interrupts. | ||
657 | * | ||
658 | * The stack-pointer (r3) should have already been saved to the memory | ||
659 | * location ENTRY_SP (the reason for this is that the interrupt vectors may be | ||
660 | * beyond a 22-bit signed offset jump from the actual interrupt handler, and | ||
661 | * this allows them to save the stack-pointer and use that register to do an | ||
662 | * indirect jump). | ||
663 | */ | ||
664 | G_ENTRY(irq): | ||
665 | SAVE_STATE (IRQ, r0, ENTRY_SP) // Save registers. | ||
666 | |||
667 | stsr SR_ECR, r6 // Find out which interrupt it was. | ||
668 | movea PTO, sp, r7 // User regs are arg2 | ||
669 | |||
670 | // All v850 implementations I know about encode their interrupts as | ||
671 | // multiples of 0x10, starting at 0x80 (after NMIs and software | ||
672 | // interrupts). Convert this number into a simple IRQ index for the | ||
673 | // rest of the kernel. We also clear the upper 16 bits, which hold | ||
674 | // NMI info, and don't appear to be cleared when a NMI returns. | ||
675 | shl 16, r6 // clear upper 16 bits | ||
676 | shr 20, r6 // shift back, and remove lower nibble | ||
677 | add -8, r6 // remove bias for irqs | ||
678 | |||
679 | // Call the high-level interrupt handling code. | ||
680 | jarl CSYM(handle_irq), lp | ||
681 | |||
682 | RETURN(IRQ) | ||
683 | END(irq) | ||
684 | |||
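/* The ECR-to-IRQ-number conversion above, as a C sketch (illustration
   only):

	irq = ((ecr & 0xffff) >> 4) - 8;	// 0x80 -> 0, 0x90 -> 1, ...
	handle_irq(irq, regs);
*/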
685 | |||
686 | /* | ||
687 | * Debug trap / illegal-instruction exception | ||
688 | * | ||
689 | * The stack-pointer (r3) should have already been saved to the memory | ||
690 | * location ENTRY_SP (the reason for this is that the interrupt vectors may be | ||
691 | * beyond a 22-bit signed offset jump from the actual interrupt handler, and | ||
692 | * this allows them to save the stack-pointer and use that register to do an | ||
693 | * indirect jump). | ||
694 | */ | ||
695 | G_ENTRY(dbtrap): | ||
696 | SAVE_STATE (DBTRAP, r0, ENTRY_SP)// Save registers. | ||
697 | |||
698 | /* First see if we came from kernel mode; if so, the dbtrap | ||
699 | instruction has a special meaning, to set the DIR (`debug | ||
700 | information register') register. This is because the DIR register | ||
701 | can _only_ be manipulated/read while in `debug mode,' and debug | ||
702 | mode is only active while we're inside the dbtrap handler. The | ||
703 | exact functionality is: { DIR = (DIR | r6) & ~r7; return DIR; }. */ | ||
704 | ld.b PTO+PT_KERNEL_MODE[sp], r19 | ||
705 | cmp r19, r0 | ||
706 | bz 1f | ||
707 | |||
708 | stsr SR_DIR, r10 | ||
709 | or r6, r10 | ||
710 | not r7, r7 | ||
711 | and r7, r10 | ||
712 | ldsr r10, SR_DIR | ||
713 | stsr SR_DIR, r10 // Confirm the value we set | ||
714 | st.w r10, PTO+PT_GPR(10)[sp] // return it | ||
715 | br 3f | ||
716 | |||
717 | 1: ei // Enable interrupts. | ||
718 | |||
719 | /* The default signal type we raise. */ | ||
720 | mov SIGTRAP, r6 | ||
721 | |||
722 | /* See if it's a single-step trap. */ | ||
723 | stsr SR_DBPSW, r19 | ||
724 | andi 0x0800, r19, r19 | ||
725 | bnz 2f | ||
726 | |||
727 | /* Look to see if the preceding instruction was a dbtrap or not, | ||
728 | to decide which signal we should use. */ | ||
729 | stsr SR_DBPC, r19 // PC following trapping insn | ||
730 | ld.hu -2[r19], r19 | ||
731 | ori 0xf840, r0, r20 // DBTRAP insn | ||
732 | cmp r19, r20 // Was this trap caused by DBTRAP? | ||
733 | cmov ne, SIGILL, r6, r6 // Choose signal appropriately | ||
734 | |||
735 | /* Raise the desired signal. */ | ||
736 | 2: mov CURRENT_TASK, r7 // Arg 1: task | ||
737 | jarl CSYM(send_sig), lp // tail call | ||
738 | |||
739 | 3: RETURN(DBTRAP) | ||
740 | END(dbtrap) | ||
741 | |||
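/* The signal-selection logic in the user-mode path above, as a C sketch
   (dbpsw/dbpc stand for the SR_DBPSW/SR_DBPC values read above;
   illustration only):

	int sig = SIGTRAP;
	if (!(dbpsw & 0x0800)				// not a single-step trap
	    && *(unsigned short *)(dbpc - 2) != 0xf840)	// and not a DBTRAP insn
		sig = SIGILL;
	// ... send_sig() is then tail-called with this signal and the
	//     current task ...
*/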
742 | |||
743 | /* | ||
744 | * Hardware non-maskable interrupts. | ||
745 | * | ||
746 | * The stack-pointer (r3) should have already been saved to the memory | ||
747 | * location ENTRY_SP (the reason for this is that the interrupt vectors may be | ||
748 | * beyond a 22-bit signed offset jump from the actual interrupt handler, and | ||
749 | * this allows them to save the stack-pointer and use that register to do an | ||
750 | * indirect jump). | ||
751 | */ | ||
752 | G_ENTRY(nmi): | ||
753 | SAVE_STATE (NMI, r0, NMI_ENTRY_SP); /* Save registers. */ | ||
754 | |||
755 | stsr SR_ECR, r6; /* Find out which nmi it was. */ | ||
756 | shr 20, r6; /* Extract NMI code in bits 20-24. */ | ||
757 | movea PTO, sp, r7; /* User regs are arg2. */ | ||
758 | |||
759 | /* Non-maskable interrupts always lie right after maskable interrupts. | ||
760 | Call the generic IRQ handler, with two arguments, the IRQ number, | ||
761 | and a pointer to the user registers, to handle the specifics. | ||
762 | (we subtract one because the first NMI has code 1). */ | ||
763 | addi FIRST_NMI - 1, r6, r6 | ||
764 | jarl CSYM(handle_irq), lp | ||
765 | |||
766 | RETURN(NMI) | ||
767 | END(nmi) | ||
768 | |||
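/* The NMI-code-to-IRQ-number conversion above, as a C sketch (illustration
   only; NMI codes start at 1 in bits 20-24 of ECR):

	irq = (ecr >> 20) + FIRST_NMI - 1;	// NMI code 1 -> FIRST_NMI, ...
	handle_irq(irq, regs);
*/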
769 | |||
770 | /* | ||
771 | * Trap with no handler | ||
772 | */ | ||
773 | L_ENTRY(bad_trap_wrapper): | ||
774 | mov r19, r6 // Arg 0: trap number | ||
775 | movea PTO, sp, r7 // Arg 1: user regs | ||
776 | jr CSYM(bad_trap) // tail call handler | ||
777 | END(bad_trap_wrapper) | ||
778 | |||
779 | |||
780 | /* | ||
781 | * Invoke the scheduler, called from the trap/irq kernel exit path. | ||
782 | * | ||
783 | * This basically just calls `schedule', but also arranges for extra | ||
784 | * registers to be saved for ptrace'd processes, so ptrace can modify them. | ||
785 | */ | ||
786 | L_ENTRY(call_scheduler): | ||
787 | ld.w TASK_PTRACE[CURRENT_TASK], r19 // See if task is ptrace'd | ||
788 | cmp r19, r0 | ||
789 | bnz 1f // ... yes, do special stuff | ||
790 | jr CSYM(schedule) // ... no, just tail-call scheduler | ||
791 | |||
792 | // Save extra regs for ptrace'd task. We want to save anything | ||
793 | // that would otherwise only be `implicitly' saved by the normal | ||
794 | // compiler calling-convention. | ||
795 | 1: mov sp, ep // Setup EP for SAVE_CALL_SAVED_REGS | ||
796 | SAVE_CALL_SAVED_REGS // Save call-saved registers to stack | ||
797 | mov lp, r20 // Save LP in a callee-saved register | ||
798 | |||
799 | jarl CSYM(schedule), lp // Call scheduler | ||
800 | |||
801 | mov r20, lp | ||
802 | mov sp, ep // We can't rely on EP after return | ||
803 | RESTORE_CALL_SAVED_REGS // Restore (possibly modified) regs | ||
804 | jmp [lp] // Return to the return path | ||
805 | END(call_scheduler) | ||
806 | |||
807 | |||
808 | /* | ||
809 | * This is an out-of-line handler for two special cases during the kernel | ||
810 | * trap/irq exit sequence: | ||
811 | * | ||
812 | * (1) If r18 is non-zero then a signal needs to be handled, which is | ||
813 | * done, and then the caller returned to. | ||
814 | * | ||
815 | * (2) If r18 is zero then we're returning to a ptraced process, which | ||
816 | * has several special cases -- single-stepping and trap tracing, both | ||
817 | * of which require using the `dbret' instruction to exit the kernel | ||
818 | * instead of the normal `reti' (this is because the CPU will not correctly | ||
819 | * single-step after a reti). In this case, of course, this handler | ||
820 | * never returns to the caller. | ||
821 | * | ||
822 | * In either case, all registers should have been saved to the current | ||
823 | * state-save-frame on the stack, except for callee-saved registers. | ||
824 | * | ||
825 | * [These two different cases are combined merely to avoid bloating the | ||
826 | * macro-inlined code, not because they really make much sense together!] | ||
827 | */ | ||
828 | L_ENTRY(handle_signal_or_ptrace_return): | ||
829 | cmp r18, r0 // See if handling a signal | ||
830 | bz 1f // ... nope, go do ptrace return | ||
831 | |||
832 | // Handle a signal | ||
833 | mov lp, r20 // Save link-pointer | ||
834 | mov r10, r21 // Save return-values (for trap) | ||
835 | mov r11, r22 | ||
836 | |||
837 | movea PTO, sp, r6 // Arg 1: struct pt_regs *regs | ||
838 | mov r0, r7 // Arg 2: sigset_t *oldset | ||
839 | jarl CSYM(do_signal), lp // Handle the signal | ||
840 | di // sig handling enables interrupts | ||
841 | |||
842 | mov r20, lp // Restore link-pointer | ||
843 | mov r21, r10 // Restore return-values (for trap) | ||
844 | mov r22, r11 | ||
845 | ld.w TASK_PTRACE[CURRENT_TASK], r19 // check ptrace flags too | ||
846 | cmp r19, r0 | ||
847 | bnz 1f // ... some set, so look more | ||
848 | 2: jmp [lp] // ... none set, so return normally | ||
849 | |||
850 | // ptrace return | ||
851 | 1: ld.w PTO+PT_PSW[sp], r19 // Look at the user process's flags | ||
852 | andi 0x0800, r19, r19 // See if single-step flag is set | ||
853 | bz 2b // ... nope, return normally | ||
854 | |||
855 | // Return as if from a dbtrap insn | ||
856 | st.b r0, KM // Now officially in user state. | ||
857 | POP_STATE(DBTRAP) // Restore regs | ||
858 | st.w sp, KSP // Save the kernel stack pointer. | ||
859 | ld.w PT_GPR(GPR_SP)-PT_SIZE[sp], sp // Restore user stack pointer. | ||
860 | DBTRAP_RET // Return from the trap/interrupt. | ||
861 | END(handle_signal_or_ptrace_return) | ||
862 | |||
863 | |||
864 | /* | ||
865 | * This is where we switch between two threads. The arguments are: | ||
866 | * r6 -- pointer to the struct thread for the `current' process | ||
867 | * r7 -- pointer to the struct thread for the `new' process. | ||
868 | * when this function returns, it will return to the new thread. | ||
869 | */ | ||
870 | C_ENTRY(switch_thread): | ||
871 | // Return the previous task (r10 is not clobbered by restore below) | ||
872 | mov CURRENT_TASK, r10 | ||
873 | // First, push the current processor state on the stack | ||
874 | PUSH_STATE(SWITCH) | ||
875 | // Now save the location of the kernel stack pointer for this thread; | ||
876 | // since we've pushed all other state on the stack, this is enough to | ||
877 | // restore it all later. | ||
878 | st.w sp, THREAD_KSP[r6] | ||
879 | // Now restore the stack pointer from the new process | ||
880 | ld.w THREAD_KSP[r7], sp | ||
881 | // ... and restore all state from that | ||
882 | POP_STATE(SWITCH) | ||
883 | // Update the current task pointer | ||
884 | GET_CURRENT_TASK(CURRENT_TASK) | ||
885 | // Now return into the new thread | ||
886 | jmp [lp] | ||
887 | C_END(switch_thread) | ||
888 | |||
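/* switch_thread in C-like pseudo-code (THREAD_KSP is the saved kernel
   stack pointer slot in the thread structure; the push/pop steps cannot be
   expressed in C, so this is only a sketch of the control flow):

	prev = CURRENT_TASK;			// returned in r10
	// push a SWITCH-type state frame on the current kernel stack
	old_thread->ksp = sp;
	sp = new_thread->ksp;
	// pop the SWITCH-type frame from the new stack, including LP, so the
	// jmp [lp] above resumes wherever the new thread last called
	// switch_thread
	CURRENT_TASK = get_current_task();
*/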
889 | |||
890 | .data | ||
891 | |||
892 | .align 4 | ||
893 | C_DATA(trap_table): | ||
894 | .long bad_trap_wrapper // trap 0, doesn't use trap table. | ||
895 | .long syscall_long // trap 1, `long' syscall. | ||
896 | .long bad_trap_wrapper | ||
897 | .long bad_trap_wrapper | ||
898 | .long bad_trap_wrapper | ||
899 | .long bad_trap_wrapper | ||
900 | .long bad_trap_wrapper | ||
901 | .long bad_trap_wrapper | ||
902 | .long bad_trap_wrapper | ||
903 | .long bad_trap_wrapper | ||
904 | .long bad_trap_wrapper | ||
905 | .long bad_trap_wrapper | ||
906 | .long bad_trap_wrapper | ||
907 | .long bad_trap_wrapper | ||
908 | .long bad_trap_wrapper | ||
909 | .long bad_trap_wrapper | ||
910 | C_END(trap_table) | ||
911 | |||
912 | |||
913 | .section .rodata | ||
914 | |||
915 | .align 4 | ||
916 | C_DATA(sys_call_table): | ||
917 | .long CSYM(sys_restart_syscall) // 0 | ||
918 | .long CSYM(sys_exit) | ||
919 | .long sys_fork_wrapper | ||
920 | .long CSYM(sys_read) | ||
921 | .long CSYM(sys_write) | ||
922 | .long CSYM(sys_open) // 5 | ||
923 | .long CSYM(sys_close) | ||
924 | .long CSYM(sys_waitpid) | ||
925 | .long CSYM(sys_creat) | ||
926 | .long CSYM(sys_link) | ||
927 | .long CSYM(sys_unlink) // 10 | ||
928 | .long sys_execve_wrapper | ||
929 | .long CSYM(sys_chdir) | ||
930 | .long CSYM(sys_time) | ||
931 | .long CSYM(sys_mknod) | ||
932 | .long CSYM(sys_chmod) // 15 | ||
933 | .long CSYM(sys_chown) | ||
934 | .long CSYM(sys_ni_syscall) // was: break | ||
935 | .long CSYM(sys_ni_syscall) // was: oldstat (aka stat) | ||
936 | .long CSYM(sys_lseek) | ||
937 | .long CSYM(sys_getpid) // 20 | ||
938 | .long CSYM(sys_mount) | ||
939 | .long CSYM(sys_oldumount) | ||
940 | .long CSYM(sys_setuid) | ||
941 | .long CSYM(sys_getuid) | ||
942 | .long CSYM(sys_stime) // 25 | ||
943 | .long CSYM(sys_ptrace) | ||
944 | .long CSYM(sys_alarm) | ||
945 | .long CSYM(sys_ni_syscall) // was: oldfstat (aka fstat) | ||
946 | .long CSYM(sys_pause) | ||
947 | .long CSYM(sys_utime) // 30 | ||
948 | .long CSYM(sys_ni_syscall) // was: stty | ||
949 | .long CSYM(sys_ni_syscall) // was: gtty | ||
950 | .long CSYM(sys_access) | ||
951 | .long CSYM(sys_nice) | ||
952 | .long CSYM(sys_ni_syscall) // 35, was: ftime | ||
953 | .long CSYM(sys_sync) | ||
954 | .long CSYM(sys_kill) | ||
955 | .long CSYM(sys_rename) | ||
956 | .long CSYM(sys_mkdir) | ||
957 | .long CSYM(sys_rmdir) // 40 | ||
958 | .long CSYM(sys_dup) | ||
959 | .long CSYM(sys_pipe) | ||
960 | .long CSYM(sys_times) | ||
961 | .long CSYM(sys_ni_syscall) // was: prof | ||
962 | .long CSYM(sys_brk) // 45 | ||
963 | .long CSYM(sys_setgid) | ||
964 | .long CSYM(sys_getgid) | ||
965 | .long CSYM(sys_signal) | ||
966 | .long CSYM(sys_geteuid) | ||
967 | .long CSYM(sys_getegid) // 50 | ||
968 | .long CSYM(sys_acct) | ||
969 | .long CSYM(sys_umount) // recycled never used phys() | ||
970 | .long CSYM(sys_ni_syscall) // was: lock | ||
971 | .long CSYM(sys_ioctl) | ||
972 | .long CSYM(sys_fcntl) // 55 | ||
973 | .long CSYM(sys_ni_syscall) // was: mpx | ||
974 | .long CSYM(sys_setpgid) | ||
975 | .long CSYM(sys_ni_syscall) // was: ulimit | ||
976 | .long CSYM(sys_ni_syscall) | ||
977 | .long CSYM(sys_umask) // 60 | ||
978 | .long CSYM(sys_chroot) | ||
979 | .long CSYM(sys_ustat) | ||
980 | .long CSYM(sys_dup2) | ||
981 | .long CSYM(sys_getppid) | ||
982 | .long CSYM(sys_getpgrp) // 65 | ||
983 | .long CSYM(sys_setsid) | ||
984 | .long CSYM(sys_sigaction) | ||
985 | .long CSYM(sys_sgetmask) | ||
986 | .long CSYM(sys_ssetmask) | ||
987 | .long CSYM(sys_setreuid) // 70 | ||
988 | .long CSYM(sys_setregid) | ||
989 | .long sys_sigsuspend_wrapper | ||
990 | .long CSYM(sys_sigpending) | ||
991 | .long CSYM(sys_sethostname) | ||
992 | .long CSYM(sys_setrlimit) // 75 | ||
993 | .long CSYM(sys_getrlimit) | ||
994 | .long CSYM(sys_getrusage) | ||
995 | .long CSYM(sys_gettimeofday) | ||
996 | .long CSYM(sys_settimeofday) | ||
997 | .long CSYM(sys_getgroups) // 80 | ||
998 | .long CSYM(sys_setgroups) | ||
999 | .long CSYM(sys_select) | ||
1000 | .long CSYM(sys_symlink) | ||
1001 | .long CSYM(sys_ni_syscall) // was: oldlstat (aka lstat) | ||
1002 | .long CSYM(sys_readlink) // 85 | ||
1003 | .long CSYM(sys_uselib) | ||
1004 | .long CSYM(sys_swapon) | ||
1005 | .long CSYM(sys_reboot) | ||
1006 | .long CSYM(old_readdir) | ||
1007 | .long CSYM(sys_mmap) // 90 | ||
1008 | .long CSYM(sys_munmap) | ||
1009 | .long CSYM(sys_truncate) | ||
1010 | .long CSYM(sys_ftruncate) | ||
1011 | .long CSYM(sys_fchmod) | ||
1012 | .long CSYM(sys_fchown) // 95 | ||
1013 | .long CSYM(sys_getpriority) | ||
1014 | .long CSYM(sys_setpriority) | ||
1015 | .long CSYM(sys_ni_syscall) // was: profil | ||
1016 | .long CSYM(sys_statfs) | ||
1017 | .long CSYM(sys_fstatfs) // 100 | ||
1018 | .long CSYM(sys_ni_syscall) // i386: ioperm | ||
1019 | .long CSYM(sys_socketcall) | ||
1020 | .long CSYM(sys_syslog) | ||
1021 | .long CSYM(sys_setitimer) | ||
1022 | .long CSYM(sys_getitimer) // 105 | ||
1023 | .long CSYM(sys_newstat) | ||
1024 | .long CSYM(sys_newlstat) | ||
1025 | .long CSYM(sys_newfstat) | ||
1026 | .long CSYM(sys_ni_syscall) // was: olduname (aka uname) | ||
1027 | .long CSYM(sys_ni_syscall) // 110, i386: iopl | ||
1028 | .long CSYM(sys_vhangup) | ||
1029 | .long CSYM(sys_ni_syscall) // was: idle | ||
1030 | .long CSYM(sys_ni_syscall) // i386: vm86old | ||
1031 | .long CSYM(sys_wait4) | ||
1032 | .long CSYM(sys_swapoff) // 115 | ||
1033 | .long CSYM(sys_sysinfo) | ||
1034 | .long CSYM(sys_ipc) | ||
1035 | .long CSYM(sys_fsync) | ||
1036 | .long sys_sigreturn_wrapper | ||
1037 | .long sys_clone_wrapper // 120 | ||
1038 | .long CSYM(sys_setdomainname) | ||
1039 | .long CSYM(sys_newuname) | ||
1040 | .long CSYM(sys_ni_syscall) // i386: modify_ldt, m68k: cacheflush | ||
1041 | .long CSYM(sys_adjtimex) | ||
1042 | .long CSYM(sys_ni_syscall) // 125 - sys_mprotect | ||
1043 | .long CSYM(sys_sigprocmask) | ||
1044 | .long CSYM(sys_ni_syscall) // sys_create_module | ||
1045 | .long CSYM(sys_init_module) | ||
1046 | .long CSYM(sys_delete_module) | ||
1047 | .long CSYM(sys_ni_syscall) // 130 - sys_get_kernel_syms | ||
1048 | .long CSYM(sys_quotactl) | ||
1049 | .long CSYM(sys_getpgid) | ||
1050 | .long CSYM(sys_fchdir) | ||
1051 | .long CSYM(sys_bdflush) | ||
1052 | .long CSYM(sys_sysfs) // 135 | ||
1053 | .long CSYM(sys_personality) | ||
1054 | .long CSYM(sys_ni_syscall) // for afs_syscall | ||
1055 | .long CSYM(sys_setfsuid) | ||
1056 | .long CSYM(sys_setfsgid) | ||
1057 | .long CSYM(sys_llseek) // 140 | ||
1058 | .long CSYM(sys_getdents) | ||
1059 | .long CSYM(sys_select) // for backward compat; remove someday | ||
1060 | .long CSYM(sys_flock) | ||
1061 | .long CSYM(sys_ni_syscall) // sys_msync | ||
1062 | .long CSYM(sys_readv) // 145 | ||
1063 | .long CSYM(sys_writev) | ||
1064 | .long CSYM(sys_getsid) | ||
1065 | .long CSYM(sys_fdatasync) | ||
1066 | .long CSYM(sys_sysctl) | ||
1067 | .long CSYM(sys_ni_syscall) // 150 - sys_mlock | ||
1068 | .long CSYM(sys_ni_syscall) // sys_munlock | ||
1069 | .long CSYM(sys_ni_syscall) // sys_mlockall | ||
1070 | .long CSYM(sys_ni_syscall) // sys_munlockall | ||
1071 | .long CSYM(sys_sched_setparam) | ||
1072 | .long CSYM(sys_sched_getparam) // 155 | ||
1073 | .long CSYM(sys_sched_setscheduler) | ||
1074 | .long CSYM(sys_sched_getscheduler) | ||
1075 | .long CSYM(sys_sched_yield) | ||
1076 | .long CSYM(sys_sched_get_priority_max) | ||
1077 | .long CSYM(sys_sched_get_priority_min) // 160 | ||
1078 | .long CSYM(sys_sched_rr_get_interval) | ||
1079 | .long CSYM(sys_nanosleep) | ||
1080 | .long CSYM(sys_ni_syscall) // sys_mremap | ||
1081 | .long CSYM(sys_setresuid) | ||
1082 | .long CSYM(sys_getresuid) // 165 | ||
1083 | .long CSYM(sys_ni_syscall) // for vm86 | ||
1084 | .long CSYM(sys_ni_syscall) // sys_query_module | ||
1085 | .long CSYM(sys_poll) | ||
1086 | .long CSYM(sys_nfsservctl) | ||
1087 | .long CSYM(sys_setresgid) // 170 | ||
1088 | .long CSYM(sys_getresgid) | ||
1089 | .long CSYM(sys_prctl) | ||
1090 | .long sys_rt_sigreturn_wrapper | ||
1091 | .long CSYM(sys_rt_sigaction) | ||
1092 | .long CSYM(sys_rt_sigprocmask) // 175 | ||
1093 | .long CSYM(sys_rt_sigpending) | ||
1094 | .long CSYM(sys_rt_sigtimedwait) | ||
1095 | .long CSYM(sys_rt_sigqueueinfo) | ||
1096 | .long sys_rt_sigsuspend_wrapper | ||
1097 | .long CSYM(sys_pread64) // 180 | ||
1098 | .long CSYM(sys_pwrite64) | ||
1099 | .long CSYM(sys_lchown) | ||
1100 | .long CSYM(sys_getcwd) | ||
1101 | .long CSYM(sys_capget) | ||
1102 | .long CSYM(sys_capset) // 185 | ||
1103 | .long CSYM(sys_sigaltstack) | ||
1104 | .long CSYM(sys_sendfile) | ||
1105 | .long CSYM(sys_ni_syscall) // streams1 | ||
1106 | .long CSYM(sys_ni_syscall) // streams2 | ||
1107 | .long sys_vfork_wrapper // 190 | ||
1108 | .long CSYM(sys_ni_syscall) | ||
1109 | .long CSYM(sys_mmap2) | ||
1110 | .long CSYM(sys_truncate64) | ||
1111 | .long CSYM(sys_ftruncate64) | ||
1112 | .long CSYM(sys_stat64) // 195 | ||
1113 | .long CSYM(sys_lstat64) | ||
1114 | .long CSYM(sys_fstat64) | ||
1115 | .long CSYM(sys_fcntl64) | ||
1116 | .long CSYM(sys_getdents64) | ||
1117 | .long CSYM(sys_pivot_root) // 200 | ||
1118 | .long CSYM(sys_gettid) | ||
1119 | .long CSYM(sys_tkill) | ||
1120 | sys_call_table_end: | ||
1121 | C_END(sys_call_table) | ||