author     GuanXuetao <gxt@mprc.pku.edu.cn>    2011-01-15 05:15:45 -0500
committer  GuanXuetao <gxt@mprc.pku.edu.cn>    2011-03-16 21:19:06 -0400
commit     141c943fd4b323bae2b47f67743dba96134afb1f (patch)
tree       0482b81478e8b40ce06eeebe7f4ed88aafc593c0 /arch/unicore32/kernel/entry.S
parent     79725df5786d2fa48f582b116ea1d74193cc96ca (diff)
unicore32 core architecture: low level entry and setup codes
This patch implements the low-level entry and setup code.
Signed-off-by: Guan Xuetao <gxt@mprc.pku.edu.cn>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/unicore32/kernel/entry.S')
-rw-r--r--  arch/unicore32/kernel/entry.S  824
1 file changed, 824 insertions, 0 deletions
diff --git a/arch/unicore32/kernel/entry.S b/arch/unicore32/kernel/entry.S
new file mode 100644
index 000000000000..83698b7c8f5b
--- /dev/null
+++ b/arch/unicore32/kernel/entry.S
@@ -0,0 +1,824 @@
/*
 * linux/arch/unicore32/kernel/entry.S
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Low-level vector interface routines
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/unistd.h>
#include <generated/asm-offsets.h>
#include "debug-macro.S"

@
@ Most of the stack format comes from struct pt_regs, but with
@ the addition of 8 bytes for storing syscall args 5 and 6.
@
#define S_OFF		8
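@ (vector_swi pushes r4 and r5 just below the pt_regs frame, so the
@ syscall wrappers further down add S_OFF back to sp to reach the frame)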
29 | |||
30 | /* | ||
31 | * The SWI code relies on the fact that R0 is at the bottom of the stack | ||
32 | * (due to slow/fast restore user regs). | ||
33 | */ | ||
34 | #if S_R0 != 0 | ||
35 | #error "Please fix" | ||
36 | #endif | ||
37 | |||
38 | .macro zero_fp | ||
39 | #ifdef CONFIG_FRAME_POINTER | ||
40 | mov fp, #0 | ||
41 | #endif | ||
42 | .endm | ||
43 | |||
44 | .macro alignment_trap, rtemp | ||
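        @ reload the cached control register value (cr_alignment) into
        @ p0.c1 so the alignment trap setting applies while in kernel mode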
#ifdef CONFIG_ALIGNMENT_TRAP
        ldw     \rtemp, .LCcralign
        ldw     \rtemp, [\rtemp]
        movc    p0.c1, \rtemp, #0
#endif
        .endm

        .macro  load_user_sp_lr, rd, rtemp, offset = 0
        mov     \rtemp, asr
        xor     \rtemp, \rtemp, #(PRIV_MODE ^ SUSR_MODE)
        mov.a   asr, \rtemp                     @ switch to the SUSR mode

        ldw     sp, [\rd+], #\offset            @ load sp_user
        ldw     lr, [\rd+], #\offset + 4        @ load lr_user

        xor     \rtemp, \rtemp, #(PRIV_MODE ^ SUSR_MODE)
        mov.a   asr, \rtemp                     @ switch back to the PRIV mode
        .endm

        .macro  priv_exit, rpsr
        mov.a   bsr, \rpsr
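        @ the ldm.b below copies bsr back into asr as it loads pc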
        ldm.w   (r0 - r15), [sp]+
        ldm.b   (r16 - pc), [sp]+               @ load r0 - pc, asr
        .endm

        .macro  restore_user_regs, fast = 0, offset = 0
        ldw     r1, [sp+], #\offset + S_PSR     @ get calling asr
        ldw     lr, [sp+], #\offset + S_PC      @ get pc
        mov.a   bsr, r1                         @ save in bsr_priv
        .if     \fast
        add     sp, sp, #\offset + S_R1         @ r0 is syscall return value
        ldm.w   (r1 - r15), [sp]+               @ get calling r1 - r15
        ldur    (r16 - lr), [sp]+               @ get calling r16 - lr
        .else
        ldm.w   (r0 - r15), [sp]+               @ get calling r0 - r15
        ldur    (r16 - lr), [sp]+               @ get calling r16 - lr
        .endif
        nop
        add     sp, sp, #S_FRAME_SIZE - S_R16
        mov.a   pc, lr                          @ return
                                                @ and move bsr_priv into asr
        .endm

        .macro  get_thread_info, rd
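        @ thread_info sits at the base of the 8KB kernel stack;
        @ clearing the low 13 bits of sp yields its address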
        mov     \rd, sp >> #13
        mov     \rd, \rd << #13
        .endm

        .macro  get_irqnr_and_base, irqnr, irqstat, base, tmp
        ldw     \base, =(io_p2v(PKUNITY_INTC_BASE))
        ldw     \irqstat, [\base+], #0xC        @ INTC_ICIP
        ldw     \tmp, [\base+], #0x4            @ INTC_ICMR
        and.a   \irqstat, \irqstat, \tmp
        beq     1001f
        cntlz   \irqnr, \irqstat
        rsub    \irqnr, \irqnr, #31
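        @ convert the highest pending status bit into its bit index
        @ (31 - count of leading zeros) to get the irq number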
1001:   /* EQ will be set if no irqs pending */
        .endm

#ifdef CONFIG_DEBUG_LL
        .macro  printreg, reg, temp
        adr     \temp, 901f
        stm     (r0-r3), [\temp]+
        stw     lr, [\temp+], #0x10
        mov     r0, \reg
        b.l     printhex8
        mov     r0, #':'
        b.l     printch
        mov     r0, pc
        b.l     printhex8
        adr     r0, 902f
        b.l     printascii
        adr     \temp, 901f
        ldm     (r0-r3), [\temp]+
        ldw     lr, [\temp+], #0x10
        b       903f
901:    .word   0, 0, 0, 0, 0           @ r0-r3, lr
902:    .asciz  ": epip4d\n"
        .align
903:
        .endm
#endif

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - r0 to r6.
 *
 * Note that tbl == why is intentional.
 *
 * We must set at least "tsk" and "why" when calling ret_with_reschedule.
 */
scno    .req    r21             @ syscall number
tbl     .req    r22             @ syscall table pointer
why     .req    r22             @ Linux syscall (!= 0)
tsk     .req    r23             @ current thread_info

/*
 * Interrupt handling.  Preserves r17, r18, r19
 */
        .macro  intr_handler
1:      get_irqnr_and_base r0, r6, r5, lr
        beq     2f
        mov     r1, sp
        @
        @ routine called with r0 = irq number, r1 = struct pt_regs *
        @
        adr     lr, 1b
        b       asm_do_IRQ
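        @ asm_do_IRQ returns through lr to 1b, so we keep looping
        @ until no interrupts remain pending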
2:
        .endm

/*
 * PRIV mode handlers
 */
        .macro  priv_entry
        sub     sp, sp, #(S_FRAME_SIZE - 4)
        stm     (r1 - r15), [sp]+
        add     r5, sp, #S_R15
        stm     (r16 - r28), [r5]+

        ldm     (r1 - r3), [r0]+
        add     r5, sp, #S_SP - 4       @ here for interlock avoidance
        mov     r4, #-1                 @  ""  ""  ""  ""
        add     r0, sp, #(S_FRAME_SIZE - 4)
        stw.w   r1, [sp+], #-4          @ save the "real" r0 copied
                                        @ from the exception stack

        mov     r1, lr

        @
        @ We are now ready to fill in the remaining blanks on the stack:
        @
        @  r0 - sp_priv
        @  r1 - lr_priv
        @  r2 - lr_<exception>, already fixed up for correct return/restart
        @  r3 - bsr_<exception>
        @  r4 - orig_r0 (see pt_regs definition in ptrace.h)
        @
        stm     (r0 - r4), [r5]+
        .endm

/*
 * User mode handlers
 *
 */
        .macro  user_entry
        sub     sp, sp, #S_FRAME_SIZE
        stm     (r1 - r15), [sp+]
        add     r4, sp, #S_R16
        stm     (r16 - r28), [r4]+

        ldm     (r1 - r3), [r0]+
        add     r0, sp, #S_PC           @ here for interlock avoidance
        mov     r4, #-1                 @  ""  ""  ""  ""

        stw     r1, [sp]                @ save the "real" r0 copied
                                        @ from the exception stack

        @
        @ We are now ready to fill in the remaining blanks on the stack:
        @
        @  r2 - lr_<exception>, already fixed up for correct return/restart
        @  r3 - bsr_<exception>
        @  r4 - orig_r0 (see pt_regs definition in ptrace.h)
        @
        @ Also, separately save sp_user and lr_user
        @
        stm     (r2 - r4), [r0]+
        stur    (sp, lr), [r0-]

        @
        @ Enable the alignment trap while in kernel mode
        @
        alignment_trap r0

        @
        @ Clear FP to mark the first stack frame
        @
        zero_fp
        .endm
225 | |||
226 | .text | ||
227 | |||
228 | @ | ||
229 | @ __invalid - generic code for failed exception | ||
230 | @ (re-entrant version of handlers) | ||
231 | @ | ||
232 | __invalid: | ||
233 | sub sp, sp, #S_FRAME_SIZE | ||
234 | stm (r1 - r15), [sp+] | ||
235 | add r1, sp, #S_R16 | ||
236 | stm (r16 - r28, sp, lr), [r1]+ | ||
237 | |||
238 | zero_fp | ||
239 | |||
240 | ldm (r4 - r6), [r0]+ | ||
241 | add r0, sp, #S_PC @ here for interlock avoidance | ||
242 | mov r7, #-1 @ "" "" "" "" | ||
243 | stw r4, [sp] @ save preserved r0 | ||
244 | stm (r5 - r7), [r0]+ @ lr_<exception>, | ||
245 | @ asr_<exception>, "old_r0" | ||
246 | |||
247 | mov r0, sp | ||
248 | mov r1, asr | ||
249 | b bad_mode | ||
250 | ENDPROC(__invalid) | ||
251 | |||
252 | .align 5 | ||
253 | __dabt_priv: | ||
254 | priv_entry | ||
255 | |||
256 | @ | ||
257 | @ get ready to re-enable interrupts if appropriate | ||
258 | @ | ||
259 | mov r17, asr | ||
260 | cand.a r3, #PSR_I_BIT | ||
261 | bne 1f | ||
262 | andn r17, r17, #PSR_I_BIT | ||
263 | 1: | ||
264 | |||
265 | @ | ||
266 | @ Call the processor-specific abort handler: | ||
267 | @ | ||
268 | @ r2 - aborted context pc | ||
269 | @ r3 - aborted context asr | ||
270 | @ | ||
271 | @ The abort handler must return the aborted address in r0, and | ||
272 | @ the fault status register in r1. | ||
273 | @ | ||
274 | movc r1, p0.c3, #0 @ get FSR | ||
275 | movc r0, p0.c4, #0 @ get FAR | ||
276 | |||
277 | @ | ||
278 | @ set desired INTR state, then call main handler | ||
279 | @ | ||
280 | mov.a asr, r17 | ||
281 | mov r2, sp | ||
282 | b.l do_DataAbort | ||
283 | |||
284 | @ | ||
285 | @ INTRs off again before pulling preserved data off the stack | ||
286 | @ | ||
287 | disable_irq r0 | ||
288 | |||
289 | @ | ||
290 | @ restore BSR and restart the instruction | ||
291 | @ | ||
292 | ldw r2, [sp+], #S_PSR | ||
293 | priv_exit r2 @ return from exception | ||
294 | ENDPROC(__dabt_priv) | ||
295 | |||
296 | .align 5 | ||
297 | __intr_priv: | ||
298 | priv_entry | ||
299 | |||
300 | intr_handler | ||
301 | |||
302 | mov r0, #0 @ epip4d | ||
303 | movc p0.c5, r0, #14 | ||
304 | nop; nop; nop; nop; nop; nop; nop; nop | ||
305 | |||
306 | ldw r4, [sp+], #S_PSR @ irqs are already disabled | ||
307 | |||
308 | priv_exit r4 @ return from exception | ||
309 | ENDPROC(__intr_priv) | ||
310 | |||
311 | .ltorg | ||
312 | |||
313 | .align 5 | ||
314 | __extn_priv: | ||
315 | priv_entry | ||
316 | |||
317 | mov r0, sp @ struct pt_regs *regs | ||
318 | mov r1, asr | ||
319 | b bad_mode @ not supported | ||
320 | ENDPROC(__extn_priv) | ||
321 | |||
322 | .align 5 | ||
323 | __pabt_priv: | ||
324 | priv_entry | ||
325 | |||
326 | @ | ||
327 | @ re-enable interrupts if appropriate | ||
328 | @ | ||
329 | mov r17, asr | ||
330 | cand.a r3, #PSR_I_BIT | ||
331 | bne 1f | ||
332 | andn r17, r17, #PSR_I_BIT | ||
333 | 1: | ||
334 | |||
335 | @ | ||
336 | @ set args, then call main handler | ||
337 | @ | ||
338 | @ r0 - address of faulting instruction | ||
339 | @ r1 - pointer to registers on stack | ||
340 | @ | ||
341 | mov r0, r2 @ pass address of aborted instruction | ||
342 | mov r1, #5 | ||
343 | mov.a asr, r17 | ||
344 | mov r2, sp @ regs | ||
345 | b.l do_PrefetchAbort @ call abort handler | ||
346 | |||
347 | @ | ||
348 | @ INTRs off again before pulling preserved data off the stack | ||
349 | @ | ||
350 | disable_irq r0 | ||
351 | |||
352 | @ | ||
353 | @ restore BSR and restart the instruction | ||
354 | @ | ||
355 | ldw r2, [sp+], #S_PSR | ||
356 | priv_exit r2 @ return from exception | ||
357 | ENDPROC(__pabt_priv) | ||
358 | |||
359 | .align 5 | ||
360 | .LCcralign: | ||
361 | .word cr_alignment | ||
362 | |||
363 | .align 5 | ||
364 | __dabt_user: | ||
365 | user_entry | ||
366 | |||
367 | #ifdef CONFIG_UNICORE_FPU_F64 | ||
368 | cff ip, s31 | ||
369 | cand.a ip, #0x08000000 @ FPU execption traps? | ||
370 | beq 209f | ||
371 | |||
372 | ldw ip, [sp+], #S_PC | ||
373 | add ip, ip, #4 | ||
374 | stw ip, [sp+], #S_PC | ||
375 | @ | ||
376 | @ fall through to the emulation code, which returns using r19 if | ||
377 | @ it has emulated the instruction, or the more conventional lr | ||
378 | @ if we are to treat this as a real extended instruction | ||
379 | @ | ||
380 | @ r0 - instruction | ||
381 | @ | ||
382 | 1: ldw.u r0, [r2] | ||
383 | adr r19, ret_from_exception | ||
384 | adr lr, 209f | ||
385 | @ | ||
386 | @ fallthrough to call do_uc_f64 | ||
387 | @ | ||
388 | /* | ||
389 | * Check whether the instruction is a co-processor instruction. | ||
390 | * If yes, we need to call the relevant co-processor handler. | ||
391 | * | ||
392 | * Note that we don't do a full check here for the co-processor | ||
393 | * instructions; all instructions with bit 27 set are well | ||
394 | * defined. The only instructions that should fault are the | ||
395 | * co-processor instructions. | ||
396 | * | ||
397 | * Emulators may wish to make use of the following registers: | ||
398 | * r0 = instruction opcode. | ||
399 | * r2 = PC | ||
400 | * r19 = normal "successful" return address | ||
401 | * r20 = this threads thread_info structure. | ||
402 | * lr = unrecognised instruction return address | ||
403 | */ | ||
404 | get_thread_info r20 @ get current thread | ||
405 | and r8, r0, #0x00003c00 @ mask out CP number | ||
406 | mov r7, #1 | ||
407 | stb r7, [r20+], #TI_USED_CP + 2 @ set appropriate used_cp[] | ||
408 | |||
409 | @ F64 hardware support entry point. | ||
410 | @ r0 = faulted instruction | ||
411 | @ r19 = return address | ||
412 | @ r20 = fp_state | ||
413 | enable_irq r4 | ||
414 | add r20, r20, #TI_FPSTATE @ r20 = workspace | ||
415 | cff r1, s31 @ get fpu FPSCR | ||
416 | andn r2, r1, #0x08000000 | ||
417 | ctf r2, s31 @ clear 27 bit | ||
418 | mov r2, sp @ nothing stacked - regdump is at TOS | ||
419 | mov lr, r19 @ setup for a return to the user code | ||
420 | |||
421 | @ Now call the C code to package up the bounce to the support code | ||
422 | @ r0 holds the trigger instruction | ||
423 | @ r1 holds the FPSCR value | ||
424 | @ r2 pointer to register dump | ||
425 | b ucf64_exchandler | ||
426 | 209: | ||
427 | #endif | ||
        @
        @ Call the processor-specific abort handler:
        @
        @  r2 - aborted context pc
        @  r3 - aborted context asr
        @
        @ The abort handler must return the aborted address in r0, and
        @ the fault status register in r1.
        @
        movc    r1, p0.c3, #0           @ get FSR
        movc    r0, p0.c4, #0           @ get FAR

        @
        @ INTRs on, then call the main handler
        @
        enable_irq r2
        mov     r2, sp
        adr     lr, ret_from_exception
        b       do_DataAbort
ENDPROC(__dabt_user)

        .align  5
__intr_user:
        user_entry

        get_thread_info tsk

        intr_handler

        mov     why, #0
        b       ret_to_user
ENDPROC(__intr_user)

        .ltorg

        .align  5
__extn_user:
        user_entry

        mov     r0, sp
        mov     r1, asr
        b       bad_mode
ENDPROC(__extn_user)

        .align  5
__pabt_user:
        user_entry

        mov     r0, r2                  @ pass address of aborted instruction.
        mov     r1, #5
        enable_irq r1                   @ Enable interrupts
        mov     r2, sp                  @ regs
        b.l     do_PrefetchAbort        @ call abort handler
        /* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
        get_thread_info tsk
        mov     why, #0
        b       ret_to_user
ENDPROC(__pabt_user)
ENDPROC(ret_from_exception)
491 | |||
492 | /* | ||
493 | * Register switch for UniCore V2 processors | ||
494 | * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info | ||
495 | * previous and next are guaranteed not to be the same. | ||
496 | */ | ||
497 | ENTRY(__switch_to) | ||
498 | add ip, r1, #TI_CPU_SAVE | ||
499 | stm.w (r4 - r15), [ip]+ | ||
500 | stm.w (r16 - r27, sp, lr), [ip]+ | ||
501 | |||
502 | #ifdef CONFIG_UNICORE_FPU_F64 | ||
503 | add ip, r1, #TI_FPSTATE | ||
504 | sfm.w (f0 - f7 ), [ip]+ | ||
505 | sfm.w (f8 - f15), [ip]+ | ||
506 | sfm.w (f16 - f23), [ip]+ | ||
507 | sfm.w (f24 - f31), [ip]+ | ||
508 | cff r4, s31 | ||
509 | stw r4, [ip] | ||
510 | |||
511 | add ip, r2, #TI_FPSTATE | ||
512 | lfm.w (f0 - f7 ), [ip]+ | ||
513 | lfm.w (f8 - f15), [ip]+ | ||
514 | lfm.w (f16 - f23), [ip]+ | ||
515 | lfm.w (f24 - f31), [ip]+ | ||
516 | ldw r4, [ip] | ||
517 | ctf r4, s31 | ||
518 | #endif | ||
519 | add ip, r2, #TI_CPU_SAVE | ||
520 | ldm.w (r4 - r15), [ip]+ | ||
521 | ldm (r16 - r27, sp, pc), [ip]+ @ Load all regs saved previously | ||
522 | ENDPROC(__switch_to) | ||
523 | |||
524 | .align 5 | ||
525 | /* | ||
526 | * This is the fast syscall return path. We do as little as | ||
527 | * possible here, and this includes saving r0 back into the PRIV | ||
528 | * stack. | ||
529 | */ | ||
530 | ret_fast_syscall: | ||
531 | disable_irq r1 @ disable interrupts | ||
532 | ldw r1, [tsk+], #TI_FLAGS | ||
533 | cand.a r1, #_TIF_WORK_MASK | ||
534 | bne fast_work_pending | ||
535 | |||
536 | @ fast_restore_user_regs | ||
537 | restore_user_regs fast = 1, offset = S_OFF | ||
538 | |||
539 | /* | ||
540 | * Ok, we need to do extra processing, enter the slow path. | ||
541 | */ | ||
542 | fast_work_pending: | ||
543 | stw.w r0, [sp+], #S_R0+S_OFF @ returned r0 | ||
544 | work_pending: | ||
545 | cand.a r1, #_TIF_NEED_RESCHED | ||
546 | bne work_resched | ||
547 | cand.a r1, #_TIF_SIGPENDING|_TIF_NOTIFY_RESUME | ||
548 | beq no_work_pending | ||
549 | mov r0, sp @ 'regs' | ||
550 | mov r2, why @ 'syscall' | ||
551 | cand.a r1, #_TIF_SIGPENDING @ delivering a signal? | ||
552 | cmovne why, #0 @ prevent further restarts | ||
553 | b.l do_notify_resume | ||
554 | b ret_slow_syscall @ Check work again | ||
555 | |||
556 | work_resched: | ||
557 | b.l schedule | ||
558 | /* | ||
559 | * "slow" syscall return path. "why" tells us if this was a real syscall. | ||
560 | */ | ||
561 | ENTRY(ret_to_user) | ||
562 | ret_slow_syscall: | ||
563 | disable_irq r1 @ disable interrupts | ||
564 | get_thread_info tsk @ epip4d, one path error?! | ||
565 | ldw r1, [tsk+], #TI_FLAGS | ||
566 | cand.a r1, #_TIF_WORK_MASK | ||
567 | bne work_pending | ||
568 | no_work_pending: | ||
569 | @ slow_restore_user_regs | ||
570 | restore_user_regs fast = 0, offset = 0 | ||
571 | ENDPROC(ret_to_user) | ||
572 | |||
573 | /* | ||
574 | * This is how we return from a fork. | ||
575 | */ | ||
576 | ENTRY(ret_from_fork) | ||
577 | b.l schedule_tail | ||
578 | get_thread_info tsk | ||
579 | ldw r1, [tsk+], #TI_FLAGS @ check for syscall tracing | ||
580 | mov why, #1 | ||
581 | cand.a r1, #_TIF_SYSCALL_TRACE @ are we tracing syscalls? | ||
582 | beq ret_slow_syscall | ||
583 | mov r1, sp | ||
584 | mov r0, #1 @ trace exit [IP = 1] | ||
585 | b.l syscall_trace | ||
586 | b ret_slow_syscall | ||
587 | ENDPROC(ret_from_fork) | ||
588 | |||
589 | /*============================================================================= | ||
590 | * SWI handler | ||
591 | *----------------------------------------------------------------------------- | ||
592 | */ | ||
593 | .align 5 | ||
594 | ENTRY(vector_swi) | ||
595 | sub sp, sp, #S_FRAME_SIZE | ||
596 | stm (r0 - r15), [sp]+ @ Calling r0 - r15 | ||
597 | add r8, sp, #S_R16 | ||
598 | stm (r16 - r28), [r8]+ @ Calling r16 - r28 | ||
599 | add r8, sp, #S_PC | ||
600 | stur (sp, lr), [r8-] @ Calling sp, lr | ||
601 | mov r8, bsr @ called from non-REAL mode | ||
602 | stw lr, [sp+], #S_PC @ Save calling PC | ||
603 | stw r8, [sp+], #S_PSR @ Save ASR | ||
604 | stw r0, [sp+], #S_OLD_R0 @ Save OLD_R0 | ||
605 | zero_fp | ||
606 | |||
607 | /* | ||
608 | * Get the system call number. | ||
609 | */ | ||
610 | sub ip, lr, #4 | ||
611 | ldw.u scno, [ip] @ get SWI instruction | ||
612 | |||
613 | #ifdef CONFIG_ALIGNMENT_TRAP | ||
614 | ldw ip, __cr_alignment | ||
615 | ldw ip, [ip] | ||
616 | movc p0.c1, ip, #0 @ update control register | ||
617 | #endif | ||
618 | enable_irq ip | ||
619 | |||
620 | get_thread_info tsk | ||
621 | ldw tbl, =sys_call_table @ load syscall table pointer | ||
622 | |||
623 | andn scno, scno, #0xff000000 @ mask off SWI op-code | ||
624 | andn scno, scno, #0x00ff0000 @ mask off SWI op-code | ||
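        @ scno now holds the syscall number from the low 16 bits
        @ of the SWI instruction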
625 | |||
626 | stm.w (r4, r5), [sp-] @ push fifth and sixth args | ||
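        @ sp now sits S_OFF (8) bytes below the pt_regs frame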
        ldw     ip, [tsk+], #TI_FLAGS           @ check for syscall tracing
        cand.a  ip, #_TIF_SYSCALL_TRACE         @ are we tracing syscalls?
        bne     __sys_trace

        csub.a  scno, #__NR_syscalls            @ check upper syscall limit
        adr     lr, ret_fast_syscall            @ return address
        bea     1f
        ldw     pc, [tbl+], scno << #2          @ call sys_* routine
1:
        add     r1, sp, #S_OFF
2:      mov     why, #0                         @ no longer a real syscall
        b       sys_ni_syscall                  @ not private func

/*
 * This is the really slow path.  We're going to be doing
 * context switches, and waiting for our parent to respond.
 */
__sys_trace:
        mov     r2, scno
        add     r1, sp, #S_OFF
        mov     r0, #0                          @ trace entry [IP = 0]
        b.l     syscall_trace

        adr     lr, __sys_trace_return          @ return address
        mov     scno, r0                        @ syscall number (possibly new)
        add     r1, sp, #S_R0 + S_OFF           @ pointer to regs
        csub.a  scno, #__NR_syscalls            @ check upper syscall limit
        bea     2b
        ldm     (r0 - r3), [r1]+                @ have to reload r0 - r3
        ldw     pc, [tbl+], scno << #2          @ call sys_* routine

__sys_trace_return:
        stw.w   r0, [sp+], #S_R0 + S_OFF        @ save returned r0
        mov     r2, scno
        mov     r1, sp
        mov     r0, #1                          @ trace exit [IP = 1]
        b.l     syscall_trace
        b       ret_slow_syscall

        .align  5
#ifdef CONFIG_ALIGNMENT_TRAP
        .type   __cr_alignment, #object
__cr_alignment:
        .word   cr_alignment
#endif
        .ltorg

ENTRY(sys_execve)
        add     r3, sp, #S_OFF
        b       __sys_execve
ENDPROC(sys_execve)

ENTRY(sys_clone)
        add     ip, sp, #S_OFF
        stw     ip, [sp+], #4
        b       __sys_clone
ENDPROC(sys_clone)

ENTRY(sys_rt_sigreturn)
        add     r0, sp, #S_OFF
        mov     why, #0                 @ prevent syscall restart handling
        b       __sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn)

ENTRY(sys_sigaltstack)
        ldw     r2, [sp+], #S_OFF + S_SP
        b       do_sigaltstack
ENDPROC(sys_sigaltstack)

        __INIT

/*
 * Vector stubs.
 *
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not
 * exceed 0x300 bytes.
 *
 * Common stub entry macro:
 *   Enter in INTR mode, bsr = PRIV/USER ASR, lr = PRIV/USER PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
        .macro  vector_stub, name, mode
        .align  5

vector_\name:
        @
        @ Save r0, lr_<exception> (parent PC) and bsr_<exception>
        @ (parent ASR)
        @
        stw     r0, [sp]
        stw     lr, [sp+], #4           @ save r0, lr
        mov     lr, bsr
        stw     lr, [sp+], #8           @ save bsr

        @
        @ Prepare for PRIV mode.  INTRs remain disabled.
        @
        mov     r0, asr
        xor     r0, r0, #(\mode ^ PRIV_MODE)
        mov.a   bsr, r0

        @
        @ the branch table must immediately follow this code
        @
        and     lr, lr, #0x03
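        @ the low 2 bits of the parent ASR select the handler slot
        @ (0 = USER ... 3 = PRIV) in the table following the macro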
        add     lr, lr, #1
        mov     r0, sp
        ldw     lr, [pc+], lr << #2
        mov.a   pc, lr                  @ branch to handler in PRIV mode
ENDPROC(vector_\name)
        .align  2
        @ handler addresses follow this label
        .endm

        .globl  __stubs_start
__stubs_start:
/*
 * Interrupt dispatcher
 */
        vector_stub     intr, INTR_MODE

        .long   __intr_user             @  0  (USER)
        .long   __invalid               @  1
        .long   __invalid               @  2
        .long   __intr_priv             @  3  (PRIV)

/*
 * Data abort dispatcher
 * Enter in ABT mode, bsr = USER ASR, lr = USER PC
 */
        vector_stub     dabt, ABRT_MODE

        .long   __dabt_user             @  0  (USER)
        .long   __invalid               @  1
        .long   __invalid               @  2  (INTR)
        .long   __dabt_priv             @  3  (PRIV)

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, bsr = USER ASR, lr = USER PC
 */
        vector_stub     pabt, ABRT_MODE

        .long   __pabt_user             @  0  (USER)
        .long   __invalid               @  1
        .long   __invalid               @  2  (INTR)
        .long   __pabt_priv             @  3  (PRIV)

/*
 * Undef instr entry dispatcher
 * Enter in EXTN mode, bsr = PRIV/USER ASR, lr = PRIV/USER PC
 */
        vector_stub     extn, EXTN_MODE

        .long   __extn_user             @  0  (USER)
        .long   __invalid               @  1
        .long   __invalid               @  2  (INTR)
        .long   __extn_priv             @  3  (PRIV)

/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
        .align  5

.LCvswi:
        .word   vector_swi

        .globl  __stubs_end
__stubs_end:

        .equ    stubs_offset, __vectors_start + 0x200 - __stubs_start
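@ the stubs are assembled at __stubs_start but execute at 0xffff0200;
@ branches from the vector page below add stubs_offset to reach them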
802 | |||
803 | .globl __vectors_start | ||
804 | __vectors_start: | ||
805 | jepriv SYS_ERROR0 | ||
806 | b vector_extn + stubs_offset | ||
807 | ldw pc, .LCvswi + stubs_offset | ||
808 | b vector_pabt + stubs_offset | ||
809 | b vector_dabt + stubs_offset | ||
810 | jepriv SYS_ERROR0 | ||
811 | b vector_intr + stubs_offset | ||
812 | jepriv SYS_ERROR0 | ||
813 | |||
814 | .globl __vectors_end | ||
815 | __vectors_end: | ||
816 | |||
817 | .data | ||
818 | |||
819 | .globl cr_alignment | ||
820 | .globl cr_no_alignment | ||
821 | cr_alignment: | ||
822 | .space 4 | ||
823 | cr_no_alignment: | ||
824 | .space 4 | ||