path: root/arch/sh64/kernel/entry.S
author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/sh64/kernel/entry.S
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'arch/sh64/kernel/entry.S')
-rw-r--r--  arch/sh64/kernel/entry.S | 2103
1 file changed, 2103 insertions(+), 0 deletions(-)
diff --git a/arch/sh64/kernel/entry.S b/arch/sh64/kernel/entry.S
new file mode 100644
index 000000000000..2e2cfe20b426
--- /dev/null
+++ b/arch/sh64/kernel/entry.S
@@ -0,0 +1,2103 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * arch/sh64/kernel/entry.S
7 *
8 * Copyright (C) 2000, 2001 Paolo Alberelli
9 * Copyright (C) 2004, 2005 Paul Mundt
10 * Copyright (C) 2003, 2004 Richard Curnow
11 *
12 */
13
14#include <linux/config.h>
15#include <linux/errno.h>
16#include <linux/sys.h>
17
18#include <asm/processor.h>
19#include <asm/registers.h>
20#include <asm/unistd.h>
21#include <asm/thread_info.h>
22#include <asm/asm-offsets.h>
23
24/*
25 * SR fields.
26 */
27#define SR_ASID_MASK 0x00ff0000
28#define SR_FD_MASK 0x00008000
29#define SR_SS 0x08000000
30#define SR_BL 0x10000000
31#define SR_MD 0x40000000
32
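/* Illustrative C sketch (not part of this file) of how the SR fields above
 * are picked apart; the handlers below do the same with getcon/shlri/andi
 * sequences, e.g. testing bit 30 (SR.MD) to see whether the faulting
 * context was privileged. */
#include <stdint.h>

static inline unsigned int sr_asid(uint64_t sr)
{
	return (sr & 0x00ff0000u) >> 16;	/* ASID lives in bits 16-23 */
}

static inline int sr_is_privileged(uint64_t sr)
{
	return (sr >> 30) & 1;			/* SR.MD, as tested in handle_exception */
}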
33/*
34 * Event code.
35 */
36#define EVENT_INTERRUPT 0
37#define EVENT_FAULT_TLB 1
38#define EVENT_FAULT_NOT_TLB 2
39#define EVENT_DEBUG 3
40
41/* EXPEVT values */
42#define RESET_CAUSE 0x20
43#define DEBUGSS_CAUSE 0x980
44
45/*
46 * Frame layout. Quad index.
47 */
48#define FRAME_T(x) FRAME_TBASE+(x*8)
49#define FRAME_R(x) FRAME_RBASE+(x*8)
50#define FRAME_S(x) FRAME_SBASE+(x*8)
51#define FSPC 0
52#define FSSR 1
53#define FSYSCALL_ID 2
54
55/* Arrange the save frame to be a multiple of 32 bytes long */
56#define FRAME_SBASE 0
57#define FRAME_RBASE (FRAME_SBASE+(3*8)) /* SYSCALL_ID - SSR - SPC */
58#define FRAME_TBASE (FRAME_RBASE+(63*8)) /* r0 - r62 */
 59#define FRAME_PBASE (FRAME_TBASE+(8*8)) /* tr0 - tr7 */
60#define FRAME_SIZE (FRAME_PBASE+(2*8)) /* pad0-pad1 */
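/* A standalone check (illustrative only) that the layout above really is a
 * multiple of 32 bytes, as the comment claims:
 * 3*8 + 63*8 + 8*8 + 2*8 = 608 = 19*32. */
#include <assert.h>

enum {
	SBASE = 0,              /* SPC, SSR, SYSCALL_ID */
	RBASE = SBASE + 3 * 8,  /* r0 - r62 */
	TBASE = RBASE + 63 * 8, /* tr0 - tr7 */
	PBASE = TBASE + 8 * 8,  /* pad0 - pad1 */
	SIZE  = PBASE + 2 * 8,  /* 608 bytes */
};

int main(void)
{
	assert(SIZE % 32 == 0);	/* holds: 608 == 19 * 32 */
	return 0;
}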
61
62#define FP_FRAME_SIZE FP_FRAME_BASE+(33*8) /* dr0 - dr31 + fpscr */
63#define FP_FRAME_BASE 0
64
65#define SAVED_R2 0*8
66#define SAVED_R3 1*8
67#define SAVED_R4 2*8
68#define SAVED_R5 3*8
69#define SAVED_R18 4*8
70#define SAVED_R6 5*8
71#define SAVED_TR0 6*8
72
73/* These are the registers saved in the TLB path that aren't saved in the first
74 level of the normal one. */
75#define TLB_SAVED_R25 7*8
76#define TLB_SAVED_TR1 8*8
77#define TLB_SAVED_TR2 9*8
78#define TLB_SAVED_TR3 10*8
79#define TLB_SAVED_TR4 11*8
 80/* Save R0/R1 : PT-migrating compiler currently dishonours -ffixed-r0 and -ffixed-r1 causing
81 breakage otherwise. */
82#define TLB_SAVED_R0 12*8
83#define TLB_SAVED_R1 13*8
84
85#define CLI() \
86 getcon SR, r6; \
87 ori r6, 0xf0, r6; \
88 putcon r6, SR;
89
90#define STI() \
91 getcon SR, r6; \
92 andi r6, ~0xf0, r6; \
93 putcon r6, SR;
94
95#ifdef CONFIG_PREEMPT
96# define preempt_stop() CLI()
97#else
98# define preempt_stop()
99# define resume_kernel restore_all
100#endif
101
102 .section .data, "aw"
103
104#define FAST_TLBMISS_STACK_CACHELINES 4
105#define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES)
106
107/* Register back-up area for all exceptions */
108 .balign 32
109 /* Allow for 16 quadwords to be pushed by fast tlbmiss handling
110 * register saves etc. */
111 .fill FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0
112/* This is 32 byte aligned by construction */
113/* Register back-up area for all exceptions */
114reg_save_area:
115 .quad 0
116 .quad 0
117 .quad 0
118 .quad 0
119
120 .quad 0
121 .quad 0
122 .quad 0
123 .quad 0
124
125 .quad 0
126 .quad 0
127 .quad 0
128 .quad 0
129
130 .quad 0
131 .quad 0
132
133/* Save area for RESVEC exceptions. We cannot use reg_save_area because of
134 * reentrancy. Note this area may be accessed via physical address.
135 * Align so this fits a whole single cache line, for ease of purging.
136 */
137 .balign 32,0,32
138resvec_save_area:
139 .quad 0
140 .quad 0
141 .quad 0
142 .quad 0
143 .quad 0
144 .balign 32,0,32
145
146/* Jump table of 3rd level handlers */
147trap_jtable:
148 .long do_exception_error /* 0x000 */
149 .long do_exception_error /* 0x020 */
150 .long tlb_miss_load /* 0x040 */
151 .long tlb_miss_store /* 0x060 */
152 ! ARTIFICIAL pseudo-EXPEVT setting
153 .long do_debug_interrupt /* 0x080 */
154 .long tlb_miss_load /* 0x0A0 */
155 .long tlb_miss_store /* 0x0C0 */
156 .long do_address_error_load /* 0x0E0 */
157 .long do_address_error_store /* 0x100 */
158#ifdef CONFIG_SH_FPU
159 .long do_fpu_error /* 0x120 */
160#else
161 .long do_exception_error /* 0x120 */
162#endif
163 .long do_exception_error /* 0x140 */
164 .long system_call /* 0x160 */
165 .long do_reserved_inst /* 0x180 */
166 .long do_illegal_slot_inst /* 0x1A0 */
167 .long do_NMI /* 0x1C0 */
168 .long do_exception_error /* 0x1E0 */
169 .rept 15
170 .long do_IRQ /* 0x200 - 0x3C0 */
171 .endr
172 .long do_exception_error /* 0x3E0 */
173 .rept 32
174 .long do_IRQ /* 0x400 - 0x7E0 */
175 .endr
176 .long fpu_error_or_IRQA /* 0x800 */
177 .long fpu_error_or_IRQB /* 0x820 */
178 .long do_IRQ /* 0x840 */
179 .long do_IRQ /* 0x860 */
180 .rept 6
181 .long do_exception_error /* 0x880 - 0x920 */
182 .endr
183 .long do_software_break_point /* 0x940 */
184 .long do_exception_error /* 0x960 */
185 .long do_single_step /* 0x980 */
186
187 .rept 3
188 .long do_exception_error /* 0x9A0 - 0x9E0 */
189 .endr
190 .long do_IRQ /* 0xA00 */
191 .long do_IRQ /* 0xA20 */
192 .long itlb_miss_or_IRQ /* 0xA40 */
193 .long do_IRQ /* 0xA60 */
194 .long do_IRQ /* 0xA80 */
195 .long itlb_miss_or_IRQ /* 0xAA0 */
196 .long do_exception_error /* 0xAC0 */
197 .long do_address_error_exec /* 0xAE0 */
198 .rept 8
199 .long do_exception_error /* 0xB00 - 0xBE0 */
200 .endr
201 .rept 18
202 .long do_IRQ /* 0xC00 - 0xE20 */
203 .endr
204
205 .section .text64, "ax"
206
207/*
208 * --- Exception/Interrupt/Event Handling Section
209 */
210
211/*
212 * VBR and RESVEC blocks.
213 *
214 * First level handler for VBR-based exceptions.
215 *
216 * To avoid waste of space, align to the maximum text block size.
217 * This is assumed to be at most 128 bytes or 32 instructions.
218 * DO NOT EXCEED 32 instructions on the first level handlers !
219 *
220 * Also note that RESVEC is contained within the VBR block
221 * where the room left (1KB - TEXT_SIZE) allows placing
222 * the RESVEC block (at most 512B + TEXT_SIZE).
223 *
224 * So first (and only) level handler for RESVEC-based exceptions.
225 *
 226 * Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss
 227 * and interrupt), register space is very tight until we save onto
 228 * the stack frame, which is done in handle_exception().
229 *
230 */
231
232#define TEXT_SIZE 128
233#define BLOCK_SIZE 1664 /* Dynamic check, 13*128 */
234
235 .balign TEXT_SIZE
236LVBR_block:
237 .space 256, 0 /* Power-on class handler, */
238 /* not required here */
239not_a_tlb_miss:
240 synco /* TAKum03020 (but probably a good idea anyway.) */
241 /* Save original stack pointer into KCR1 */
242 putcon SP, KCR1
243
244 /* Save other original registers into reg_save_area */
245 movi reg_save_area, SP
246 st.q SP, SAVED_R2, r2
247 st.q SP, SAVED_R3, r3
248 st.q SP, SAVED_R4, r4
249 st.q SP, SAVED_R5, r5
250 st.q SP, SAVED_R6, r6
251 st.q SP, SAVED_R18, r18
252 gettr tr0, r3
253 st.q SP, SAVED_TR0, r3
254
255 /* Set args for Non-debug, Not a TLB miss class handler */
256 getcon EXPEVT, r2
257 movi ret_from_exception, r3
258 ori r3, 1, r3
259 movi EVENT_FAULT_NOT_TLB, r4
260 or SP, ZERO, r5
261 getcon KCR1, SP
262 pta handle_exception, tr0
263 blink tr0, ZERO
264
265 .balign 256
266 ! VBR+0x200
267 nop
268 .balign 256
269 ! VBR+0x300
270 nop
271 .balign 256
272 /*
273 * Instead of the natural .balign 1024 place RESVEC here
274 * respecting the final 1KB alignment.
275 */
276 .balign TEXT_SIZE
277 /*
278 * Instead of '.space 1024-TEXT_SIZE' place the RESVEC
279 * block making sure the final alignment is correct.
280 */
281tlb_miss:
282 synco /* TAKum03020 (but probably a good idea anyway.) */
283 putcon SP, KCR1
284 movi reg_save_area, SP
285 /* SP is guaranteed 32-byte aligned. */
286 st.q SP, TLB_SAVED_R0 , r0
287 st.q SP, TLB_SAVED_R1 , r1
288 st.q SP, SAVED_R2 , r2
289 st.q SP, SAVED_R3 , r3
290 st.q SP, SAVED_R4 , r4
291 st.q SP, SAVED_R5 , r5
292 st.q SP, SAVED_R6 , r6
293 st.q SP, SAVED_R18, r18
294
295 /* Save R25 for safety; as/ld may want to use it to achieve the call to
296 * the code in mm/tlbmiss.c */
297 st.q SP, TLB_SAVED_R25, r25
298 gettr tr0, r2
299 gettr tr1, r3
300 gettr tr2, r4
301 gettr tr3, r5
302 gettr tr4, r18
303 st.q SP, SAVED_TR0 , r2
304 st.q SP, TLB_SAVED_TR1 , r3
305 st.q SP, TLB_SAVED_TR2 , r4
306 st.q SP, TLB_SAVED_TR3 , r5
307 st.q SP, TLB_SAVED_TR4 , r18
308
309 pt do_fast_page_fault, tr0
310 getcon SSR, r2
311 getcon EXPEVT, r3
312 getcon TEA, r4
313 shlri r2, 30, r2
314 andi r2, 1, r2 /* r2 = SSR.MD */
315 blink tr0, LINK
316
317 pt fixup_to_invoke_general_handler, tr1
318
319 /* If the fast path handler fixed the fault, just drop through quickly
320 to the restore code right away to return to the excepting context.
321 */
322 beqi/u r2, 0, tr1
323
324fast_tlb_miss_restore:
325 ld.q SP, SAVED_TR0, r2
326 ld.q SP, TLB_SAVED_TR1, r3
327 ld.q SP, TLB_SAVED_TR2, r4
328
329 ld.q SP, TLB_SAVED_TR3, r5
330 ld.q SP, TLB_SAVED_TR4, r18
331
332 ptabs r2, tr0
333 ptabs r3, tr1
334 ptabs r4, tr2
335 ptabs r5, tr3
336 ptabs r18, tr4
337
338 ld.q SP, TLB_SAVED_R0, r0
339 ld.q SP, TLB_SAVED_R1, r1
340 ld.q SP, SAVED_R2, r2
341 ld.q SP, SAVED_R3, r3
342 ld.q SP, SAVED_R4, r4
343 ld.q SP, SAVED_R5, r5
344 ld.q SP, SAVED_R6, r6
345 ld.q SP, SAVED_R18, r18
346 ld.q SP, TLB_SAVED_R25, r25
347
348 getcon KCR1, SP
349 rte
350 nop /* for safety, in case the code is run on sh5-101 cut1.x */
351
352fixup_to_invoke_general_handler:
353
354 /* OK, new method. Restore stuff that's not expected to get saved into
355 the 'first-level' reg save area, then just fall through to setting
356 up the registers and calling the second-level handler. */
357
358 /* 2nd level expects r2,3,4,5,6,18,tr0 to be saved. So we must restore
359 r25,tr1-4 and save r6 to get into the right state. */
360
361 ld.q SP, TLB_SAVED_TR1, r3
362 ld.q SP, TLB_SAVED_TR2, r4
363 ld.q SP, TLB_SAVED_TR3, r5
364 ld.q SP, TLB_SAVED_TR4, r18
365 ld.q SP, TLB_SAVED_R25, r25
366
367 ld.q SP, TLB_SAVED_R0, r0
368 ld.q SP, TLB_SAVED_R1, r1
369
370 ptabs/u r3, tr1
371 ptabs/u r4, tr2
372 ptabs/u r5, tr3
373 ptabs/u r18, tr4
374
375 /* Set args for Non-debug, TLB miss class handler */
376 getcon EXPEVT, r2
377 movi ret_from_exception, r3
378 ori r3, 1, r3
379 movi EVENT_FAULT_TLB, r4
380 or SP, ZERO, r5
381 getcon KCR1, SP
382 pta handle_exception, tr0
383 blink tr0, ZERO
384
385/* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
386 DOES END UP AT VBR+0x600 */
387 nop
388 nop
389 nop
390 nop
391 nop
392 nop
393
394 .balign 256
395 /* VBR + 0x600 */
396
397interrupt:
398 synco /* TAKum03020 (but probably a good idea anyway.) */
399 /* Save original stack pointer into KCR1 */
400 putcon SP, KCR1
401
402 /* Save other original registers into reg_save_area */
403 movi reg_save_area, SP
404 st.q SP, SAVED_R2, r2
405 st.q SP, SAVED_R3, r3
406 st.q SP, SAVED_R4, r4
407 st.q SP, SAVED_R5, r5
408 st.q SP, SAVED_R6, r6
409 st.q SP, SAVED_R18, r18
410 gettr tr0, r3
411 st.q SP, SAVED_TR0, r3
412
413 /* Set args for interrupt class handler */
414 getcon INTEVT, r2
415 movi ret_from_irq, r3
416 ori r3, 1, r3
417 movi EVENT_INTERRUPT, r4
418 or SP, ZERO, r5
419 getcon KCR1, SP
420 pta handle_exception, tr0
421 blink tr0, ZERO
422 .balign TEXT_SIZE /* let's waste the bare minimum */
423
424LVBR_block_end: /* Marker. Used for total checking */
425
426 .balign 256
427LRESVEC_block:
428 /* Panic handler. Called with MMU off. Possible causes/actions:
429 * - Reset: Jump to program start.
430 * - Single Step: Turn off Single Step & return.
431 * - Others: Call panic handler, passing PC as arg.
432 * (this may need to be extended...)
433 */
434reset_or_panic:
435 synco /* TAKum03020 (but probably a good idea anyway.) */
436 putcon SP, DCR
437 /* First save r0-1 and tr0, as we need to use these */
438 movi resvec_save_area-CONFIG_CACHED_MEMORY_OFFSET, SP
439 st.q SP, 0, r0
440 st.q SP, 8, r1
441 gettr tr0, r0
442 st.q SP, 32, r0
443
444 /* Check cause */
445 getcon EXPEVT, r0
446 movi RESET_CAUSE, r1
447 sub r1, r0, r1 /* r1=0 if reset */
448 movi _stext-CONFIG_CACHED_MEMORY_OFFSET, r0
449 ori r0, 1, r0
450 ptabs r0, tr0
451 beqi r1, 0, tr0 /* Jump to start address if reset */
452
453 getcon EXPEVT, r0
454 movi DEBUGSS_CAUSE, r1
455 sub r1, r0, r1 /* r1=0 if single step */
456 pta single_step_panic, tr0
457 beqi r1, 0, tr0 /* jump if single step */
458
459 /* Now jump to where we save the registers. */
460 movi panic_stash_regs-CONFIG_CACHED_MEMORY_OFFSET, r1
461 ptabs r1, tr0
462 blink tr0, r63
463
464single_step_panic:
465 /* We are in a handler with Single Step set. We need to resume the
466 * handler, by turning on MMU & turning off Single Step. */
467 getcon SSR, r0
468 movi SR_MMU, r1
469 or r0, r1, r0
470 movi ~SR_SS, r1
471 and r0, r1, r0
472 putcon r0, SSR
473 /* Restore EXPEVT, as the rte won't do this */
474 getcon PEXPEVT, r0
475 putcon r0, EXPEVT
476 /* Restore regs */
477 ld.q SP, 32, r0
478 ptabs r0, tr0
479 ld.q SP, 0, r0
480 ld.q SP, 8, r1
481 getcon DCR, SP
482 synco
483 rte
484
485
486 .balign 256
487debug_exception:
488 synco /* TAKum03020 (but probably a good idea anyway.) */
489 /*
490 * Single step/software_break_point first level handler.
491 * Called with MMU off, so the first thing we do is enable it
492 * by doing an rte with appropriate SSR.
493 */
494 putcon SP, DCR
495 /* Save SSR & SPC, together with R0 & R1, as we need to use 2 regs. */
496 movi resvec_save_area-CONFIG_CACHED_MEMORY_OFFSET, SP
497
498 /* With the MMU off, we are bypassing the cache, so purge any
499 * data that will be made stale by the following stores.
500 */
501 ocbp SP, 0
502 synco
503
504 st.q SP, 0, r0
505 st.q SP, 8, r1
506 getcon SPC, r0
507 st.q SP, 16, r0
508 getcon SSR, r0
509 st.q SP, 24, r0
510
511 /* Enable MMU, block exceptions, set priv mode, disable single step */
512 movi SR_MMU | SR_BL | SR_MD, r1
513 or r0, r1, r0
514 movi ~SR_SS, r1
515 and r0, r1, r0
516 putcon r0, SSR
517 /* Force control to debug_exception_2 when rte is executed */
 518 movi debug_exception_2, r0
519 ori r0, 1, r0 /* force SHmedia, just in case */
520 putcon r0, SPC
521 getcon DCR, SP
522 synco
523 rte
 524debug_exception_2:
525 /* Restore saved regs */
526 putcon SP, KCR1
527 movi resvec_save_area, SP
528 ld.q SP, 24, r0
529 putcon r0, SSR
530 ld.q SP, 16, r0
531 putcon r0, SPC
532 ld.q SP, 0, r0
533 ld.q SP, 8, r1
534
535 /* Save other original registers into reg_save_area */
536 movi reg_save_area, SP
537 st.q SP, SAVED_R2, r2
538 st.q SP, SAVED_R3, r3
539 st.q SP, SAVED_R4, r4
540 st.q SP, SAVED_R5, r5
541 st.q SP, SAVED_R6, r6
542 st.q SP, SAVED_R18, r18
543 gettr tr0, r3
544 st.q SP, SAVED_TR0, r3
545
546 /* Set args for debug class handler */
547 getcon EXPEVT, r2
548 movi ret_from_exception, r3
549 ori r3, 1, r3
550 movi EVENT_DEBUG, r4
551 or SP, ZERO, r5
552 getcon KCR1, SP
553 pta handle_exception, tr0
554 blink tr0, ZERO
555
556 .balign 256
557debug_interrupt:
558 /* !!! WE COME HERE IN REAL MODE !!! */
559 /* Hook-up debug interrupt to allow various debugging options to be
560 * hooked into its handler. */
561 /* Save original stack pointer into KCR1 */
562 synco
563 putcon SP, KCR1
564 movi resvec_save_area-CONFIG_CACHED_MEMORY_OFFSET, SP
565 ocbp SP, 0
566 ocbp SP, 32
567 synco
568
 569 /* Save other original registers into reg_save_area through real addresses */
570 st.q SP, SAVED_R2, r2
571 st.q SP, SAVED_R3, r3
572 st.q SP, SAVED_R4, r4
573 st.q SP, SAVED_R5, r5
574 st.q SP, SAVED_R6, r6
575 st.q SP, SAVED_R18, r18
576 gettr tr0, r3
577 st.q SP, SAVED_TR0, r3
578
579 /* move (spc,ssr)->(pspc,pssr). The rte will shift
580 them back again, so that they look like the originals
581 as far as the real handler code is concerned. */
582 getcon spc, r6
583 putcon r6, pspc
584 getcon ssr, r6
585 putcon r6, pssr
586
587 ! construct useful SR for handle_exception
588 movi 3, r6
589 shlli r6, 30, r6
590 getcon sr, r18
591 or r18, r6, r6
592 putcon r6, ssr
593
594 ! SSR is now the current SR with the MD and MMU bits set
595 ! i.e. the rte will switch back to priv mode and put
596 ! the mmu back on
597
598 ! construct spc
599 movi handle_exception, r18
600 ori r18, 1, r18 ! for safety (do we need this?)
601 putcon r18, spc
602
603 /* Set args for Non-debug, Not a TLB miss class handler */
604
605 ! EXPEVT==0x80 is unused, so 'steal' this value to put the
606 ! debug interrupt handler in the vectoring table
607 movi 0x80, r2
608 movi ret_from_exception, r3
609 ori r3, 1, r3
610 movi EVENT_FAULT_NOT_TLB, r4
611
612 or SP, ZERO, r5
613 movi CONFIG_CACHED_MEMORY_OFFSET, r6
614 add r6, r5, r5
615 getcon KCR1, SP
616
617 synco ! for safety
618 rte ! -> handle_exception, switch back to priv mode again
619
620LRESVEC_block_end: /* Marker. Unused. */
621
622 .balign TEXT_SIZE
623
624/*
625 * Second level handler for VBR-based exceptions. Pre-handler.
626 * In common to all stack-frame sensitive handlers.
627 *
628 * Inputs:
629 * (KCR0) Current [current task union]
630 * (KCR1) Original SP
631 * (r2) INTEVT/EXPEVT
632 * (r3) appropriate return address
633 * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault, 3=debug)
634 * (r5) Pointer to reg_save_area
635 * (SP) Original SP
636 *
637 * Available registers:
638 * (r6)
639 * (r18)
640 * (tr0)
641 *
642 */
643handle_exception:
644 /* Common 2nd level handler. */
645
646 /* First thing we need an appropriate stack pointer */
647 getcon SSR, r6
648 shlri r6, 30, r6
649 andi r6, 1, r6
650 pta stack_ok, tr0
651 bne r6, ZERO, tr0 /* Original stack pointer is fine */
652
653 /* Set stack pointer for user fault */
654 getcon KCR0, SP
655 movi THREAD_SIZE, r6 /* Point to the end */
656 add SP, r6, SP
657
658stack_ok:
659
660/* DEBUG : check for underflow/overflow of the kernel stack */
661 pta no_underflow, tr0
662 getcon KCR0, r6
663 movi 1024, r18
664 add r6, r18, r6
665 bge SP, r6, tr0 ! ? below 1k from bottom of stack : danger zone
666
667/* Just panic to cause a crash. */
668bad_sp:
669 ld.b r63, 0, r6
670 nop
671
672no_underflow:
673 pta bad_sp, tr0
674 getcon kcr0, r6
675 movi THREAD_SIZE, r18
676 add r18, r6, r6
677 bgt SP, r6, tr0 ! sp above the stack
678
679 /* Make some room for the BASIC frame. */
680 movi -(FRAME_SIZE), r6
681 add SP, r6, SP
682
683/* Could do this with no stalling if we had another spare register, but the
684 code below will be OK. */
685 ld.q r5, SAVED_R2, r6
686 ld.q r5, SAVED_R3, r18
687 st.q SP, FRAME_R(2), r6
688 ld.q r5, SAVED_R4, r6
689 st.q SP, FRAME_R(3), r18
690 ld.q r5, SAVED_R5, r18
691 st.q SP, FRAME_R(4), r6
692 ld.q r5, SAVED_R6, r6
693 st.q SP, FRAME_R(5), r18
694 ld.q r5, SAVED_R18, r18
695 st.q SP, FRAME_R(6), r6
696 ld.q r5, SAVED_TR0, r6
697 st.q SP, FRAME_R(18), r18
698 st.q SP, FRAME_T(0), r6
699
700 /* Keep old SP around */
701 getcon KCR1, r6
702
703 /* Save the rest of the general purpose registers */
704 st.q SP, FRAME_R(0), r0
705 st.q SP, FRAME_R(1), r1
706 st.q SP, FRAME_R(7), r7
707 st.q SP, FRAME_R(8), r8
708 st.q SP, FRAME_R(9), r9
709 st.q SP, FRAME_R(10), r10
710 st.q SP, FRAME_R(11), r11
711 st.q SP, FRAME_R(12), r12
712 st.q SP, FRAME_R(13), r13
713 st.q SP, FRAME_R(14), r14
714
715 /* SP is somewhere else */
716 st.q SP, FRAME_R(15), r6
717
718 st.q SP, FRAME_R(16), r16
719 st.q SP, FRAME_R(17), r17
720 /* r18 is saved earlier. */
721 st.q SP, FRAME_R(19), r19
722 st.q SP, FRAME_R(20), r20
723 st.q SP, FRAME_R(21), r21
724 st.q SP, FRAME_R(22), r22
725 st.q SP, FRAME_R(23), r23
726 st.q SP, FRAME_R(24), r24
727 st.q SP, FRAME_R(25), r25
728 st.q SP, FRAME_R(26), r26
729 st.q SP, FRAME_R(27), r27
730 st.q SP, FRAME_R(28), r28
731 st.q SP, FRAME_R(29), r29
732 st.q SP, FRAME_R(30), r30
733 st.q SP, FRAME_R(31), r31
734 st.q SP, FRAME_R(32), r32
735 st.q SP, FRAME_R(33), r33
736 st.q SP, FRAME_R(34), r34
737 st.q SP, FRAME_R(35), r35
738 st.q SP, FRAME_R(36), r36
739 st.q SP, FRAME_R(37), r37
740 st.q SP, FRAME_R(38), r38
741 st.q SP, FRAME_R(39), r39
742 st.q SP, FRAME_R(40), r40
743 st.q SP, FRAME_R(41), r41
744 st.q SP, FRAME_R(42), r42
745 st.q SP, FRAME_R(43), r43
746 st.q SP, FRAME_R(44), r44
747 st.q SP, FRAME_R(45), r45
748 st.q SP, FRAME_R(46), r46
749 st.q SP, FRAME_R(47), r47
750 st.q SP, FRAME_R(48), r48
751 st.q SP, FRAME_R(49), r49
752 st.q SP, FRAME_R(50), r50
753 st.q SP, FRAME_R(51), r51
754 st.q SP, FRAME_R(52), r52
755 st.q SP, FRAME_R(53), r53
756 st.q SP, FRAME_R(54), r54
757 st.q SP, FRAME_R(55), r55
758 st.q SP, FRAME_R(56), r56
759 st.q SP, FRAME_R(57), r57
760 st.q SP, FRAME_R(58), r58
761 st.q SP, FRAME_R(59), r59
762 st.q SP, FRAME_R(60), r60
763 st.q SP, FRAME_R(61), r61
764 st.q SP, FRAME_R(62), r62
765
766 /*
767 * Save the S* registers.
768 */
769 getcon SSR, r61
770 st.q SP, FRAME_S(FSSR), r61
771 getcon SPC, r62
772 st.q SP, FRAME_S(FSPC), r62
773 movi -1, r62 /* Reset syscall_nr */
774 st.q SP, FRAME_S(FSYSCALL_ID), r62
775
776 /* Save the rest of the target registers */
777 gettr tr1, r6
778 st.q SP, FRAME_T(1), r6
779 gettr tr2, r6
780 st.q SP, FRAME_T(2), r6
781 gettr tr3, r6
782 st.q SP, FRAME_T(3), r6
783 gettr tr4, r6
784 st.q SP, FRAME_T(4), r6
785 gettr tr5, r6
786 st.q SP, FRAME_T(5), r6
787 gettr tr6, r6
788 st.q SP, FRAME_T(6), r6
789 gettr tr7, r6
790 st.q SP, FRAME_T(7), r6
791
792 ! setup FP so that unwinder can wind back through nested kernel mode
793 ! exceptions
794 add SP, ZERO, r14
795
796#ifdef CONFIG_POOR_MANS_STRACE
797 /* We've pushed all the registers now, so only r2-r4 hold anything
798 * useful. Move them into callee save registers */
799 or r2, ZERO, r28
800 or r3, ZERO, r29
801 or r4, ZERO, r30
802
803 /* Preserve r2 as the event code */
804 movi evt_debug, r3
805 ori r3, 1, r3
806 ptabs r3, tr0
807
808 or SP, ZERO, r6
809 getcon TRA, r5
810 blink tr0, LINK
811
812 or r28, ZERO, r2
813 or r29, ZERO, r3
814 or r30, ZERO, r4
815#endif
816
817 /* For syscall and debug race condition, get TRA now */
818 getcon TRA, r5
819
820 /* We are in a safe position to turn SR.BL off, but set IMASK=0xf
821 * Also set FD, to catch FPU usage in the kernel.
822 *
823 * benedict.gaster@superh.com 29/07/2002
824 *
825 * On all SH5-101 revisions it is unsafe to raise the IMASK and at the
826 * same time change BL from 1->0, as any pending interrupt of a level
 827 * higher than the previous value of IMASK will leak through and be
828 * taken unexpectedly.
829 *
830 * To avoid this we raise the IMASK and then issue another PUTCON to
831 * enable interrupts.
832 */
833 getcon SR, r6
834 movi SR_IMASK | SR_FD, r7
835 or r6, r7, r6
836 putcon r6, SR
837 movi SR_UNBLOCK_EXC, r7
838 and r6, r7, r6
839 putcon r6, SR
840
841
842 /* Now call the appropriate 3rd level handler */
843 or r3, ZERO, LINK
844 movi trap_jtable, r3
845 shlri r2, 3, r2
846 ldx.l r2, r3, r3
847 shlri r2, 2, r2
848 ptabs r3, tr0
849 or SP, ZERO, r3
850 blink tr0, ZERO
851
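/* Illustrative C rendering of the dispatch above (names hypothetical).
 * Trap vectors step by 0x20 while jump-table entries are 4 bytes, so
 * EXPEVT >> 3 is the byte offset into the table and EXPEVT >> 5 is the
 * entry number left in r2 for the third-level handler. */
typedef void (*trap_handler_t)(void);
extern trap_handler_t trap_jtable[];	/* hypothetical C view of the table */

static trap_handler_t lookup_handler(unsigned long expevt,
				     unsigned int *entry_number)
{
	*entry_number = expevt >> 5;	/* e.g. syscall 0x160 -> 11, NMI 0x1C0 -> 14 */
	return trap_jtable[expevt >> 5];
}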
852/*
853 * Second level handler for VBR-based exceptions. Post-handlers.
854 *
855 * Post-handlers for interrupts (ret_from_irq), exceptions
856 * (ret_from_exception) and common reentrance doors (restore_all
857 * to get back to the original context, ret_from_syscall loop to
858 * check kernel exiting).
859 *
 860 * ret_with_reschedule and work_notifysig are inner labels of
861 * the ret_from_syscall loop.
862 *
863 * In common to all stack-frame sensitive handlers.
864 *
865 * Inputs:
866 * (SP) struct pt_regs *, original register's frame pointer (basic)
867 *
868 */
869 .global ret_from_irq
870ret_from_irq:
871#ifdef CONFIG_POOR_MANS_STRACE
872 pta evt_debug_ret_from_irq, tr0
873 ori SP, 0, r2
874 blink tr0, LINK
875#endif
876 ld.q SP, FRAME_S(FSSR), r6
877 shlri r6, 30, r6
878 andi r6, 1, r6
879 pta resume_kernel, tr0
880 bne r6, ZERO, tr0 /* no further checks */
881 STI()
882 pta ret_with_reschedule, tr0
883 blink tr0, ZERO /* Do not check softirqs */
884
885 .global ret_from_exception
886ret_from_exception:
887 preempt_stop()
888
889#ifdef CONFIG_POOR_MANS_STRACE
890 pta evt_debug_ret_from_exc, tr0
891 ori SP, 0, r2
892 blink tr0, LINK
893#endif
894
895 ld.q SP, FRAME_S(FSSR), r6
896 shlri r6, 30, r6
897 andi r6, 1, r6
898 pta resume_kernel, tr0
899 bne r6, ZERO, tr0 /* no further checks */
900
901 /* Check softirqs */
902
903#ifdef CONFIG_PREEMPT
904 pta ret_from_syscall, tr0
905 blink tr0, ZERO
906
907resume_kernel:
908 pta restore_all, tr0
909
910 getcon KCR0, r6
911 ld.l r6, TI_PRE_COUNT, r7
912 beq/u r7, ZERO, tr0
913
914need_resched:
915 ld.l r6, TI_FLAGS, r7
916 movi (1 << TIF_NEED_RESCHED), r8
917 and r8, r7, r8
918 bne r8, ZERO, tr0
919
920 getcon SR, r7
921 andi r7, 0xf0, r7
922 bne r7, ZERO, tr0
923
924 movi ((PREEMPT_ACTIVE >> 16) & 65535), r8
925 shori (PREEMPT_ACTIVE & 65535), r8
926 st.l r6, TI_PRE_COUNT, r8
927
928 STI()
929 movi schedule, r7
930 ori r7, 1, r7
931 ptabs r7, tr1
932 blink tr1, LINK
933
934 st.l r6, TI_PRE_COUNT, ZERO
935 CLI()
936
937 pta need_resched, tr1
938 blink tr1, ZERO
939#endif
940
941 .global ret_from_syscall
942ret_from_syscall:
943
944ret_with_reschedule:
945 getcon KCR0, r6 ! r6 contains current_thread_info
946 ld.l r6, TI_FLAGS, r7 ! r7 contains current_thread_info->flags
947
948 ! FIXME:!!!
949 ! no handling of TIF_SYSCALL_TRACE yet!!
950
951 movi (1 << TIF_NEED_RESCHED), r8
952 and r8, r7, r8
953 pta work_resched, tr0
954 bne r8, ZERO, tr0
955
956 pta restore_all, tr1
957
958 movi (1 << TIF_SIGPENDING), r8
959 and r8, r7, r8
960 pta work_notifysig, tr0
961 bne r8, ZERO, tr0
962
963 blink tr1, ZERO
964
965work_resched:
966 pta ret_from_syscall, tr0
967 gettr tr0, LINK
968 movi schedule, r6
969 ptabs r6, tr0
970 blink tr0, ZERO /* Call schedule(), return on top */
971
972work_notifysig:
973 gettr tr1, LINK
974
975 movi do_signal, r6
976 ptabs r6, tr0
977 or SP, ZERO, r2
978 or ZERO, ZERO, r3
979 blink tr0, LINK /* Call do_signal(regs, 0), return here */
980
981restore_all:
982 /* Do prefetches */
983
984 ld.q SP, FRAME_T(0), r6
985 ld.q SP, FRAME_T(1), r7
986 ld.q SP, FRAME_T(2), r8
987 ld.q SP, FRAME_T(3), r9
988 ptabs r6, tr0
989 ptabs r7, tr1
990 ptabs r8, tr2
991 ptabs r9, tr3
992 ld.q SP, FRAME_T(4), r6
993 ld.q SP, FRAME_T(5), r7
994 ld.q SP, FRAME_T(6), r8
995 ld.q SP, FRAME_T(7), r9
996 ptabs r6, tr4
997 ptabs r7, tr5
998 ptabs r8, tr6
999 ptabs r9, tr7
1000
1001 ld.q SP, FRAME_R(0), r0
1002 ld.q SP, FRAME_R(1), r1
1003 ld.q SP, FRAME_R(2), r2
1004 ld.q SP, FRAME_R(3), r3
1005 ld.q SP, FRAME_R(4), r4
1006 ld.q SP, FRAME_R(5), r5
1007 ld.q SP, FRAME_R(6), r6
1008 ld.q SP, FRAME_R(7), r7
1009 ld.q SP, FRAME_R(8), r8
1010 ld.q SP, FRAME_R(9), r9
1011 ld.q SP, FRAME_R(10), r10
1012 ld.q SP, FRAME_R(11), r11
1013 ld.q SP, FRAME_R(12), r12
1014 ld.q SP, FRAME_R(13), r13
1015 ld.q SP, FRAME_R(14), r14
1016
1017 ld.q SP, FRAME_R(16), r16
1018 ld.q SP, FRAME_R(17), r17
1019 ld.q SP, FRAME_R(18), r18
1020 ld.q SP, FRAME_R(19), r19
1021 ld.q SP, FRAME_R(20), r20
1022 ld.q SP, FRAME_R(21), r21
1023 ld.q SP, FRAME_R(22), r22
1024 ld.q SP, FRAME_R(23), r23
1025 ld.q SP, FRAME_R(24), r24
1026 ld.q SP, FRAME_R(25), r25
1027 ld.q SP, FRAME_R(26), r26
1028 ld.q SP, FRAME_R(27), r27
1029 ld.q SP, FRAME_R(28), r28
1030 ld.q SP, FRAME_R(29), r29
1031 ld.q SP, FRAME_R(30), r30
1032 ld.q SP, FRAME_R(31), r31
1033 ld.q SP, FRAME_R(32), r32
1034 ld.q SP, FRAME_R(33), r33
1035 ld.q SP, FRAME_R(34), r34
1036 ld.q SP, FRAME_R(35), r35
1037 ld.q SP, FRAME_R(36), r36
1038 ld.q SP, FRAME_R(37), r37
1039 ld.q SP, FRAME_R(38), r38
1040 ld.q SP, FRAME_R(39), r39
1041 ld.q SP, FRAME_R(40), r40
1042 ld.q SP, FRAME_R(41), r41
1043 ld.q SP, FRAME_R(42), r42
1044 ld.q SP, FRAME_R(43), r43
1045 ld.q SP, FRAME_R(44), r44
1046 ld.q SP, FRAME_R(45), r45
1047 ld.q SP, FRAME_R(46), r46
1048 ld.q SP, FRAME_R(47), r47
1049 ld.q SP, FRAME_R(48), r48
1050 ld.q SP, FRAME_R(49), r49
1051 ld.q SP, FRAME_R(50), r50
1052 ld.q SP, FRAME_R(51), r51
1053 ld.q SP, FRAME_R(52), r52
1054 ld.q SP, FRAME_R(53), r53
1055 ld.q SP, FRAME_R(54), r54
1056 ld.q SP, FRAME_R(55), r55
1057 ld.q SP, FRAME_R(56), r56
1058 ld.q SP, FRAME_R(57), r57
1059 ld.q SP, FRAME_R(58), r58
1060
1061 getcon SR, r59
1062 movi SR_BLOCK_EXC, r60
1063 or r59, r60, r59
1064 putcon r59, SR /* SR.BL = 1, keep nesting out */
1065 ld.q SP, FRAME_S(FSSR), r61
1066 ld.q SP, FRAME_S(FSPC), r62
1067 movi SR_ASID_MASK, r60
1068 and r59, r60, r59
1069 andc r61, r60, r61 /* Clear out older ASID */
1070 or r59, r61, r61 /* Retain current ASID */
1071 putcon r61, SSR
1072 putcon r62, SPC
1073
1074 /* Ignore FSYSCALL_ID */
1075
1076 ld.q SP, FRAME_R(59), r59
1077 ld.q SP, FRAME_R(60), r60
1078 ld.q SP, FRAME_R(61), r61
1079 ld.q SP, FRAME_R(62), r62
1080
1081 /* Last touch */
1082 ld.q SP, FRAME_R(15), SP
1083 rte
1084 nop
1085
1086/*
1087 * Third level handlers for VBR-based exceptions. Adapting args to
1088 * and/or deflecting to fourth level handlers.
1089 *
1090 * Fourth level handlers interface.
1091 * Most are C-coded handlers directly pointed by the trap_jtable.
1092 * (Third = Fourth level)
1093 * Inputs:
1094 * (r2) fault/interrupt code, entry number (e.g. NMI = 14,
1095 * IRL0-3 (0000) = 16, RTLBMISS = 2, SYSCALL = 11, etc ...)
1096 * (r3) struct pt_regs *, original register's frame pointer
1097 * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault)
1098 * (r5) TRA control register (for syscall/debug benefit only)
1099 * (LINK) return address
1100 * (SP) = r3
1101 *
1102 * Kernel TLB fault handlers will get a slightly different interface.
1103 * (r2) struct pt_regs *, original register's frame pointer
1104 * (r3) writeaccess, whether it's a store fault as opposed to load fault
1105 * (r4) execaccess, whether it's a ITLB fault as opposed to DTLB fault
1106 * (r5) Effective Address of fault
1107 * (LINK) return address
1108 * (SP) = r2
1109 *
1110 * fpu_error_or_IRQ? is a helper to deflect to the right cause.
1111 *
1112 */
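/* Sketched for orientation only (the exact kernel signatures may differ):
 * in C terms, the two register interfaces described above are roughly: */
struct pt_regs;

/* Ordinary third-level handlers: r2..r5 map onto the first four args. */
void third_level_handler(int entry_number, struct pt_regs *regs,
			 int event, unsigned long tra);

/* Kernel TLB fault handlers get the alternative interface. */
void tlb_fault_handler(struct pt_regs *regs, unsigned long writeaccess,
		       unsigned long execaccess, unsigned long address);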
1113tlb_miss_load:
1114 or SP, ZERO, r2
1115 or ZERO, ZERO, r3 /* Read */
1116 or ZERO, ZERO, r4 /* Data */
1117 getcon TEA, r5
1118 pta call_do_page_fault, tr0
1119 beq ZERO, ZERO, tr0
1120
1121tlb_miss_store:
1122 or SP, ZERO, r2
1123 movi 1, r3 /* Write */
1124 or ZERO, ZERO, r4 /* Data */
1125 getcon TEA, r5
1126 pta call_do_page_fault, tr0
1127 beq ZERO, ZERO, tr0
1128
1129itlb_miss_or_IRQ:
1130 pta its_IRQ, tr0
1131 beqi/u r4, EVENT_INTERRUPT, tr0
1132 or SP, ZERO, r2
1133 or ZERO, ZERO, r3 /* Read */
1134 movi 1, r4 /* Text */
1135 getcon TEA, r5
1136 /* Fall through */
1137
1138call_do_page_fault:
1139 movi do_page_fault, r6
1140 ptabs r6, tr0
1141 blink tr0, ZERO
1142
1143fpu_error_or_IRQA:
1144 pta its_IRQ, tr0
1145 beqi/l r4, EVENT_INTERRUPT, tr0
1146#ifdef CONFIG_SH_FPU
1147 movi do_fpu_state_restore, r6
1148#else
1149 movi do_exception_error, r6
1150#endif
1151 ptabs r6, tr0
1152 blink tr0, ZERO
1153
1154fpu_error_or_IRQB:
1155 pta its_IRQ, tr0
1156 beqi/l r4, EVENT_INTERRUPT, tr0
1157#ifdef CONFIG_SH_FPU
1158 movi do_fpu_state_restore, r6
1159#else
1160 movi do_exception_error, r6
1161#endif
1162 ptabs r6, tr0
1163 blink tr0, ZERO
1164
1165its_IRQ:
1166 movi do_IRQ, r6
1167 ptabs r6, tr0
1168 blink tr0, ZERO
1169
1170/*
1171 * system_call/unknown_trap third level handler:
1172 *
1173 * Inputs:
1174 * (r2) fault/interrupt code, entry number (TRAP = 11)
1175 * (r3) struct pt_regs *, original register's frame pointer
1176 * (r4) Not used. Event (0=interrupt, 1=TLB miss fault, 2=Not TLB miss fault)
1177 * (r5) TRA Control Reg (0x00xyzzzz: x=1 SYSCALL, y = #args, z=nr)
1178 * (SP) = r3
1179 * (LINK) return address: ret_from_exception
1180 * (*r3) Syscall parms: SC#, arg0, arg1, ..., arg5 in order (Saved r2/r7)
1181 *
1182 * Outputs:
1183 * (*r3) Syscall reply (Saved r2)
 1184 * (LINK) In the syscall case only, it can be scrapped.
1185 * Common second level post handler will be ret_from_syscall.
1186 * Common (non-trace) exit point to that is syscall_ret (saving
1187 * result to r2). Common bad exit point is syscall_bad (returning
1188 * ENOSYS then saved to r2).
1189 *
1190 */
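/* Illustrative C sketch of the TRA check and syscall-number extraction
 * performed below: */
static int decode_tra(unsigned long tra, unsigned int *syscall_nr)
{
	if ((tra >> 20) != 1)
		return 0;		/* not 0x1yzzzz: route to unknown_trap */
	*syscall_nr = tra & 0x1ff;	/* same mask as 'andi r5, 0x1ff, r5' */
	return 1;
}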
1191
1192unknown_trap:
1193 /* Unknown Trap or User Trace */
1194 movi do_unknown_trapa, r6
1195 ptabs r6, tr0
1196 ld.q r3, FRAME_R(9), r2 /* r2 = #arg << 16 | syscall # */
1197 andi r2, 0x1ff, r2 /* r2 = syscall # */
1198 blink tr0, LINK
1199
1200 pta syscall_ret, tr0
1201 blink tr0, ZERO
1202
 1203 /* New syscall implementation */
1204system_call:
1205 pta unknown_trap, tr0
1206 or r5, ZERO, r4 /* TRA (=r5) -> r4 */
1207 shlri r4, 20, r4
1208 bnei r4, 1, tr0 /* unknown_trap if not 0x1yzzzz */
1209
1210 /* It's a system call */
1211 st.q r3, FRAME_S(FSYSCALL_ID), r5 /* ID (0x1yzzzz) -> stack */
1212 andi r5, 0x1ff, r5 /* syscall # -> r5 */
1213
1214 STI()
1215
1216 pta syscall_allowed, tr0
1217 movi NR_syscalls - 1, r4 /* Last valid */
1218 bgeu/l r4, r5, tr0
1219
1220syscall_bad:
1221 /* Return ENOSYS ! */
1222 movi -(ENOSYS), r2 /* Fall-through */
1223
1224 .global syscall_ret
1225syscall_ret:
1226 st.q SP, FRAME_R(9), r2 /* Expecting SP back to BASIC frame */
1227
1228#ifdef CONFIG_POOR_MANS_STRACE
1229 /* nothing useful in registers at this point */
1230
1231 movi evt_debug2, r5
1232 ori r5, 1, r5
1233 ptabs r5, tr0
1234 ld.q SP, FRAME_R(9), r2
1235 or SP, ZERO, r3
1236 blink tr0, LINK
1237#endif
1238
1239 ld.q SP, FRAME_S(FSPC), r2
1240 addi r2, 4, r2 /* Move PC, being pre-execution event */
1241 st.q SP, FRAME_S(FSPC), r2
1242 pta ret_from_syscall, tr0
1243 blink tr0, ZERO
1244
1245
 1246/* A different return path for ret_from_fork, because with later kernels
 1247 * we now need to call schedule_tail. Since prev is already loaded into
 1248 * r2 by switch_to(), we can just call it straight away.
1249 */
1250
1251.global ret_from_fork
1252ret_from_fork:
1253
1254 movi schedule_tail,r5
1255 ori r5, 1, r5
1256 ptabs r5, tr0
1257 blink tr0, LINK
1258
1259#ifdef CONFIG_POOR_MANS_STRACE
1260 /* nothing useful in registers at this point */
1261
1262 movi evt_debug2, r5
1263 ori r5, 1, r5
1264 ptabs r5, tr0
1265 ld.q SP, FRAME_R(9), r2
1266 or SP, ZERO, r3
1267 blink tr0, LINK
1268#endif
1269
1270 ld.q SP, FRAME_S(FSPC), r2
1271 addi r2, 4, r2 /* Move PC, being pre-execution event */
1272 st.q SP, FRAME_S(FSPC), r2
1273 pta ret_from_syscall, tr0
1274 blink tr0, ZERO
1275
1276
1277
1278syscall_allowed:
1279 /* Use LINK to deflect the exit point, default is syscall_ret */
1280 pta syscall_ret, tr0
1281 gettr tr0, LINK
1282 pta syscall_notrace, tr0
1283
1284 getcon KCR0, r2
1285 ld.l r2, TI_FLAGS, r4
1286 movi (1 << TIF_SYSCALL_TRACE), r6
1287 and r6, r4, r6
1288 beq/l r6, ZERO, tr0
1289
1290 /* Trace it by calling syscall_trace before and after */
1291 movi syscall_trace, r4
1292 ptabs r4, tr0
1293 blink tr0, LINK
1294 /* Reload syscall number as r5 is trashed by syscall_trace */
1295 ld.q SP, FRAME_S(FSYSCALL_ID), r5
1296 andi r5, 0x1ff, r5
1297
1298 pta syscall_ret_trace, tr0
1299 gettr tr0, LINK
1300
1301syscall_notrace:
1302 /* Now point to the appropriate 4th level syscall handler */
1303 movi sys_call_table, r4
1304 shlli r5, 2, r5
1305 ldx.l r4, r5, r5
1306 ptabs r5, tr0
1307
1308 /* Prepare original args */
1309 ld.q SP, FRAME_R(2), r2
1310 ld.q SP, FRAME_R(3), r3
1311 ld.q SP, FRAME_R(4), r4
1312 ld.q SP, FRAME_R(5), r5
1313 ld.q SP, FRAME_R(6), r6
1314 ld.q SP, FRAME_R(7), r7
1315
1316 /* And now the trick for those syscalls requiring regs * ! */
1317 or SP, ZERO, r8
1318
1319 /* Call it */
1320 blink tr0, ZERO /* LINK is already properly set */
1321
1322syscall_ret_trace:
1323 /* We get back here only if under trace */
1324 st.q SP, FRAME_R(9), r2 /* Save return value */
1325
1326 movi syscall_trace, LINK
1327 ptabs LINK, tr0
1328 blink tr0, LINK
1329
1330 /* This needs to be done after any syscall tracing */
1331 ld.q SP, FRAME_S(FSPC), r2
1332 addi r2, 4, r2 /* Move PC, being pre-execution event */
1333 st.q SP, FRAME_S(FSPC), r2
1334
1335 pta ret_from_syscall, tr0
1336 blink tr0, ZERO /* Resume normal return sequence */
1337
1338/*
1339 * --- Switch to running under a particular ASID and return the previous ASID value
1340 * --- The caller is assumed to have done a cli before calling this.
1341 *
1342 * Input r2 : new ASID
1343 * Output r2 : old ASID
1344 */
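/* The register juggling below amounts to this C logic (a sketch only; the
 * real code must install the new SR indirectly, via SSR and an rte): */
static unsigned long switch_asid_model(unsigned long sr, unsigned int new_asid,
				       unsigned int *old_asid)
{
	*old_asid = (sr & 0x00ff0000ul) >> 16;			/* previous ASID */
	return (sr & ~0x00ff0000ul) | ((new_asid & 0xfful) << 16);	/* new SR image */
}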
1345
1346 .global switch_and_save_asid
1347switch_and_save_asid:
1348 getcon sr, r0
1349 movi 255, r4
1350 shlli r4, 16, r4 /* r4 = mask to select ASID */
1351 and r0, r4, r3 /* r3 = shifted old ASID */
1352 andi r2, 255, r2 /* mask down new ASID */
1353 shlli r2, 16, r2 /* align new ASID against SR.ASID */
1354 andc r0, r4, r0 /* efface old ASID from SR */
1355 or r0, r2, r0 /* insert the new ASID */
1356 putcon r0, ssr
1357 movi 1f, r0
1358 putcon r0, spc
1359 rte
1360 nop
13611:
1362 ptabs LINK, tr0
1363 shlri r3, 16, r2 /* r2 = old ASID */
1364 blink tr0, r63
1365
1366 .global route_to_panic_handler
1367route_to_panic_handler:
1368 /* Switch to real mode, goto panic_handler, don't return. Useful for
1369 last-chance debugging, e.g. if no output wants to go to the console.
1370 */
1371
1372 movi panic_handler - CONFIG_CACHED_MEMORY_OFFSET, r1
1373 ptabs r1, tr0
1374 pta 1f, tr1
1375 gettr tr1, r0
1376 putcon r0, spc
1377 getcon sr, r0
1378 movi 1, r1
1379 shlli r1, 31, r1
1380 andc r0, r1, r0
1381 putcon r0, ssr
1382 rte
1383 nop
13841: /* Now in real mode */
1385 blink tr0, r63
1386 nop
1387
1388 .global peek_real_address_q
1389peek_real_address_q:
1390 /* Two args:
1391 r2 : real mode address to peek
1392 r2(out) : result quadword
1393
1394 This is provided as a cheapskate way of manipulating device
1395 registers for debugging (to avoid the need to onchip_remap the debug
1396 module, and to avoid the need to onchip_remap the watchpoint
1397 controller in a way that identity maps sufficient bits to avoid the
1398 SH5-101 cut2 silicon defect).
1399
1400 This code is not performance critical
1401 */
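/* Viewed from C, the routine behaves like the prototypes below. They are
 * inferred from the r2/r3 register usage here, not quoted from a header,
 * so treat them as assumptions; poke_real_address_q further down is the
 * store-side twin. */
extern unsigned long long peek_real_address_q(unsigned long long real_addr);
extern void poke_real_address_q(unsigned long long real_addr,
				unsigned long long val);

/* Hypothetical debug use: inspect and tweak a device register by its
 * physical address without setting up a mapping first. */
static void debug_poke_example(unsigned long long reg_pa)
{
	unsigned long long v = peek_real_address_q(reg_pa);
	poke_real_address_q(reg_pa, v | 1ull);
}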
1402
1403 add.l r2, r63, r2 /* sign extend address */
1404 getcon sr, r0 /* r0 = saved original SR */
1405 movi 1, r1
1406 shlli r1, 28, r1
1407 or r0, r1, r1 /* r0 with block bit set */
1408 putcon r1, sr /* now in critical section */
1409 movi 1, r36
1410 shlli r36, 31, r36
1411 andc r1, r36, r1 /* turn sr.mmu off in real mode section */
1412
1413 putcon r1, ssr
1414 movi .peek0 - CONFIG_CACHED_MEMORY_OFFSET, r36 /* real mode target address */
1415 movi 1f, r37 /* virtual mode return addr */
1416 putcon r36, spc
1417
1418 synco
1419 rte
1420 nop
1421
1422.peek0: /* come here in real mode, don't touch caches!!
1423 still in critical section (sr.bl==1) */
1424 putcon r0, ssr
1425 putcon r37, spc
 1426 /* Here's the actual peek. If the address is bad, all bets are off as to
 1427 * what will happen (handlers invoked in real mode = bad news) */
1428 ld.q r2, 0, r2
1429 synco
1430 rte /* Back to virtual mode */
1431 nop
1432
14331:
1434 ptabs LINK, tr0
1435 blink tr0, r63
1436
1437 .global poke_real_address_q
1438poke_real_address_q:
1439 /* Two args:
1440 r2 : real mode address to poke
1441 r3 : quadword value to write.
1442
1443 This is provided as a cheapskate way of manipulating device
1444 registers for debugging (to avoid the need to onchip_remap the debug
1445 module, and to avoid the need to onchip_remap the watchpoint
1446 controller in a way that identity maps sufficient bits to avoid the
1447 SH5-101 cut2 silicon defect).
1448
1449 This code is not performance critical
1450 */
1451
1452 add.l r2, r63, r2 /* sign extend address */
1453 getcon sr, r0 /* r0 = saved original SR */
1454 movi 1, r1
1455 shlli r1, 28, r1
1456 or r0, r1, r1 /* r0 with block bit set */
1457 putcon r1, sr /* now in critical section */
1458 movi 1, r36
1459 shlli r36, 31, r36
1460 andc r1, r36, r1 /* turn sr.mmu off in real mode section */
1461
1462 putcon r1, ssr
1463 movi .poke0-CONFIG_CACHED_MEMORY_OFFSET, r36 /* real mode target address */
1464 movi 1f, r37 /* virtual mode return addr */
1465 putcon r36, spc
1466
1467 synco
1468 rte
1469 nop
1470
1471.poke0: /* come here in real mode, don't touch caches!!
1472 still in critical section (sr.bl==1) */
1473 putcon r0, ssr
1474 putcon r37, spc
 1475 /* Here's the actual poke. If the address is bad, all bets are off as to
 1476 * what will happen (handlers invoked in real mode = bad news) */
1477 st.q r2, 0, r3
1478 synco
1479 rte /* Back to virtual mode */
1480 nop
1481
14821:
1483 ptabs LINK, tr0
1484 blink tr0, r63
1485
1486/*
1487 * --- User Access Handling Section
1488 */
1489
1490/*
 1491 * User Access support. It has all moved to non-inlined assembler
 1492 * functions here.
1493 *
1494 * __kernel_size_t __copy_user(void *__to, const void *__from,
1495 * __kernel_size_t __n)
1496 *
1497 * Inputs:
1498 * (r2) target address
1499 * (r3) source address
1500 * (r4) size in bytes
1501 *
 1502 * Outputs:
1503 * (*r2) target data
1504 * (r2) non-copied bytes
1505 *
1506 * If a fault occurs on the user pointer, bail out early and return the
1507 * number of bytes not copied in r2.
1508 * Strategy : for large blocks, call a real memcpy function which can
1509 * move >1 byte at a time using unaligned ld/st instructions, and can
1510 * manipulate the cache using prefetch + alloco to improve the speed
1511 * further. If a fault occurs in that function, just revert to the
1512 * byte-by-byte approach used for small blocks; this is rare so the
1513 * performance hit for that case does not matter.
1514 *
1515 * For small blocks it's not worth the overhead of setting up and calling
1516 * the memcpy routine; do the copy a byte at a time.
1517 *
1518 */
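/* A rough C model of the size-based strategy (fault handling elided; the
 * 16-byte cutoff mirrors the 'best guess' threshold below). In the real
 * routine a fault in either path is caught via the exception table and the
 * count of bytes not yet copied is returned instead of 0. */
#include <stddef.h>
#include <string.h>

static size_t copy_user_model(char *to, const char *from, size_t n)
{
	if (n > 16) {
		memcpy(to, from, n);	/* fast path: wide, cache-aware copy */
	} else {
		while (n) {		/* small copies: plain byte loop */
			*to++ = *from++;
			n--;
		}
	}
	return 0;			/* bytes NOT copied; 0 == full success */
}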
1519 .global __copy_user
1520__copy_user:
1521 pta __copy_user_byte_by_byte, tr1
1522 movi 16, r0 ! this value is a best guess, should tune it by benchmarking
1523 bge/u r0, r4, tr1
1524 pta copy_user_memcpy, tr0
1525 addi SP, -32, SP
1526 /* Save arguments in case we have to fix-up unhandled page fault */
1527 st.q SP, 0, r2
1528 st.q SP, 8, r3
1529 st.q SP, 16, r4
1530 st.q SP, 24, r35 ! r35 is callee-save
1531 /* Save LINK in a register to reduce RTS time later (otherwise
1532 ld SP,*,LINK;ptabs LINK;trn;blink trn,r63 becomes a critical path) */
1533 ori LINK, 0, r35
1534 blink tr0, LINK
1535
1536 /* Copy completed normally if we get back here */
1537 ptabs r35, tr0
1538 ld.q SP, 24, r35
1539 /* don't restore r2-r4, pointless */
1540 /* set result=r2 to zero as the copy must have succeeded. */
1541 or r63, r63, r2
1542 addi SP, 32, SP
1543 blink tr0, r63 ! RTS
1544
1545 .global __copy_user_fixup
1546__copy_user_fixup:
1547 /* Restore stack frame */
1548 ori r35, 0, LINK
1549 ld.q SP, 24, r35
1550 ld.q SP, 16, r4
1551 ld.q SP, 8, r3
1552 ld.q SP, 0, r2
1553 addi SP, 32, SP
1554 /* Fall through to original code, in the 'same' state we entered with */
1555
1556/* The slow byte-by-byte method is used if the fast copy traps due to a bad
1557 user address. In that rare case, the speed drop can be tolerated. */
1558__copy_user_byte_by_byte:
1559 pta ___copy_user_exit, tr1
1560 pta ___copy_user1, tr0
1561 beq/u r4, r63, tr1 /* early exit for zero length copy */
1562 sub r2, r3, r0
1563 addi r0, -1, r0
1564
1565___copy_user1:
1566 ld.b r3, 0, r5 /* Fault address 1 */
1567
1568 /* Could rewrite this to use just 1 add, but the second comes 'free'
1569 due to load latency */
1570 addi r3, 1, r3
1571 addi r4, -1, r4 /* No real fixup required */
1572___copy_user2:
1573 stx.b r3, r0, r5 /* Fault address 2 */
1574 bne r4, ZERO, tr0
1575
1576___copy_user_exit:
1577 or r4, ZERO, r2
1578 ptabs LINK, tr0
1579 blink tr0, ZERO
1580
1581/*
1582 * __kernel_size_t __clear_user(void *addr, __kernel_size_t size)
1583 *
1584 * Inputs:
1585 * (r2) target address
1586 * (r3) size in bytes
1587 *
 1588 * Outputs:
1589 * (*r2) zero-ed target data
1590 * (r2) non-zero-ed bytes
1591 */
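/* Sketch of the loop below (fault handling elided): a fault at the store
 * exits early, leaving the count of not-yet-zeroed bytes as the result. */
static unsigned long clear_user_model(char *addr, unsigned long size)
{
	while (size) {
		*addr++ = 0;
		size--;
	}
	return size;	/* 0 on full success */
}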
1592 .global __clear_user
1593__clear_user:
1594 pta ___clear_user_exit, tr1
1595 pta ___clear_user1, tr0
1596 beq/u r3, r63, tr1
1597
1598___clear_user1:
1599 st.b r2, 0, ZERO /* Fault address */
1600 addi r2, 1, r2
1601 addi r3, -1, r3 /* No real fixup required */
1602 bne r3, ZERO, tr0
1603
1604___clear_user_exit:
1605 or r3, ZERO, r2
1606 ptabs LINK, tr0
1607 blink tr0, ZERO
1608
1609
1610/*
1611 * int __strncpy_from_user(unsigned long __dest, unsigned long __src,
1612 * int __count)
1613 *
1614 * Inputs:
1615 * (r2) target address
1616 * (r3) source address
1617 * (r4) maximum size in bytes
1618 *
 1619 * Outputs:
1620 * (*r2) copied data
1621 * (r2) -EFAULT (in case of faulting)
1622 * copied data (otherwise)
1623 */
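/* A C model of the copy/termination semantics implemented below (fault
 * handling elided; a fault on the user-space load yields -EFAULT): */
static long strncpy_from_user_model(char *dst, const char *src, long count)
{
	long n = count;
	while (n) {
		char c = *src++;	/* the only faulting access */
		*dst++ = c;
		if (c == '\0')
			return count - n;	/* length copied, NUL not counted */
		n--;
	}
	return count;	/* no NUL within count bytes */
}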
1624 .global __strncpy_from_user
1625__strncpy_from_user:
1626 pta ___strncpy_from_user1, tr0
1627 pta ___strncpy_from_user_done, tr1
1628 or r4, ZERO, r5 /* r5 = original count */
1629 beq/u r4, r63, tr1 /* early exit if r4==0 */
1630 movi -(EFAULT), r6 /* r6 = reply, no real fixup */
1631 or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
1632
1633___strncpy_from_user1:
1634 ld.b r3, 0, r7 /* Fault address: only in reading */
1635 st.b r2, 0, r7
1636 addi r2, 1, r2
1637 addi r3, 1, r3
1638 beq/u ZERO, r7, tr1
1639 addi r4, -1, r4 /* return real number of copied bytes */
1640 bne/l ZERO, r4, tr0
1641
1642___strncpy_from_user_done:
1643 sub r5, r4, r6 /* If done, return copied */
1644
1645___strncpy_from_user_exit:
1646 or r6, ZERO, r2
1647 ptabs LINK, tr0
1648 blink tr0, ZERO
1649
1650/*
1651 * extern long __strnlen_user(const char *__s, long __n)
1652 *
1653 * Inputs:
1654 * (r2) source address
1655 * (r3) source size in bytes
1656 *
 1657 * Outputs:
1658 * (r2) -EFAULT (in case of faulting)
1659 * string length (otherwise)
1660 */
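/* A C model of the counting loop below (fault handling elided). Note the
 * terminating NUL is counted once; the commented-out extra increment at the
 * end of the loop would have counted it twice. */
static long strnlen_user_model(const char *s, long n)
{
	long len = 0;
	while (n--) {
		char c = s[len];	/* the only faulting access */
		len++;			/* counts the terminating NUL too */
		if (c == '\0')
			break;
	}
	return len;
}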
1661 .global __strnlen_user
1662__strnlen_user:
1663 pta ___strnlen_user_set_reply, tr0
1664 pta ___strnlen_user1, tr1
1665 or ZERO, ZERO, r5 /* r5 = counter */
1666 movi -(EFAULT), r6 /* r6 = reply, no real fixup */
1667 or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
1668 beq r3, ZERO, tr0
1669
1670___strnlen_user1:
1671 ldx.b r2, r5, r7 /* Fault address: only in reading */
1672 addi r3, -1, r3 /* No real fixup */
1673 addi r5, 1, r5
1674 beq r3, ZERO, tr0
1675 bne r7, ZERO, tr1
1676! The line below used to be active. It led to a junk byte lying between each pair
1677! of entries in the argv & envp structures in memory. Whilst the program saw the right data
1678! via the argv and envp arguments to main, it meant the 'flat' representation visible through
1679! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example.
1680! addi r5, 1, r5 /* Include '\0' */
1681
1682___strnlen_user_set_reply:
1683 or r5, ZERO, r6 /* If done, return counter */
1684
1685___strnlen_user_exit:
1686 or r6, ZERO, r2
1687 ptabs LINK, tr0
1688 blink tr0, ZERO
1689
1690/*
1691 * extern long __get_user_asm_?(void *val, long addr)
1692 *
1693 * Inputs:
1694 * (r2) dest address
1695 * (r3) source address (in User Space)
1696 *
 1697 * Outputs:
1698 * (r2) -EFAULT (faulting)
1699 * 0 (not faulting)
1700 */
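/* One illustrative C stand-in for the four size variants below (b/w/l/q).
 * On a faulting user-space load, the __ex_table fixup leaves -EFAULT in r2
 * instead of the 0 stored on success. */
#include <string.h>

static long get_user_model(void *val, const void *uaddr, unsigned int size)
{
	memcpy(val, uaddr, size);	/* ld.b/ld.w/ld.l/ld.q plus matching store */
	return 0;			/* 0 == no fault */
}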
1701 .global __get_user_asm_b
1702__get_user_asm_b:
1703 or r2, ZERO, r4
1704 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1705
1706___get_user_asm_b1:
1707 ld.b r3, 0, r5 /* r5 = data */
1708 st.b r4, 0, r5
1709 or ZERO, ZERO, r2
1710
1711___get_user_asm_b_exit:
1712 ptabs LINK, tr0
1713 blink tr0, ZERO
1714
1715
1716 .global __get_user_asm_w
1717__get_user_asm_w:
1718 or r2, ZERO, r4
1719 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1720
1721___get_user_asm_w1:
1722 ld.w r3, 0, r5 /* r5 = data */
1723 st.w r4, 0, r5
1724 or ZERO, ZERO, r2
1725
1726___get_user_asm_w_exit:
1727 ptabs LINK, tr0
1728 blink tr0, ZERO
1729
1730
1731 .global __get_user_asm_l
1732__get_user_asm_l:
1733 or r2, ZERO, r4
1734 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1735
1736___get_user_asm_l1:
1737 ld.l r3, 0, r5 /* r5 = data */
1738 st.l r4, 0, r5
1739 or ZERO, ZERO, r2
1740
1741___get_user_asm_l_exit:
1742 ptabs LINK, tr0
1743 blink tr0, ZERO
1744
1745
1746 .global __get_user_asm_q
1747__get_user_asm_q:
1748 or r2, ZERO, r4
1749 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1750
1751___get_user_asm_q1:
1752 ld.q r3, 0, r5 /* r5 = data */
1753 st.q r4, 0, r5
1754 or ZERO, ZERO, r2
1755
1756___get_user_asm_q_exit:
1757 ptabs LINK, tr0
1758 blink tr0, ZERO
1759
1760/*
1761 * extern long __put_user_asm_?(void *pval, long addr)
1762 *
1763 * Inputs:
1764 * (r2) kernel pointer to value
1765 * (r3) dest address (in User Space)
1766 *
 1767 * Outputs:
1768 * (r2) -EFAULT (faulting)
1769 * 0 (not faulting)
1770 */
1771 .global __put_user_asm_b
1772__put_user_asm_b:
1773 ld.b r2, 0, r4 /* r4 = data */
1774 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1775
1776___put_user_asm_b1:
1777 st.b r3, 0, r4
1778 or ZERO, ZERO, r2
1779
1780___put_user_asm_b_exit:
1781 ptabs LINK, tr0
1782 blink tr0, ZERO
1783
1784
1785 .global __put_user_asm_w
1786__put_user_asm_w:
1787 ld.w r2, 0, r4 /* r4 = data */
1788 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1789
1790___put_user_asm_w1:
1791 st.w r3, 0, r4
1792 or ZERO, ZERO, r2
1793
1794___put_user_asm_w_exit:
1795 ptabs LINK, tr0
1796 blink tr0, ZERO
1797
1798
1799 .global __put_user_asm_l
1800__put_user_asm_l:
1801 ld.l r2, 0, r4 /* r4 = data */
1802 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1803
1804___put_user_asm_l1:
1805 st.l r3, 0, r4
1806 or ZERO, ZERO, r2
1807
1808___put_user_asm_l_exit:
1809 ptabs LINK, tr0
1810 blink tr0, ZERO
1811
1812
1813 .global __put_user_asm_q
1814__put_user_asm_q:
1815 ld.q r2, 0, r4 /* r4 = data */
1816 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1817
1818___put_user_asm_q1:
1819 st.q r3, 0, r4
1820 or ZERO, ZERO, r2
1821
1822___put_user_asm_q_exit:
1823 ptabs LINK, tr0
1824 blink tr0, ZERO
1825
1826panic_stash_regs:
1827 /* The idea is : when we get an unhandled panic, we dump the registers
 1828 to a known memory location, then just sit in a tight loop.
1829 This allows the human to look at the memory region through the GDB
1830 session (assuming the debug module's SHwy initiator isn't locked up
1831 or anything), to hopefully analyze the cause of the panic. */
1832
1833 /* On entry, former r15 (SP) is in DCR
 1834 former r0 is at resvec_save_area + 0
 1835 former r1 is at resvec_save_area + 8
 1836 former tr0 is at resvec_save_area + 32
1837 DCR is the only register whose value is lost altogether.
1838 */
1839
1840 movi 0xffffffff80000000, r0 ! phy of dump area
1841 ld.q SP, 0x000, r1 ! former r0
1842 st.q r0, 0x000, r1
1843 ld.q SP, 0x008, r1 ! former r1
1844 st.q r0, 0x008, r1
1845 st.q r0, 0x010, r2
1846 st.q r0, 0x018, r3
1847 st.q r0, 0x020, r4
1848 st.q r0, 0x028, r5
1849 st.q r0, 0x030, r6
1850 st.q r0, 0x038, r7
1851 st.q r0, 0x040, r8
1852 st.q r0, 0x048, r9
1853 st.q r0, 0x050, r10
1854 st.q r0, 0x058, r11
1855 st.q r0, 0x060, r12
1856 st.q r0, 0x068, r13
1857 st.q r0, 0x070, r14
1858 getcon dcr, r14
1859 st.q r0, 0x078, r14
1860 st.q r0, 0x080, r16
1861 st.q r0, 0x088, r17
1862 st.q r0, 0x090, r18
1863 st.q r0, 0x098, r19
1864 st.q r0, 0x0a0, r20
1865 st.q r0, 0x0a8, r21
1866 st.q r0, 0x0b0, r22
1867 st.q r0, 0x0b8, r23
1868 st.q r0, 0x0c0, r24
1869 st.q r0, 0x0c8, r25
1870 st.q r0, 0x0d0, r26
1871 st.q r0, 0x0d8, r27
1872 st.q r0, 0x0e0, r28
1873 st.q r0, 0x0e8, r29
1874 st.q r0, 0x0f0, r30
1875 st.q r0, 0x0f8, r31
1876 st.q r0, 0x100, r32
1877 st.q r0, 0x108, r33
1878 st.q r0, 0x110, r34
1879 st.q r0, 0x118, r35
1880 st.q r0, 0x120, r36
1881 st.q r0, 0x128, r37
1882 st.q r0, 0x130, r38
1883 st.q r0, 0x138, r39
1884 st.q r0, 0x140, r40
1885 st.q r0, 0x148, r41
1886 st.q r0, 0x150, r42
1887 st.q r0, 0x158, r43
1888 st.q r0, 0x160, r44
1889 st.q r0, 0x168, r45
1890 st.q r0, 0x170, r46
1891 st.q r0, 0x178, r47
1892 st.q r0, 0x180, r48
1893 st.q r0, 0x188, r49
1894 st.q r0, 0x190, r50
1895 st.q r0, 0x198, r51
1896 st.q r0, 0x1a0, r52
1897 st.q r0, 0x1a8, r53
1898 st.q r0, 0x1b0, r54
1899 st.q r0, 0x1b8, r55
1900 st.q r0, 0x1c0, r56
1901 st.q r0, 0x1c8, r57
1902 st.q r0, 0x1d0, r58
1903 st.q r0, 0x1d8, r59
1904 st.q r0, 0x1e0, r60
1905 st.q r0, 0x1e8, r61
1906 st.q r0, 0x1f0, r62
1907 st.q r0, 0x1f8, r63 ! bogus, but for consistency's sake...
1908
1909 ld.q SP, 0x020, r1 ! former tr0
1910 st.q r0, 0x200, r1
1911 gettr tr1, r1
1912 st.q r0, 0x208, r1
1913 gettr tr2, r1
1914 st.q r0, 0x210, r1
1915 gettr tr3, r1
1916 st.q r0, 0x218, r1
1917 gettr tr4, r1
1918 st.q r0, 0x220, r1
1919 gettr tr5, r1
1920 st.q r0, 0x228, r1
1921 gettr tr6, r1
1922 st.q r0, 0x230, r1
1923 gettr tr7, r1
1924 st.q r0, 0x238, r1
1925
1926 getcon sr, r1
1927 getcon ssr, r2
1928 getcon pssr, r3
1929 getcon spc, r4
1930 getcon pspc, r5
1931 getcon intevt, r6
1932 getcon expevt, r7
1933 getcon pexpevt, r8
1934 getcon tra, r9
1935 getcon tea, r10
1936 getcon kcr0, r11
1937 getcon kcr1, r12
1938 getcon vbr, r13
1939 getcon resvec, r14
1940
1941 st.q r0, 0x240, r1
1942 st.q r0, 0x248, r2
1943 st.q r0, 0x250, r3
1944 st.q r0, 0x258, r4
1945 st.q r0, 0x260, r5
1946 st.q r0, 0x268, r6
1947 st.q r0, 0x270, r7
1948 st.q r0, 0x278, r8
1949 st.q r0, 0x280, r9
1950 st.q r0, 0x288, r10
1951 st.q r0, 0x290, r11
1952 st.q r0, 0x298, r12
1953 st.q r0, 0x2a0, r13
1954 st.q r0, 0x2a8, r14
1955
1956 getcon SPC,r2
1957 getcon SSR,r3
1958 getcon EXPEVT,r4
1959 /* Prepare to jump to C - physical address */
1960 movi panic_handler-CONFIG_CACHED_MEMORY_OFFSET, r1
1961 ori r1, 1, r1
1962 ptabs r1, tr0
1963 getcon DCR, SP
1964 blink tr0, ZERO
1965 nop
1966 nop
1967 nop
1968 nop
1969
1970
1971
1972
1973/*
1974 * --- Signal Handling Section
1975 */
1976
1977/*
1978 * extern long long _sa_default_rt_restorer
1979 * extern long long _sa_default_restorer
1980 *
1981 * or, better,
1982 *
1983 * extern void _sa_default_rt_restorer(void)
1984 * extern void _sa_default_restorer(void)
1985 *
 1986 * Code prototypes to do a sys_rt_sigreturn() or sys_sigreturn()
1987 * from user space. Copied into user space by signal management.
1988 * Both must be quad aligned and 2 quad long (4 instructions).
1989 *
1990 */
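/* Illustrative: the movi/shori pair in each restorer builds
 * r9 = (0x10 << 16) | syscall number, i.e. a TRA-style 0x1yzzzz value that
 * the syscall path earlier in this file recognizes (x == 1 marks a syscall). */
static unsigned long restorer_trap_word(unsigned int syscall_nr)
{
	return (0x10ul << 16) | syscall_nr;	/* e.g. 0x001000nn */
}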
1991 .balign 8
1992 .global sa_default_rt_restorer
1993sa_default_rt_restorer:
1994 movi 0x10, r9
1995 shori __NR_rt_sigreturn, r9
1996 trapa r9
1997 nop
1998
1999 .balign 8
2000 .global sa_default_restorer
2001sa_default_restorer:
2002 movi 0x10, r9
2003 shori __NR_sigreturn, r9
2004 trapa r9
2005 nop
2006
2007/*
2008 * --- __ex_table Section
2009 */
2010
2011/*
2012 * User Access Exception Table.
2013 */
2014 .section __ex_table, "a"
2015
2016 .global asm_uaccess_start /* Just a marker */
2017asm_uaccess_start:
2018
2019 .long ___copy_user1, ___copy_user_exit
2020 .long ___copy_user2, ___copy_user_exit
2021 .long ___clear_user1, ___clear_user_exit
2022 .long ___strncpy_from_user1, ___strncpy_from_user_exit
2023 .long ___strnlen_user1, ___strnlen_user_exit
2024 .long ___get_user_asm_b1, ___get_user_asm_b_exit
2025 .long ___get_user_asm_w1, ___get_user_asm_w_exit
2026 .long ___get_user_asm_l1, ___get_user_asm_l_exit
2027 .long ___get_user_asm_q1, ___get_user_asm_q_exit
2028 .long ___put_user_asm_b1, ___put_user_asm_b_exit
2029 .long ___put_user_asm_w1, ___put_user_asm_w_exit
2030 .long ___put_user_asm_l1, ___put_user_asm_l_exit
2031 .long ___put_user_asm_q1, ___put_user_asm_q_exit
2032
2033 .global asm_uaccess_end /* Just a marker */
2034asm_uaccess_end:
2035
2036
2037
2038
2039/*
2040 * --- .text.init Section
2041 */
2042
2043 .section .text.init, "ax"
2044
2045/*
2046 * void trap_init (void)
2047 *
2048 */
2049 .global trap_init
2050trap_init:
2051 addi SP, -24, SP /* Room to save r28/r29/r30 */
2052 st.q SP, 0, r28
2053 st.q SP, 8, r29
2054 st.q SP, 16, r30
2055
2056 /* Set VBR and RESVEC */
2057 movi LVBR_block, r19
2058 andi r19, -4, r19 /* reset MMUOFF + reserved */
2059 /* For RESVEC exceptions we force the MMU off, which means we need the
2060 physical address. */
2061 movi LRESVEC_block-CONFIG_CACHED_MEMORY_OFFSET, r20
2062 andi r20, -4, r20 /* reset reserved */
2063 ori r20, 1, r20 /* set MMUOFF */
2064 putcon r19, VBR
2065 putcon r20, RESVEC
2066
2067 /* Sanity check */
2068 movi LVBR_block_end, r21
2069 andi r21, -4, r21
2070 movi BLOCK_SIZE, r29 /* r29 = expected size */
2071 or r19, ZERO, r30
2072 add r19, r29, r19
2073
2074 /*
 2075 * Ugly, but better to loop forever now than to crash afterwards.
2076 * We should print a message, but if we touch LVBR or
2077 * LRESVEC blocks we should not be surprised if we get stuck
2078 * in trap_init().
2079 */
2080 pta trap_init_loop, tr1
2081 gettr tr1, r28 /* r28 = trap_init_loop */
2082 sub r21, r30, r30 /* r30 = actual size */
2083
2084 /*
2085 * VBR/RESVEC handlers overlap by being bigger than
2086 * allowed. Very bad. Just loop forever.
2087 * (r28) panic/loop address
2088 * (r29) expected size
2089 * (r30) actual size
2090 */
2091trap_init_loop:
2092 bne r19, r21, tr1
2093
2094 /* Now that exception vectors are set up reset SR.BL */
2095 getcon SR, r22
2096 movi SR_UNBLOCK_EXC, r23
2097 and r22, r23, r22
2098 putcon r22, SR
2099
2100 addi SP, 24, SP
2101 ptabs LINK, tr0
2102 blink tr0, ZERO
2103