aboutsummaryrefslogtreecommitdiffstats
path: root/arch/sh/kernel/cpu/sh5/entry.S
diff options
context:
space:
mode:
Diffstat (limited to 'arch/sh/kernel/cpu/sh5/entry.S')
-rw-r--r--arch/sh/kernel/cpu/sh5/entry.S2101
1 files changed, 2101 insertions, 0 deletions
diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S
new file mode 100644
index 00000000000..ba8750176d9
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/entry.S
@@ -0,0 +1,2101 @@
1/*
2 * arch/sh/kernel/cpu/sh5/entry.S
3 *
4 * Copyright (C) 2000, 2001 Paolo Alberelli
5 * Copyright (C) 2004 - 2007 Paul Mundt
6 * Copyright (C) 2003, 2004 Richard Curnow
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12#include <linux/errno.h>
13#include <linux/sys.h>
14#include <asm/cpu/registers.h>
15#include <asm/processor.h>
16#include <asm/unistd.h>
17#include <asm/thread_info.h>
18#include <asm/asm-offsets.h>
19
20/*
21 * SR fields.
22 */
23#define SR_ASID_MASK 0x00ff0000
24#define SR_FD_MASK 0x00008000
25#define SR_SS 0x08000000
26#define SR_BL 0x10000000
27#define SR_MD 0x40000000
28
29/*
30 * Event code.
31 */
32#define EVENT_INTERRUPT 0
33#define EVENT_FAULT_TLB 1
34#define EVENT_FAULT_NOT_TLB 2
35#define EVENT_DEBUG 3
36
37/* EXPEVT values */
38#define RESET_CAUSE 0x20
39#define DEBUGSS_CAUSE 0x980
40
41/*
42 * Frame layout. Quad index.
43 */
44#define FRAME_T(x) FRAME_TBASE+(x*8)
45#define FRAME_R(x) FRAME_RBASE+(x*8)
46#define FRAME_S(x) FRAME_SBASE+(x*8)
47#define FSPC 0
48#define FSSR 1
49#define FSYSCALL_ID 2
50
51/* Arrange the save frame to be a multiple of 32 bytes long */
52#define FRAME_SBASE 0
53#define FRAME_RBASE (FRAME_SBASE+(3*8)) /* SYSCALL_ID - SSR - SPC */
54#define FRAME_TBASE (FRAME_RBASE+(63*8)) /* r0 - r62 */
55#define FRAME_PBASE (FRAME_TBASE+(8*8)) /* tr0 -tr7 */
56#define FRAME_SIZE (FRAME_PBASE+(2*8)) /* pad0-pad1 */
57
58#define FP_FRAME_SIZE FP_FRAME_BASE+(33*8) /* dr0 - dr31 + fpscr */
59#define FP_FRAME_BASE 0
60
61#define SAVED_R2 0*8
62#define SAVED_R3 1*8
63#define SAVED_R4 2*8
64#define SAVED_R5 3*8
65#define SAVED_R18 4*8
66#define SAVED_R6 5*8
67#define SAVED_TR0 6*8
68
69/* These are the registers saved in the TLB path that aren't saved in the first
70 level of the normal one. */
71#define TLB_SAVED_R25 7*8
72#define TLB_SAVED_TR1 8*8
73#define TLB_SAVED_TR2 9*8
74#define TLB_SAVED_TR3 10*8
75#define TLB_SAVED_TR4 11*8
 76/* Save R0/R1 : PT-migrating compiler currently dishonours -ffixed-r0 and -ffixed-r1 causing
77 breakage otherwise. */
78#define TLB_SAVED_R0 12*8
79#define TLB_SAVED_R1 13*8
80
81#define CLI() \
82 getcon SR, r6; \
83 ori r6, 0xf0, r6; \
84 putcon r6, SR;
85
86#define STI() \
87 getcon SR, r6; \
88 andi r6, ~0xf0, r6; \
89 putcon r6, SR;
90
91#ifdef CONFIG_PREEMPT
92# define preempt_stop() CLI()
93#else
94# define preempt_stop()
95# define resume_kernel restore_all
96#endif
97
98 .section .data, "aw"
99
100#define FAST_TLBMISS_STACK_CACHELINES 4
101#define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES)
102
/* Static scratch save areas used by the first-level handlers before a
 * proper kernel stack frame exists.  All offsets into reg_save_area are
 * the SAVED_* / TLB_SAVED_* quad offsets defined above (14 quads total,
 * TLB_SAVED_R1 = 13*8 being the last). */
103/* Register back-up area for all exceptions */
 104 .balign 32
 105 /* Allow for 16 quadwords to be pushed by fast tlbmiss handling
 106 * register saves etc. */
 107 .fill FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0
 108/* This is 32 byte aligned by construction */
 109/* Register back-up area for all exceptions */
 110reg_save_area:
 111 .quad 0
 112 .quad 0
 113 .quad 0
 114 .quad 0
 115
 116 .quad 0
 117 .quad 0
 118 .quad 0
 119 .quad 0
 120
 121 .quad 0
 122 .quad 0
 123 .quad 0
 124 .quad 0
 125
 126 .quad 0
 127 .quad 0
 128
 129/* Save area for RESVEC exceptions. We cannot use reg_save_area because of
 130 * reentrancy. Note this area may be accessed via physical address.
 131 * Align so this fits a whole single cache line, for ease of purging.
 132 */
 133 .balign 32,0,32
resvec_save_area_marker: /* see note below */
 134resvec_save_area:
 135 .quad 0
 136 .quad 0
 137 .quad 0
 138 .quad 0
 139 .quad 0
 140 .balign 32,0,32
141
 142/* Jump table of 3rd level handlers */
/* Indexed by EXPEVT/INTEVT >> 5 (entries are .long, i.e. 4 bytes, and
 * event codes are spaced 0x20 apart); handle_exception computes the byte
 * offset as (code >> 3).  The inline hex comments give the event code
 * each slot corresponds to. */
 143trap_jtable:
 144 .long do_exception_error /* 0x000 */
 145 .long do_exception_error /* 0x020 */
 146 .long tlb_miss_load /* 0x040 */
 147 .long tlb_miss_store /* 0x060 */
 148 ! ARTIFICIAL pseudo-EXPEVT setting
 149 .long do_debug_interrupt /* 0x080 */
 150 .long tlb_miss_load /* 0x0A0 */
 151 .long tlb_miss_store /* 0x0C0 */
 152 .long do_address_error_load /* 0x0E0 */
 153 .long do_address_error_store /* 0x100 */
 154#ifdef CONFIG_SH_FPU
 155 .long do_fpu_error /* 0x120 */
 156#else
 157 .long do_exception_error /* 0x120 */
 158#endif
 159 .long do_exception_error /* 0x140 */
 160 .long system_call /* 0x160 */
 161 .long do_reserved_inst /* 0x180 */
 162 .long do_illegal_slot_inst /* 0x1A0 */
 163 .long do_exception_error /* 0x1C0 - NMI */
 164 .long do_exception_error /* 0x1E0 */
 165 .rept 15
 166 .long do_IRQ /* 0x200 - 0x3C0 */
 167 .endr
 168 .long do_exception_error /* 0x3E0 */
 169 .rept 32
 170 .long do_IRQ /* 0x400 - 0x7E0 */
 171 .endr
 172 .long fpu_error_or_IRQA /* 0x800 */
 173 .long fpu_error_or_IRQB /* 0x820 */
 174 .long do_IRQ /* 0x840 */
 175 .long do_IRQ /* 0x860 */
 176 .rept 6
 177 .long do_exception_error /* 0x880 - 0x920 */
 178 .endr
 179 .long do_software_break_point /* 0x940 */
 180 .long do_exception_error /* 0x960 */
 181 .long do_single_step /* 0x980 */
 182
 183 .rept 3
 184 .long do_exception_error /* 0x9A0 - 0x9E0 */
 185 .endr
 186 .long do_IRQ /* 0xA00 */
 187 .long do_IRQ /* 0xA20 */
 188 .long itlb_miss_or_IRQ /* 0xA40 */
 189 .long do_IRQ /* 0xA60 */
 190 .long do_IRQ /* 0xA80 */
 191 .long itlb_miss_or_IRQ /* 0xAA0 */
 192 .long do_exception_error /* 0xAC0 */
 193 .long do_address_error_exec /* 0xAE0 */
 194 .rept 8
 195 .long do_exception_error /* 0xB00 - 0xBE0 */
 196 .endr
 197 .rept 18
 198 .long do_IRQ /* 0xC00 - 0xE20 */
 199 .endr
200
201 .section .text64, "ax"
202
203/*
204 * --- Exception/Interrupt/Event Handling Section
205 */
206
207/*
208 * VBR and RESVEC blocks.
209 *
210 * First level handler for VBR-based exceptions.
211 *
212 * To avoid waste of space, align to the maximum text block size.
213 * This is assumed to be at most 128 bytes or 32 instructions.
214 * DO NOT EXCEED 32 instructions on the first level handlers !
215 *
216 * Also note that RESVEC is contained within the VBR block
217 * where the room left (1KB - TEXT_SIZE) allows placing
218 * the RESVEC block (at most 512B + TEXT_SIZE).
219 *
220 * So first (and only) level handler for RESVEC-based exceptions.
221 *
222 * Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss
 223 * and interrupt) we are very tight on register space until
224 * saving onto the stack frame, which is done in handle_exception().
225 *
226 */
227
228#define TEXT_SIZE 128
229#define BLOCK_SIZE 1664 /* Dynamic check, 13*128 */
230
 231 .balign TEXT_SIZE
/* First-level VBR handler for the "general exception" class (not a TLB
 * miss).  Stashes the caller's SP in KCR1 and a minimal register set in
 * reg_save_area, then tail-jumps to handle_exception with:
 *   r2 = EXPEVT, r3 = return address (ret_from_exception, SHmedia bit set),
 *   r4 = EVENT_FAULT_NOT_TLB, r5 = reg_save_area, SP = original SP.
 * Must stay within the per-vector TEXT_SIZE / 32-instruction budget. */
 232LVBR_block:
 233 .space 256, 0 /* Power-on class handler, */
 234 /* not required here */
 235not_a_tlb_miss:
 236 synco /* TAKum03020 (but probably a good idea anyway.) */
 237 /* Save original stack pointer into KCR1 */
 238 putcon SP, KCR1
 239
 240 /* Save other original registers into reg_save_area */
 241 movi reg_save_area, SP
 242 st.q SP, SAVED_R2, r2
 243 st.q SP, SAVED_R3, r3
 244 st.q SP, SAVED_R4, r4
 245 st.q SP, SAVED_R5, r5
 246 st.q SP, SAVED_R6, r6
 247 st.q SP, SAVED_R18, r18
 248 gettr tr0, r3
 249 st.q SP, SAVED_TR0, r3
 250
 251 /* Set args for Non-debug, Not a TLB miss class handler */
 252 getcon EXPEVT, r2
 253 movi ret_from_exception, r3
 254 ori r3, 1, r3 /* set bit 0: SHmedia mode for ptabs later */
 255 movi EVENT_FAULT_NOT_TLB, r4
 256 or SP, ZERO, r5
 257 getcon KCR1, SP
 258 pta handle_exception, tr0
 259 blink tr0, ZERO
 260
 261 .balign 256
 262 ! VBR+0x200
 263 nop
 264 .balign 256
 265 ! VBR+0x300
 266 nop
 267 .balign 256
 268 /*
 269 * Instead of the natural .balign 1024 place RESVEC here
 270 * respecting the final 1KB alignment.
 271 */
 272 .balign TEXT_SIZE
 273 /*
 274 * Instead of '.space 1024-TEXT_SIZE' place the RESVEC
 275 * block making sure the final alignment is correct.
 276 */
/* First-level TLB miss handler (VBR+0x400).  Tries the fast path
 * (do_fast_page_fault) first; r2 on return is SSR.MD and doubles as the
 * "handled" flag checked below.  If the fast path fails, restores the
 * extra TLB-path registers and falls into the common second-level setup
 * (EVENT_FAULT_TLB).  The trailing nops keep "interrupt" at VBR+0x600. */
 277tlb_miss:
 278 synco /* TAKum03020 (but probably a good idea anyway.) */
 279 putcon SP, KCR1
 280 movi reg_save_area, SP
 281 /* SP is guaranteed 32-byte aligned. */
 282 st.q SP, TLB_SAVED_R0 , r0
 283 st.q SP, TLB_SAVED_R1 , r1
 284 st.q SP, SAVED_R2 , r2
 285 st.q SP, SAVED_R3 , r3
 286 st.q SP, SAVED_R4 , r4
 287 st.q SP, SAVED_R5 , r5
 288 st.q SP, SAVED_R6 , r6
 289 st.q SP, SAVED_R18, r18
 290
 291 /* Save R25 for safety; as/ld may want to use it to achieve the call to
 292 * the code in mm/tlbmiss.c */
 293 st.q SP, TLB_SAVED_R25, r25
 294 gettr tr0, r2
 295 gettr tr1, r3
 296 gettr tr2, r4
 297 gettr tr3, r5
 298 gettr tr4, r18
 299 st.q SP, SAVED_TR0 , r2
 300 st.q SP, TLB_SAVED_TR1 , r3
 301 st.q SP, TLB_SAVED_TR2 , r4
 302 st.q SP, TLB_SAVED_TR3 , r5
 303 st.q SP, TLB_SAVED_TR4 , r18
 304
 305 pt do_fast_page_fault, tr0
 306 getcon SSR, r2
 307 getcon EXPEVT, r3
 308 getcon TEA, r4
 309 shlri r2, 30, r2
 310 andi r2, 1, r2 /* r2 = SSR.MD */
 311 blink tr0, LINK
 312
 313 pt fixup_to_invoke_general_handler, tr1
 314
 315 /* If the fast path handler fixed the fault, just drop through quickly
 316 to the restore code right away to return to the excepting context.
 317 */
 318 beqi/u r2, 0, tr1
 319
/* Fast path succeeded: undo every save made above and rte straight back. */
 320fast_tlb_miss_restore:
 321 ld.q SP, SAVED_TR0, r2
 322 ld.q SP, TLB_SAVED_TR1, r3
 323 ld.q SP, TLB_SAVED_TR2, r4
 324
 325 ld.q SP, TLB_SAVED_TR3, r5
 326 ld.q SP, TLB_SAVED_TR4, r18
 327
 328 ptabs r2, tr0
 329 ptabs r3, tr1
 330 ptabs r4, tr2
 331 ptabs r5, tr3
 332 ptabs r18, tr4
 333
 334 ld.q SP, TLB_SAVED_R0, r0
 335 ld.q SP, TLB_SAVED_R1, r1
 336 ld.q SP, SAVED_R2, r2
 337 ld.q SP, SAVED_R3, r3
 338 ld.q SP, SAVED_R4, r4
 339 ld.q SP, SAVED_R5, r5
 340 ld.q SP, SAVED_R6, r6
 341 ld.q SP, SAVED_R18, r18
 342 ld.q SP, TLB_SAVED_R25, r25
 343
 344 getcon KCR1, SP
 345 rte
 346 nop /* for safety, in case the code is run on sh5-101 cut1.x */
 347
 348fixup_to_invoke_general_handler:
 349
 350 /* OK, new method. Restore stuff that's not expected to get saved into
 351 the 'first-level' reg save area, then just fall through to setting
 352 up the registers and calling the second-level handler. */
 353
 354 /* 2nd level expects r2,3,4,5,6,18,tr0 to be saved. So we must restore
 355 r25,tr1-4 and save r6 to get into the right state. */
 356
 357 ld.q SP, TLB_SAVED_TR1, r3
 358 ld.q SP, TLB_SAVED_TR2, r4
 359 ld.q SP, TLB_SAVED_TR3, r5
 360 ld.q SP, TLB_SAVED_TR4, r18
 361 ld.q SP, TLB_SAVED_R25, r25
 362
 363 ld.q SP, TLB_SAVED_R0, r0
 364 ld.q SP, TLB_SAVED_R1, r1
 365
 366 ptabs/u r3, tr1
 367 ptabs/u r4, tr2
 368 ptabs/u r5, tr3
 369 ptabs/u r18, tr4
 370
 371 /* Set args for Non-debug, TLB miss class handler */
 372 getcon EXPEVT, r2
 373 movi ret_from_exception, r3
 374 ori r3, 1, r3
 375 movi EVENT_FAULT_TLB, r4
 376 or SP, ZERO, r5
 377 getcon KCR1, SP
 378 pta handle_exception, tr0
 379 blink tr0, ZERO
 380
 381/* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
 382 DOES END UP AT VBR+0x600 */
 383 nop
 384 nop
 385 nop
 386 nop
 387 nop
 388 nop
389
 390 .balign 256
 391 /* VBR + 0x600 */
 392
/* First-level interrupt handler: same save protocol as not_a_tlb_miss,
 * but the event code comes from INTEVT, the class is EVENT_INTERRUPT and
 * the second-level return address is ret_from_irq. */
 393interrupt:
 394 synco /* TAKum03020 (but probably a good idea anyway.) */
 395 /* Save original stack pointer into KCR1 */
 396 putcon SP, KCR1
 397
 398 /* Save other original registers into reg_save_area */
 399 movi reg_save_area, SP
 400 st.q SP, SAVED_R2, r2
 401 st.q SP, SAVED_R3, r3
 402 st.q SP, SAVED_R4, r4
 403 st.q SP, SAVED_R5, r5
 404 st.q SP, SAVED_R6, r6
 405 st.q SP, SAVED_R18, r18
 406 gettr tr0, r3
 407 st.q SP, SAVED_TR0, r3
 408
 409 /* Set args for interrupt class handler */
 410 getcon INTEVT, r2
 411 movi ret_from_irq, r3
 412 ori r3, 1, r3
 413 movi EVENT_INTERRUPT, r4
 414 or SP, ZERO, r5
 415 getcon KCR1, SP
 416 pta handle_exception, tr0
 417 blink tr0, ZERO
 418 .balign TEXT_SIZE /* let's waste the bare minimum */
 419
 420LVBR_block_end: /* Marker. Used for total checking */
421
 422 .balign 256
 423LRESVEC_block:
 424 /* Panic handler. Called with MMU off. Possible causes/actions:
 425 * - Reset: Jump to program start.
 426 * - Single Step: Turn off Single Step & return.
 427 * - Others: Call panic handler, passing PC as arg.
 428 * (this may need to be extended...)
 429 */
/* Runs in real mode: all data references use -CONFIG_PAGE_OFFSET to get
 * physical addresses.  SP is parked in DCR while we borrow it. */
 430reset_or_panic:
 431 synco /* TAKum03020 (but probably a good idea anyway.) */
 432 putcon SP, DCR
 433 /* First save r0-1 and tr0, as we need to use these */
 434 movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
 435 st.q SP, 0, r0
 436 st.q SP, 8, r1
 437 gettr tr0, r0
 438 st.q SP, 32, r0
 439
 440 /* Check cause */
 441 getcon EXPEVT, r0
 442 movi RESET_CAUSE, r1
 443 sub r1, r0, r1 /* r1=0 if reset */
 444 movi _stext-CONFIG_PAGE_OFFSET, r0
 445 ori r0, 1, r0
 446 ptabs r0, tr0
 447 beqi r1, 0, tr0 /* Jump to start address if reset */
 448
 449 getcon EXPEVT, r0
 450 movi DEBUGSS_CAUSE, r1
 451 sub r1, r0, r1 /* r1=0 if single step */
 452 pta single_step_panic, tr0
 453 beqi r1, 0, tr0 /* jump if single step */
 454
 455 /* Now jump to where we save the registers. */
 456 movi panic_stash_regs-CONFIG_PAGE_OFFSET, r1
 457 ptabs r1, tr0
 458 blink tr0, r63
 459
 460single_step_panic:
 461 /* We are in a handler with Single Step set. We need to resume the
 462 * handler, by turning on MMU & turning off Single Step. */
 463 getcon SSR, r0
 464 movi SR_MMU, r1
 465 or r0, r1, r0
 466 movi ~SR_SS, r1
 467 and r0, r1, r0
 468 putcon r0, SSR
 469 /* Restore EXPEVT, as the rte won't do this */
 470 getcon PEXPEVT, r0
 471 putcon r0, EXPEVT
 472 /* Restore regs */
 473 ld.q SP, 32, r0
 474 ptabs r0, tr0
 475 ld.q SP, 0, r0
 476 ld.q SP, 8, r1
 477 getcon DCR, SP
 478 synco
 479 rte
480
481
 482 .balign 256
 483debug_exception:
 484 synco /* TAKum03020 (but probably a good idea anyway.) */
 485 /*
 486 * Single step/software_break_point first level handler.
 487 * Called with MMU off, so the first thing we do is enable it
 488 * by doing an rte with appropriate SSR.
 489 */
 490 putcon SP, DCR
 491 /* Save SSR & SPC, together with R0 & R1, as we need to use 2 regs. */
 492 movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
 493
 494 /* With the MMU off, we are bypassing the cache, so purge any
 495 * data that will be made stale by the following stores.
 496 */
 497 ocbp SP, 0
 498 synco
 499
 500 st.q SP, 0, r0
 501 st.q SP, 8, r1
 502 getcon SPC, r0
 503 st.q SP, 16, r0
 504 getcon SSR, r0
 505 st.q SP, 24, r0
 506
 507 /* Enable MMU, block exceptions, set priv mode, disable single step */
 508 movi SR_MMU | SR_BL | SR_MD, r1
 509 or r0, r1, r0
 510 movi ~SR_SS, r1
 511 and r0, r1, r0
 512 putcon r0, SSR
 513 /* Force control to debug_exception_2 when rte is executed */
 /* (label below is misspelled "exeception" — consistent at both uses,
  * so it assembles fine; kept as-is in this doc-only pass) */
 514 movi debug_exeception_2, r0
 515 ori r0, 1, r0 /* force SHmedia, just in case */
 516 putcon r0, SPC
 517 getcon DCR, SP
 518 synco
 519 rte
/* MMU is back on from here: re-read the saved state via virtual addresses
 * and fall into the common second-level protocol with EVENT_DEBUG. */
 520debug_exeception_2:
 521 /* Restore saved regs */
 522 putcon SP, KCR1
 523 movi resvec_save_area, SP
 524 ld.q SP, 24, r0
 525 putcon r0, SSR
 526 ld.q SP, 16, r0
 527 putcon r0, SPC
 528 ld.q SP, 0, r0
 529 ld.q SP, 8, r1
 530
 531 /* Save other original registers into reg_save_area */
 532 movi reg_save_area, SP
 533 st.q SP, SAVED_R2, r2
 534 st.q SP, SAVED_R3, r3
 535 st.q SP, SAVED_R4, r4
 536 st.q SP, SAVED_R5, r5
 537 st.q SP, SAVED_R6, r6
 538 st.q SP, SAVED_R18, r18
 539 gettr tr0, r3
 540 st.q SP, SAVED_TR0, r3
 541
 542 /* Set args for debug class handler */
 543 getcon EXPEVT, r2
 544 movi ret_from_exception, r3
 545 ori r3, 1, r3
 546 movi EVENT_DEBUG, r4
 547 or SP, ZERO, r5
 548 getcon KCR1, SP
 549 pta handle_exception, tr0
 550 blink tr0, ZERO
551
 552 .balign 256
 553debug_interrupt:
 554 /* !!! WE COME HERE IN REAL MODE !!! */
 555 /* Hook-up debug interrupt to allow various debugging options to be
 556 * hooked into its handler. */
 557 /* Save original stack pointer into KCR1 */
 558 synco
 559 putcon SP, KCR1
 560 movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
 561 ocbp SP, 0
 562 ocbp SP, 32
 563 synco
 564
 565 /* Save other original registers into reg_save_area thru real addresses */
 566 st.q SP, SAVED_R2, r2
 567 st.q SP, SAVED_R3, r3
 568 st.q SP, SAVED_R4, r4
 569 st.q SP, SAVED_R5, r5
 570 st.q SP, SAVED_R6, r6
 571 st.q SP, SAVED_R18, r18
 572 gettr tr0, r3
 573 st.q SP, SAVED_TR0, r3
 574
 575 /* move (spc,ssr)->(pspc,pssr). The rte will shift
 576 them back again, so that they look like the originals
 577 as far as the real handler code is concerned. */
 578 getcon spc, r6
 579 putcon r6, pspc
 580 getcon ssr, r6
 581 putcon r6, pssr
 582
 583 ! construct useful SR for handle_exception
 584 movi 3, r6
 585 shlli r6, 30, r6
 586 getcon sr, r18
 587 or r18, r6, r6
 588 putcon r6, ssr
 589
 590 ! SSR is now the current SR with the MD and MMU bits set
 591 ! i.e. the rte will switch back to priv mode and put
 592 ! the mmu back on
 593
 594 ! construct spc
 595 movi handle_exception, r18
 596 ori r18, 1, r18 ! for safety (do we need this?)
 597 putcon r18, spc
 598
 599 /* Set args for Non-debug, Not a TLB miss class handler */
 600
 601 ! EXPEVT==0x80 is unused, so 'steal' this value to put the
 602 ! debug interrupt handler in the vectoring table
 603 movi 0x80, r2
 604 movi ret_from_exception, r3
 605 ori r3, 1, r3
 606 movi EVENT_FAULT_NOT_TLB, r4
 607
 /* r5 holds the physical save-area address; convert it back to the
  * virtual address handle_exception expects. */
 608 or SP, ZERO, r5
 609 movi CONFIG_PAGE_OFFSET, r6
 610 add r6, r5, r5
 611 getcon KCR1, SP
 612
 613 synco ! for safety
 614 rte ! -> handle_exception, switch back to priv mode again
 615
 616LRESVEC_block_end: /* Marker. Unused. */
617
618 .balign TEXT_SIZE
619
620/*
621 * Second level handler for VBR-based exceptions. Pre-handler.
622 * In common to all stack-frame sensitive handlers.
623 *
624 * Inputs:
625 * (KCR0) Current [current task union]
626 * (KCR1) Original SP
627 * (r2) INTEVT/EXPEVT
628 * (r3) appropriate return address
629 * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault, 3=debug)
630 * (r5) Pointer to reg_save_area
631 * (SP) Original SP
632 *
633 * Available registers:
634 * (r6)
635 * (r18)
636 * (tr0)
637 *
638 */
/* Common second-level handler: builds the full pt_regs BASIC frame on the
 * kernel stack, re-enables exceptions safely, and dispatches through
 * trap_jtable.  See the interface comment block above for register
 * conventions on entry (KCR0/KCR1, r2-r5) and which regs are free. */
 639handle_exception:
 640 /* Common 2nd level handler. */
 641
 642 /* First thing we need an appropriate stack pointer */
 643 getcon SSR, r6
 644 shlri r6, 30, r6
 645 andi r6, 1, r6 /* r6 = SSR.MD: 1 = came from kernel mode */
 646 pta stack_ok, tr0
 647 bne r6, ZERO, tr0 /* Original stack pointer is fine */
 648
 649 /* Set stack pointer for user fault */
 650 getcon KCR0, SP
 651 movi THREAD_SIZE, r6 /* Point to the end */
 652 add SP, r6, SP
 653
 654stack_ok:
 655
 656/* DEBUG : check for underflow/overflow of the kernel stack */
 657 pta no_underflow, tr0
 658 getcon KCR0, r6
 659 movi 1024, r18
 660 add r6, r18, r6
 661 bge SP, r6, tr0 ! ? below 1k from bottom of stack : danger zone
 662
 663/* Just panic to cause a crash. */
 664bad_sp:
 665 ld.b r63, 0, r6 /* load from address 0 -> deliberate fault */
 666 nop
 667
 668no_underflow:
 669 pta bad_sp, tr0
 670 getcon kcr0, r6
 671 movi THREAD_SIZE, r18
 672 add r18, r6, r6
 673 bgt SP, r6, tr0 ! sp above the stack
 674
 675 /* Make some room for the BASIC frame. */
 676 movi -(FRAME_SIZE), r6
 677 add SP, r6, SP
 678
 679/* Could do this with no stalling if we had another spare register, but the
 680 code below will be OK. */
 /* Copy the first-level saves (r5 = reg_save_area) into the frame,
  * interleaving loads and stores to hide load latency. */
 681 ld.q r5, SAVED_R2, r6
 682 ld.q r5, SAVED_R3, r18
 683 st.q SP, FRAME_R(2), r6
 684 ld.q r5, SAVED_R4, r6
 685 st.q SP, FRAME_R(3), r18
 686 ld.q r5, SAVED_R5, r18
 687 st.q SP, FRAME_R(4), r6
 688 ld.q r5, SAVED_R6, r6
 689 st.q SP, FRAME_R(5), r18
 690 ld.q r5, SAVED_R18, r18
 691 st.q SP, FRAME_R(6), r6
 692 ld.q r5, SAVED_TR0, r6
 693 st.q SP, FRAME_R(18), r18
 694 st.q SP, FRAME_T(0), r6
 695
 696 /* Keep old SP around */
 697 getcon KCR1, r6
 698
 699 /* Save the rest of the general purpose registers */
 700 st.q SP, FRAME_R(0), r0
 701 st.q SP, FRAME_R(1), r1
 702 st.q SP, FRAME_R(7), r7
 703 st.q SP, FRAME_R(8), r8
 704 st.q SP, FRAME_R(9), r9
 705 st.q SP, FRAME_R(10), r10
 706 st.q SP, FRAME_R(11), r11
 707 st.q SP, FRAME_R(12), r12
 708 st.q SP, FRAME_R(13), r13
 709 st.q SP, FRAME_R(14), r14
 710
 711 /* SP is somewhere else */
 712 st.q SP, FRAME_R(15), r6
 713
 714 st.q SP, FRAME_R(16), r16
 715 st.q SP, FRAME_R(17), r17
 716 /* r18 is saved earlier. */
 717 st.q SP, FRAME_R(19), r19
 718 st.q SP, FRAME_R(20), r20
 719 st.q SP, FRAME_R(21), r21
 720 st.q SP, FRAME_R(22), r22
 721 st.q SP, FRAME_R(23), r23
 722 st.q SP, FRAME_R(24), r24
 723 st.q SP, FRAME_R(25), r25
 724 st.q SP, FRAME_R(26), r26
 725 st.q SP, FRAME_R(27), r27
 726 st.q SP, FRAME_R(28), r28
 727 st.q SP, FRAME_R(29), r29
 728 st.q SP, FRAME_R(30), r30
 729 st.q SP, FRAME_R(31), r31
 730 st.q SP, FRAME_R(32), r32
 731 st.q SP, FRAME_R(33), r33
 732 st.q SP, FRAME_R(34), r34
 733 st.q SP, FRAME_R(35), r35
 734 st.q SP, FRAME_R(36), r36
 735 st.q SP, FRAME_R(37), r37
 736 st.q SP, FRAME_R(38), r38
 737 st.q SP, FRAME_R(39), r39
 738 st.q SP, FRAME_R(40), r40
 739 st.q SP, FRAME_R(41), r41
 740 st.q SP, FRAME_R(42), r42
 741 st.q SP, FRAME_R(43), r43
 742 st.q SP, FRAME_R(44), r44
 743 st.q SP, FRAME_R(45), r45
 744 st.q SP, FRAME_R(46), r46
 745 st.q SP, FRAME_R(47), r47
 746 st.q SP, FRAME_R(48), r48
 747 st.q SP, FRAME_R(49), r49
 748 st.q SP, FRAME_R(50), r50
 749 st.q SP, FRAME_R(51), r51
 750 st.q SP, FRAME_R(52), r52
 751 st.q SP, FRAME_R(53), r53
 752 st.q SP, FRAME_R(54), r54
 753 st.q SP, FRAME_R(55), r55
 754 st.q SP, FRAME_R(56), r56
 755 st.q SP, FRAME_R(57), r57
 756 st.q SP, FRAME_R(58), r58
 757 st.q SP, FRAME_R(59), r59
 758 st.q SP, FRAME_R(60), r60
 759 st.q SP, FRAME_R(61), r61
 760 st.q SP, FRAME_R(62), r62
 761
 762 /*
 763 * Save the S* registers.
 764 */
 765 getcon SSR, r61
 766 st.q SP, FRAME_S(FSSR), r61
 767 getcon SPC, r62
 768 st.q SP, FRAME_S(FSPC), r62
 769 movi -1, r62 /* Reset syscall_nr */
 770 st.q SP, FRAME_S(FSYSCALL_ID), r62
 771
 772 /* Save the rest of the target registers */
 773 gettr tr1, r6
 774 st.q SP, FRAME_T(1), r6
 775 gettr tr2, r6
 776 st.q SP, FRAME_T(2), r6
 777 gettr tr3, r6
 778 st.q SP, FRAME_T(3), r6
 779 gettr tr4, r6
 780 st.q SP, FRAME_T(4), r6
 781 gettr tr5, r6
 782 st.q SP, FRAME_T(5), r6
 783 gettr tr6, r6
 784 st.q SP, FRAME_T(6), r6
 785 gettr tr7, r6
 786 st.q SP, FRAME_T(7), r6
 787
 788 ! setup FP so that unwinder can wind back through nested kernel mode
 789 ! exceptions
 790 add SP, ZERO, r14
 791
 792#ifdef CONFIG_POOR_MANS_STRACE
 793 /* We've pushed all the registers now, so only r2-r4 hold anything
 794 * useful. Move them into callee save registers */
 795 or r2, ZERO, r28
 796 or r3, ZERO, r29
 797 or r4, ZERO, r30
 798
 799 /* Preserve r2 as the event code */
 800 movi evt_debug, r3
 801 ori r3, 1, r3
 802 ptabs r3, tr0
 803
 804 or SP, ZERO, r6
 805 getcon TRA, r5
 806 blink tr0, LINK
 807
 808 or r28, ZERO, r2
 809 or r29, ZERO, r3
 810 or r30, ZERO, r4
 811#endif
 812
 813 /* For syscall and debug race condition, get TRA now */
 814 getcon TRA, r5
 815
 816 /* We are in a safe position to turn SR.BL off, but set IMASK=0xf
 817 * Also set FD, to catch FPU usage in the kernel.
 818 *
 819 * benedict.gaster@superh.com 29/07/2002
 820 *
 821 * On all SH5-101 revisions it is unsafe to raise the IMASK and at the
 822 * same time change BL from 1->0, as any pending interrupt of a level
 823 * higher than the previous value of IMASK will leak through and be
 824 * taken unexpectedly.
 825 *
 826 * To avoid this we raise the IMASK and then issue another PUTCON to
 827 * enable interrupts.
 828 */
 829 getcon SR, r6
 830 movi SR_IMASK | SR_FD, r7
 831 or r6, r7, r6
 832 putcon r6, SR
 833 movi SR_UNBLOCK_EXC, r7
 834 and r6, r7, r6
 835 putcon r6, SR
 836
 837
 838 /* Now call the appropriate 3rd level handler */
 /* Event codes are spaced 0x20 apart and jtable entries are 4 bytes, so
  * byte offset = code >> 3; the second shift leaves code >> 5 (the entry
  * number) in r2 as the handler's first argument. */
 839 or r3, ZERO, LINK
 840 movi trap_jtable, r3
 841 shlri r2, 3, r2
 842 ldx.l r2, r3, r3
 843 shlri r2, 2, r2
 844 ptabs r3, tr0
 845 or SP, ZERO, r3
 846 blink tr0, ZERO
847
848/*
849 * Second level handler for VBR-based exceptions. Post-handlers.
850 *
851 * Post-handlers for interrupts (ret_from_irq), exceptions
852 * (ret_from_exception) and common reentrance doors (restore_all
853 * to get back to the original context, ret_from_syscall loop to
854 * check kernel exiting).
855 *
 856 * ret_with_reschedule and work_notifysig are inner labels of
857 * the ret_from_syscall loop.
858 *
859 * In common to all stack-frame sensitive handlers.
860 *
861 * Inputs:
862 * (SP) struct pt_regs *, original register's frame pointer (basic)
863 *
864 */
 865 .global ret_from_irq
/* Post-handler for interrupts: if we interrupted the kernel (SSR.MD set)
 * go to resume_kernel, otherwise re-enable interrupts and take the
 * userspace-exit path (reschedule/signal checks). */
 866ret_from_irq:
 867#ifdef CONFIG_POOR_MANS_STRACE
 868 pta evt_debug_ret_from_irq, tr0
 869 ori SP, 0, r2
 870 blink tr0, LINK
 871#endif
 872 ld.q SP, FRAME_S(FSSR), r6
 873 shlri r6, 30, r6
 874 andi r6, 1, r6 /* r6 = saved SSR.MD */
 875 pta resume_kernel, tr0
 876 bne r6, ZERO, tr0 /* no further checks */
 877 STI()
 878 pta ret_with_reschedule, tr0
 879 blink tr0, ZERO /* Do not check softirqs */
880
 881 .global ret_from_exception
/* Post-handler for exceptions; same kernel/user split as ret_from_irq.
 * Without CONFIG_PREEMPT, resume_kernel is #defined to restore_all and
 * the block below is compiled out. */
 882ret_from_exception:
 883 preempt_stop()
 884
 885#ifdef CONFIG_POOR_MANS_STRACE
 886 pta evt_debug_ret_from_exc, tr0
 887 ori SP, 0, r2
 888 blink tr0, LINK
 889#endif
 890
 891 ld.q SP, FRAME_S(FSSR), r6
 892 shlri r6, 30, r6
 893 andi r6, 1, r6
 894 pta resume_kernel, tr0
 895 bne r6, ZERO, tr0 /* no further checks */
 896
 897 /* Check softirqs */
 898
 899#ifdef CONFIG_PREEMPT
 900 pta ret_from_syscall, tr0
 901 blink tr0, ZERO
 902
 903resume_kernel:
 904 pta restore_all, tr0
 905
 /* NOTE(review): the next branch goes to restore_all when
  * preempt_count == 0, and the TIF_NEED_RESCHED test below branches to
  * restore_all when the flag IS set.  Both senses look inverted relative
  * to conventional kernel-preemption logic — confirm against a known-good
  * sh64 tree before relying on CONFIG_PREEMPT here. */
 906 getcon KCR0, r6
 907 ld.l r6, TI_PRE_COUNT, r7
 908 beq/u r7, ZERO, tr0
 909
 910need_resched:
 911 ld.l r6, TI_FLAGS, r7
 912 movi (1 << TIF_NEED_RESCHED), r8
 913 and r8, r7, r8
 914 bne r8, ZERO, tr0
 915
 /* Give up if interrupts are masked (SR.IMASK != 0). */
 916 getcon SR, r7
 917 andi r7, 0xf0, r7
 918 bne r7, ZERO, tr0
 919
 920 movi ((PREEMPT_ACTIVE >> 16) & 65535), r8
 921 shori (PREEMPT_ACTIVE & 65535), r8
 922 st.l r6, TI_PRE_COUNT, r8
 923
 924 STI()
 925 movi schedule, r7
 926 ori r7, 1, r7
 927 ptabs r7, tr1
 928 blink tr1, LINK
 929
 930 st.l r6, TI_PRE_COUNT, ZERO
 931 CLI()
 932
 933 pta need_resched, tr1
 934 blink tr1, ZERO
 935#endif
936
 937 .global ret_from_syscall
/* Userspace-exit loop: reschedule if TIF_NEED_RESCHED, deliver signals if
 * pending, otherwise fall through tr1 to restore_all.  work_resched and
 * work_notifysig both re-enter at ret_from_syscall to re-check flags. */
 938ret_from_syscall:
 939
 940ret_with_reschedule:
 941 getcon KCR0, r6 ! r6 contains current_thread_info
 942 ld.l r6, TI_FLAGS, r7 ! r7 contains current_thread_info->flags
 943
 944 movi _TIF_NEED_RESCHED, r8
 945 and r8, r7, r8
 946 pta work_resched, tr0
 947 bne r8, ZERO, tr0
 948
 949 pta restore_all, tr1
 950
 951 movi (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r8
 952 and r8, r7, r8
 953 pta work_notifysig, tr0
 954 bne r8, ZERO, tr0
 955
 956 blink tr1, ZERO
 957
 958work_resched:
 959 pta ret_from_syscall, tr0
 960 gettr tr0, LINK
 961 movi schedule, r6
 962 ptabs r6, tr0
 963 blink tr0, ZERO /* Call schedule(), return on top */
 964
 965work_notifysig:
 /* LINK := restore_all (from tr1), so do_signal returns straight there. */
 966 gettr tr1, LINK
 967
 968 movi do_signal, r6
 969 ptabs r6, tr0
 970 or SP, ZERO, r2
 971 or ZERO, ZERO, r3
 972 blink tr0, LINK /* Call do_signal(regs, 0), return here */
973
/* Unwind the BASIC frame and rte back to the saved context.  Order
 * matters: SR.BL is raised before touching SSR/SPC so no new exception
 * can clobber them, and r59-r62 are reloaded only after their scratch use
 * for the SR/SSR surgery.  SP itself is restored last. */
 974restore_all:
 975 /* Do prefetches */
 976
 977 ld.q SP, FRAME_T(0), r6
 978 ld.q SP, FRAME_T(1), r7
 979 ld.q SP, FRAME_T(2), r8
 980 ld.q SP, FRAME_T(3), r9
 981 ptabs r6, tr0
 982 ptabs r7, tr1
 983 ptabs r8, tr2
 984 ptabs r9, tr3
 985 ld.q SP, FRAME_T(4), r6
 986 ld.q SP, FRAME_T(5), r7
 987 ld.q SP, FRAME_T(6), r8
 988 ld.q SP, FRAME_T(7), r9
 989 ptabs r6, tr4
 990 ptabs r7, tr5
 991 ptabs r8, tr6
 992 ptabs r9, tr7
 993
 994 ld.q SP, FRAME_R(0), r0
 995 ld.q SP, FRAME_R(1), r1
 996 ld.q SP, FRAME_R(2), r2
 997 ld.q SP, FRAME_R(3), r3
 998 ld.q SP, FRAME_R(4), r4
 999 ld.q SP, FRAME_R(5), r5
 1000 ld.q SP, FRAME_R(6), r6
 1001 ld.q SP, FRAME_R(7), r7
 1002 ld.q SP, FRAME_R(8), r8
 1003 ld.q SP, FRAME_R(9), r9
 1004 ld.q SP, FRAME_R(10), r10
 1005 ld.q SP, FRAME_R(11), r11
 1006 ld.q SP, FRAME_R(12), r12
 1007 ld.q SP, FRAME_R(13), r13
 1008 ld.q SP, FRAME_R(14), r14
 1009
 1010 ld.q SP, FRAME_R(16), r16
 1011 ld.q SP, FRAME_R(17), r17
 1012 ld.q SP, FRAME_R(18), r18
 1013 ld.q SP, FRAME_R(19), r19
 1014 ld.q SP, FRAME_R(20), r20
 1015 ld.q SP, FRAME_R(21), r21
 1016 ld.q SP, FRAME_R(22), r22
 1017 ld.q SP, FRAME_R(23), r23
 1018 ld.q SP, FRAME_R(24), r24
 1019 ld.q SP, FRAME_R(25), r25
 1020 ld.q SP, FRAME_R(26), r26
 1021 ld.q SP, FRAME_R(27), r27
 1022 ld.q SP, FRAME_R(28), r28
 1023 ld.q SP, FRAME_R(29), r29
 1024 ld.q SP, FRAME_R(30), r30
 1025 ld.q SP, FRAME_R(31), r31
 1026 ld.q SP, FRAME_R(32), r32
 1027 ld.q SP, FRAME_R(33), r33
 1028 ld.q SP, FRAME_R(34), r34
 1029 ld.q SP, FRAME_R(35), r35
 1030 ld.q SP, FRAME_R(36), r36
 1031 ld.q SP, FRAME_R(37), r37
 1032 ld.q SP, FRAME_R(38), r38
 1033 ld.q SP, FRAME_R(39), r39
 1034 ld.q SP, FRAME_R(40), r40
 1035 ld.q SP, FRAME_R(41), r41
 1036 ld.q SP, FRAME_R(42), r42
 1037 ld.q SP, FRAME_R(43), r43
 1038 ld.q SP, FRAME_R(44), r44
 1039 ld.q SP, FRAME_R(45), r45
 1040 ld.q SP, FRAME_R(46), r46
 1041 ld.q SP, FRAME_R(47), r47
 1042 ld.q SP, FRAME_R(48), r48
 1043 ld.q SP, FRAME_R(49), r49
 1044 ld.q SP, FRAME_R(50), r50
 1045 ld.q SP, FRAME_R(51), r51
 1046 ld.q SP, FRAME_R(52), r52
 1047 ld.q SP, FRAME_R(53), r53
 1048 ld.q SP, FRAME_R(54), r54
 1049 ld.q SP, FRAME_R(55), r55
 1050 ld.q SP, FRAME_R(56), r56
 1051 ld.q SP, FRAME_R(57), r57
 1052 ld.q SP, FRAME_R(58), r58
 1053
 1054 getcon SR, r59
 1055 movi SR_BLOCK_EXC, r60
 1056 or r59, r60, r59
 1057 putcon r59, SR /* SR.BL = 1, keep nesting out */
 1058 ld.q SP, FRAME_S(FSSR), r61
 1059 ld.q SP, FRAME_S(FSPC), r62
 1060 movi SR_ASID_MASK, r60
 1061 and r59, r60, r59
 1062 andc r61, r60, r61 /* Clear out older ASID */
 1063 or r59, r61, r61 /* Retain current ASID */
 1064 putcon r61, SSR
 1065 putcon r62, SPC
 1066
 1067 /* Ignore FSYSCALL_ID */
 1068
 1069 ld.q SP, FRAME_R(59), r59
 1070 ld.q SP, FRAME_R(60), r60
 1071 ld.q SP, FRAME_R(61), r61
 1072 ld.q SP, FRAME_R(62), r62
 1073
 1074 /* Last touch */
 1075 ld.q SP, FRAME_R(15), SP
 1076 rte
 1077 nop
1078
1079/*
1080 * Third level handlers for VBR-based exceptions. Adapting args to
1081 * and/or deflecting to fourth level handlers.
1082 *
1083 * Fourth level handlers interface.
1084 * Most are C-coded handlers directly pointed by the trap_jtable.
1085 * (Third = Fourth level)
1086 * Inputs:
1087 * (r2) fault/interrupt code, entry number (e.g. NMI = 14,
1088 * IRL0-3 (0000) = 16, RTLBMISS = 2, SYSCALL = 11, etc ...)
1089 * (r3) struct pt_regs *, original register's frame pointer
1090 * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault)
1091 * (r5) TRA control register (for syscall/debug benefit only)
1092 * (LINK) return address
1093 * (SP) = r3
1094 *
1095 * Kernel TLB fault handlers will get a slightly different interface.
1096 * (r2) struct pt_regs *, original register's frame pointer
1097 * (r3) writeaccess, whether it's a store fault as opposed to load fault
1098 * (r4) execaccess, whether it's a ITLB fault as opposed to DTLB fault
1099 * (r5) Effective Address of fault
1100 * (LINK) return address
1101 * (SP) = r2
1102 *
1103 * fpu_error_or_IRQ? is a helper to deflect to the right cause.
1104 *
1105 */
/* Third-level deflectors: adapt the generic (r2=code, r3=pt_regs, r4=event,
 * r5=TRA) interface to the do_page_fault(regs, writeaccess, execaccess,
 * address) interface, or re-route shared vector slots to do_IRQ. */
 1106tlb_miss_load:
 1107 or SP, ZERO, r2
 1108 or ZERO, ZERO, r3 /* Read */
 1109 or ZERO, ZERO, r4 /* Data */
 1110 getcon TEA, r5
 1111 pta call_do_page_fault, tr0
 1112 beq ZERO, ZERO, tr0 /* unconditional branch */
 1113
 1114tlb_miss_store:
 1115 or SP, ZERO, r2
 1116 movi 1, r3 /* Write */
 1117 or ZERO, ZERO, r4 /* Data */
 1118 getcon TEA, r5
 1119 pta call_do_page_fault, tr0
 1120 beq ZERO, ZERO, tr0
 1121
 1122itlb_miss_or_IRQ:
 1123 pta its_IRQ, tr0
 1124 beqi/u r4, EVENT_INTERRUPT, tr0
 1125 or SP, ZERO, r2
 1126 or ZERO, ZERO, r3 /* Read */
 1127 movi 1, r4 /* Text */
 1128 getcon TEA, r5
 1129 /* Fall through */
 1130
 1131call_do_page_fault:
 1132 movi do_page_fault, r6
 1133 ptabs r6, tr0
 1134 blink tr0, ZERO
 1135
 1136fpu_error_or_IRQA:
 1137 pta its_IRQ, tr0
 1138 beqi/l r4, EVENT_INTERRUPT, tr0
 1139#ifdef CONFIG_SH_FPU
 1140 movi do_fpu_state_restore, r6
 1141#else
 1142 movi do_exception_error, r6
 1143#endif
 1144 ptabs r6, tr0
 1145 blink tr0, ZERO
 1146
 1147fpu_error_or_IRQB:
 1148 pta its_IRQ, tr0
 1149 beqi/l r4, EVENT_INTERRUPT, tr0
 1150#ifdef CONFIG_SH_FPU
 1151 movi do_fpu_state_restore, r6
 1152#else
 1153 movi do_exception_error, r6
 1154#endif
 1155 ptabs r6, tr0
 1156 blink tr0, ZERO
 1157
 1158its_IRQ:
 1159 movi do_IRQ, r6
 1160 ptabs r6, tr0
 1161 blink tr0, ZERO
1162
1163/*
1164 * system_call/unknown_trap third level handler:
1165 *
1166 * Inputs:
1167 * (r2) fault/interrupt code, entry number (TRAP = 11)
1168 * (r3) struct pt_regs *, original register's frame pointer
1169 * (r4) Not used. Event (0=interrupt, 1=TLB miss fault, 2=Not TLB miss fault)
1170 * (r5) TRA Control Reg (0x00xyzzzz: x=1 SYSCALL, y = #args, z=nr)
1171 * (SP) = r3
1172 * (LINK) return address: ret_from_exception
1173 * (*r3) Syscall parms: SC#, arg0, arg1, ..., arg5 in order (Saved r2/r7)
1174 *
1175 * Outputs:
1176 * (*r3) Syscall reply (Saved r2)
1177 * (LINK) In case of syscall only it can be scrapped.
1178 * Common second level post handler will be ret_from_syscall.
1179 * Common (non-trace) exit point to that is syscall_ret (saving
1180 * result to r2). Common bad exit point is syscall_bad (returning
1181 * ENOSYS then saved to r2).
1182 *
1183 */
1184
unknown_trap:
	/* Unknown Trap or User Trace */
	movi	do_unknown_trapa, r6
	ptabs	r6, tr0
	ld.q	r3, FRAME_R(9), r2	/* r2 = #arg << 16 | syscall # */
	andi	r2, 0x1ff, r2		/* r2 = syscall # */
	blink	tr0, LINK		/* call do_unknown_trapa(trapnr) */

	/* Back from the C handler: take the common syscall exit path. */
	pta	syscall_ret, tr0
	blink	tr0, ZERO
1195
	/* New syscall implementation */
system_call:
	/* On entry r5 = TRA (0x00xyzzzz, x=1 for SYSCALL — see the block
	   comment above).  Anything else is routed to unknown_trap. */
	pta	unknown_trap, tr0
	or	r5, ZERO, r4			/* TRA (=r5) -> r4 */
	shlri	r4, 20, r4
	bnei	r4, 1, tr0			/* unknown_trap if not 0x1yzzzz */

	/* It's a system call */
	st.q	r3, FRAME_S(FSYSCALL_ID), r5	/* ID (0x1yzzzz) -> stack */
	andi	r5, 0x1ff, r5			/* syscall # -> r5 */

	STI()

	pta	syscall_allowed, tr0
	movi	NR_syscalls - 1, r4		/* Last valid */
	bgeu/l	r4, r5, tr0			/* in range -> syscall_allowed */

syscall_bad:
	/* Return ENOSYS ! */
	movi	-(ENOSYS), r2			/* Fall-through */

	.global syscall_ret
syscall_ret:
	/* Common syscall exit: store the reply into the saved r9 slot,
	   advance the saved PC past the trapa, and resume. */
	st.q	SP, FRAME_R(9), r2	/* Expecting SP back to BASIC frame */

#ifdef CONFIG_POOR_MANS_STRACE
	/* nothing useful in registers at this point */

	movi	evt_debug2, r5
	ori	r5, 1, r5
	ptabs	r5, tr0
	ld.q	SP, FRAME_R(9), r2
	or	SP, ZERO, r3
	blink	tr0, LINK
#endif

	ld.q	SP, FRAME_S(FSPC), r2
	addi	r2, 4, r2	/* Move PC, being pre-execution event */
	st.q	SP, FRAME_S(FSPC), r2
	pta	ret_from_syscall, tr0
	blink	tr0, ZERO
1237
1238
/* A different return path for ret_from_fork, because we now need
 * to call schedule_tail with the later kernels. Because prev is
 * loaded into r2 by switch_to() means we can just call it straight away
 */

.global	ret_from_fork
ret_from_fork:

	/* Call schedule_tail(prev); prev is already in r2 from switch_to(). */
	movi	schedule_tail,r5
	ori	r5, 1, r5	/* low bit set on the target, as at other call sites */
	ptabs	r5, tr0
	blink	tr0, LINK

#ifdef CONFIG_POOR_MANS_STRACE
	/* nothing useful in registers at this point */

	movi	evt_debug2, r5
	ori	r5, 1, r5
	ptabs	r5, tr0
	ld.q	SP, FRAME_R(9), r2
	or	SP, ZERO, r3
	blink	tr0, LINK
#endif

	/* Same exit sequence as syscall_ret: step saved PC past the trapa. */
	ld.q	SP, FRAME_S(FSPC), r2
	addi	r2, 4, r2	/* Move PC, being pre-execution event */
	st.q	SP, FRAME_S(FSPC), r2
	pta	ret_from_syscall, tr0
	blink	tr0, ZERO
1268
1269
1270
syscall_allowed:
	/* Use LINK to deflect the exit point, default is syscall_ret */
	pta	syscall_ret, tr0
	gettr	tr0, LINK
	pta	syscall_notrace, tr0

	getcon	KCR0, r2		/* KCR0 -> r2; TI_FLAGS is read off it below */
	ld.l	r2, TI_FLAGS, r4
	movi	(_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | _TIF_SYSCALL_AUDIT), r6
	and	r6, r4, r6
	beq/l	r6, ZERO, tr0		/* no trace flags -> syscall_notrace */

	/* Trace it by calling syscall_trace before and after */
	movi	syscall_trace, r4
	or	SP, ZERO, r2		/* arg0 = pt_regs */
	or	ZERO, ZERO, r3		/* arg1 = 0 (entry side) */
	ptabs	r4, tr0
	blink	tr0, LINK

	/* Reload syscall number as r5 is trashed by syscall_trace */
	ld.q	SP, FRAME_S(FSYSCALL_ID), r5
	andi	r5, 0x1ff, r5

	/* Traced: exit via syscall_ret_trace instead of syscall_ret. */
	pta	syscall_ret_trace, tr0
	gettr	tr0, LINK

syscall_notrace:
	/* Now point to the appropriate 4th level syscall handler */
	movi	sys_call_table, r4
	shlli	r5, 2, r5		/* 32-bit table entries: offset = nr * 4 */
	ldx.l	r4, r5, r5
	ptabs	r5, tr0

	/* Prepare original args */
	ld.q	SP, FRAME_R(2), r2
	ld.q	SP, FRAME_R(3), r3
	ld.q	SP, FRAME_R(4), r4
	ld.q	SP, FRAME_R(5), r5
	ld.q	SP, FRAME_R(6), r6
	ld.q	SP, FRAME_R(7), r7

	/* And now the trick for those syscalls requiring regs * ! */
	or	SP, ZERO, r8

	/* Call it */
	blink	tr0, ZERO	/* LINK is already properly set */
1317
syscall_ret_trace:
	/* We get back here only if under trace */
	st.q	SP, FRAME_R(9), r2	/* Save return value */

	movi	syscall_trace, LINK	/* LINK used as scratch for the target */
	or	SP, ZERO, r2		/* arg0 = pt_regs */
	movi	1, r3			/* arg1 = 1 (exit side; entry passed 0) */
	ptabs	LINK, tr0
	blink	tr0, LINK

	/* This needs to be done after any syscall tracing */
	ld.q	SP, FRAME_S(FSPC), r2
	addi	r2, 4, r2	/* Move PC, being pre-execution event */
	st.q	SP, FRAME_S(FSPC), r2

	pta	ret_from_syscall, tr0
	blink	tr0, ZERO	/* Resume normal return sequence */
1335
/*
 * --- Switch to running under a particular ASID and return the previous ASID value
 * --- The caller is assumed to have done a cli before calling this.
 *
 * Input r2 : new ASID
 * Output r2 : old ASID
 */

	.global switch_and_save_asid
switch_and_save_asid:
	getcon	sr, r0
	movi	255, r4
	shlli	r4, 16, r4	/* r4 = mask to select ASID */
	and	r0, r4, r3	/* r3 = shifted old ASID */
	andi	r2, 255, r2	/* mask down new ASID */
	shlli	r2, 16, r2	/* align new ASID against SR.ASID */
	andc	r0, r4, r0	/* efface old ASID from SR */
	or	r0, r2, r0	/* insert the new ASID */
	putcon	r0, ssr		/* new SR takes effect via the rte below */
	movi	1f, r0
	putcon	r0, spc
	rte
	nop
1:
	ptabs	LINK, tr0
	shlri	r3, 16, r2	/* r2 = old ASID */
	blink	tr0, r63
1363
	.global route_to_panic_handler
route_to_panic_handler:
	/* Switch to real mode, goto panic_handler, don't return.  Useful for
	   last-chance debugging, e.g. if no output wants to go to the console.
	   */

	movi	panic_handler - CONFIG_PAGE_OFFSET, r1	/* physical address */
	ptabs	r1, tr0
	pta	1f, tr1
	gettr	tr1, r0
	putcon	r0, spc
	getcon	sr, r0
	movi	1, r1
	shlli	r1, 31, r1
	andc	r0, r1, r0	/* clear SR bit 31 -> real mode after rte */
	putcon	r0, ssr
	rte
	nop
1:	/* Now in real mode */
	blink	tr0, r63	/* jump to panic_handler; never returns */
	nop
1385
	.global peek_real_address_q
peek_real_address_q:
	/* Two args:
	   r2 : real mode address to peek
	   r2(out) : result quadword

	   This is provided as a cheapskate way of manipulating device
	   registers for debugging (to avoid the need to onchip_remap the debug
	   module, and to avoid the need to onchip_remap the watchpoint
	   controller in a way that identity maps sufficient bits to avoid the
	   SH5-101 cut2 silicon defect).

	   This code is not performance critical
	*/

	add.l	r2, r63, r2	/* sign extend address */
	getcon	sr, r0		/* r0 = saved original SR */
	movi	1, r1
	shlli	r1, 28, r1
	or	r0, r1, r1	/* r1 = SR with the block (BL) bit set */
	putcon	r1, sr		/* now in critical section */
	movi	1, r36
	shlli	r36, 31, r36
	andc	r1, r36, r1	/* turn sr.mmu off in real mode section */

	putcon	r1, ssr
	movi	.peek0 - CONFIG_PAGE_OFFSET, r36 /* real mode target address */
	movi	1f, r37		/* virtual mode return addr */
	putcon	r36, spc

	synco
	rte
	nop

.peek0:	/* come here in real mode, don't touch caches!!
	   still in critical section (sr.bl==1) */
	putcon	r0, ssr		/* queue up original SR for the return rte */
	putcon	r37, spc
	/* Here's the actual peek.  If the address is bad, all bets are now off
	 * what will happen (handlers invoked in real-mode = bad news) */
	ld.q	r2, 0, r2
	synco
	rte	/* Back to virtual mode */
	nop

1:
	ptabs	LINK, tr0
	blink	tr0, r63
1434
	.global poke_real_address_q
poke_real_address_q:
	/* Two args:
	   r2 : real mode address to poke
	   r3 : quadword value to write.

	   This is provided as a cheapskate way of manipulating device
	   registers for debugging (to avoid the need to onchip_remap the debug
	   module, and to avoid the need to onchip_remap the watchpoint
	   controller in a way that identity maps sufficient bits to avoid the
	   SH5-101 cut2 silicon defect).

	   This code is not performance critical
	*/

	add.l	r2, r63, r2	/* sign extend address */
	getcon	sr, r0		/* r0 = saved original SR */
	movi	1, r1
	shlli	r1, 28, r1
	or	r0, r1, r1	/* r1 = SR with the block (BL) bit set */
	putcon	r1, sr		/* now in critical section */
	movi	1, r36
	shlli	r36, 31, r36
	andc	r1, r36, r1	/* turn sr.mmu off in real mode section */

	putcon	r1, ssr
	movi	.poke0-CONFIG_PAGE_OFFSET, r36	/* real mode target address */
	movi	1f, r37		/* virtual mode return addr */
	putcon	r36, spc

	synco
	rte
	nop

.poke0:	/* come here in real mode, don't touch caches!!
	   still in critical section (sr.bl==1) */
	putcon	r0, ssr		/* queue up original SR for the return rte */
	putcon	r37, spc
	/* Here's the actual poke.  If the address is bad, all bets are now off
	 * what will happen (handlers invoked in real-mode = bad news) */
	st.q	r2, 0, r3
	synco
	rte	/* Back to virtual mode */
	nop

1:
	ptabs	LINK, tr0
	blink	tr0, r63
1483
1484/*
1485 * --- User Access Handling Section
1486 */
1487
1488/*
1489 * User Access support. It all moved to non inlined Assembler
1490 * functions in here.
1491 *
1492 * __kernel_size_t __copy_user(void *__to, const void *__from,
1493 * __kernel_size_t __n)
1494 *
1495 * Inputs:
1496 * (r2) target address
1497 * (r3) source address
1498 * (r4) size in bytes
1499 *
 * Outputs:
1501 * (*r2) target data
1502 * (r2) non-copied bytes
1503 *
1504 * If a fault occurs on the user pointer, bail out early and return the
1505 * number of bytes not copied in r2.
1506 * Strategy : for large blocks, call a real memcpy function which can
1507 * move >1 byte at a time using unaligned ld/st instructions, and can
1508 * manipulate the cache using prefetch + alloco to improve the speed
1509 * further. If a fault occurs in that function, just revert to the
1510 * byte-by-byte approach used for small blocks; this is rare so the
1511 * performance hit for that case does not matter.
1512 *
1513 * For small blocks it's not worth the overhead of setting up and calling
1514 * the memcpy routine; do the copy a byte at a time.
1515 *
1516 */
	.global	__copy_user
__copy_user:
	/* Small copies go straight to the byte loop; larger ones call the
	   fast copy_user_memcpy and fall back to the byte loop on fault
	   (via __copy_user_fixup).  See the strategy comment above. */
	pta	__copy_user_byte_by_byte, tr1
	movi	16, r0 ! this value is a best guess, should tune it by benchmarking
	bge/u	r0, r4, tr1
	pta	copy_user_memcpy, tr0
	addi	SP, -32, SP
	/* Save arguments in case we have to fix-up unhandled page fault */
	st.q	SP, 0, r2
	st.q	SP, 8, r3
	st.q	SP, 16, r4
	st.q	SP, 24, r35 ! r35 is callee-save
	/* Save LINK in a register to reduce RTS time later (otherwise
	   ld SP,*,LINK;ptabs LINK;trn;blink trn,r63 becomes a critical path) */
	ori	LINK, 0, r35
	blink	tr0, LINK

	/* Copy completed normally if we get back here */
	ptabs	r35, tr0
	ld.q	SP, 24, r35
	/* don't restore r2-r4, pointless */
	/* set result=r2 to zero as the copy must have succeeded. */
	or	r63, r63, r2
	addi	SP, 32, SP
	blink	tr0, r63 ! RTS

	.global __copy_user_fixup
__copy_user_fixup:
	/* Reached via the exception table if copy_user_memcpy faulted. */
	/* Restore stack frame */
	ori	r35, 0, LINK
	ld.q	SP, 24, r35
	ld.q	SP, 16, r4
	ld.q	SP, 8, r3
	ld.q	SP, 0, r2
	addi	SP, 32, SP
	/* Fall through to original code, in the 'same' state we entered with */

/* The slow byte-by-byte method is used if the fast copy traps due to a bad
   user address.  In that rare case, the speed drop can be tolerated. */
__copy_user_byte_by_byte:
	pta	___copy_user_exit, tr1
	pta	___copy_user1, tr0
	beq/u	r4, r63, tr1	/* early exit for zero length copy */
	sub	r2, r3, r0
	addi	r0, -1, r0	/* r0 = dest - src - 1; so stx.b below hits dest */

___copy_user1:
	ld.b	r3, 0, r5		/* Fault address 1 */

	/* Could rewrite this to use just 1 add, but the second comes 'free'
	   due to load latency */
	addi	r3, 1, r3
	addi	r4, -1, r4	/* No real fixup required */
___copy_user2:
	stx.b	r3, r0, r5	/* Fault address 2 */
	bne	r4, ZERO, tr0

___copy_user_exit:
	or	r4, ZERO, r2	/* return number of bytes NOT copied */
	ptabs	LINK, tr0
	blink	tr0, ZERO
1578
1579/*
1580 * __kernel_size_t __clear_user(void *addr, __kernel_size_t size)
1581 *
1582 * Inputs:
1583 * (r2) target address
1584 * (r3) size in bytes
1585 *
 * Outputs:
1587 * (*r2) zero-ed target data
1588 * (r2) non-zero-ed bytes
1589 */
	.global __clear_user
__clear_user:
	/* Zero r3 bytes at user address r2, a byte at a time; a fault on the
	   store exits early via the exception table (___clear_user_exit). */
	pta	___clear_user_exit, tr1
	pta	___clear_user1, tr0
	beq/u	r3, r63, tr1	/* early exit for zero length */

___clear_user1:
	st.b	r2, 0, ZERO	/* Fault address */
	addi	r2, 1, r2
	addi	r3, -1, r3	/* No real fixup required */
	bne	r3, ZERO, tr0

___clear_user_exit:
	or	r3, ZERO, r2	/* return number of bytes NOT zeroed */
	ptabs	LINK, tr0
	blink	tr0, ZERO
1606
1607
1608/*
1609 * int __strncpy_from_user(unsigned long __dest, unsigned long __src,
1610 * int __count)
1611 *
1612 * Inputs:
1613 * (r2) target address
1614 * (r3) source address
1615 * (r4) maximum size in bytes
1616 *
 * Outputs:
1618 * (*r2) copied data
1619 * (r2) -EFAULT (in case of faulting)
1620 * copied data (otherwise)
1621 */
	.global __strncpy_from_user
__strncpy_from_user:
	/* Copy up to r4 bytes from user r3 to kernel r2, stopping at NUL;
	   a fault on the user load returns -EFAULT via the exception table. */
	pta	___strncpy_from_user1, tr0
	pta	___strncpy_from_user_done, tr1
	or	r4, ZERO, r5		/* r5 = original count */
	beq/u	r4, r63, tr1		/* early exit if r4==0 */
	movi	-(EFAULT), r6		/* r6 = reply, no real fixup */
	or	ZERO, ZERO, r7		/* r7 = data, clear top byte of data */

___strncpy_from_user1:
	ld.b	r3, 0, r7	/* Fault address: only in reading */
	st.b	r2, 0, r7
	addi	r2, 1, r2
	addi	r3, 1, r3
	beq/u	ZERO, r7, tr1	/* stop after copying the terminating NUL */
	addi	r4, -1, r4	/* return real number of copied bytes */
	bne/l	ZERO, r4, tr0

___strncpy_from_user_done:
	sub	r5, r4, r6	/* If done, return copied */

___strncpy_from_user_exit:
	or	r6, ZERO, r2
	ptabs	LINK, tr0
	blink	tr0, ZERO
1647
1648/*
1649 * extern long __strnlen_user(const char *__s, long __n)
1650 *
1651 * Inputs:
1652 * (r2) source address
1653 * (r3) source size in bytes
1654 *
 * Outputs:
1656 * (r2) -EFAULT (in case of faulting)
1657 * string length (otherwise)
1658 */
	.global __strnlen_user
__strnlen_user:
	/* Count bytes of the user string at r2, bounded by r3; a fault on the
	   user load returns -EFAULT via the exception table. */
	pta	___strnlen_user_set_reply, tr0
	pta	___strnlen_user1, tr1
	or	ZERO, ZERO, r5		/* r5 = counter */
	movi	-(EFAULT), r6		/* r6 = reply, no real fixup */
	or	ZERO, ZERO, r7		/* r7 = data, clear top byte of data */
	beq	r3, ZERO, tr0

___strnlen_user1:
	ldx.b	r2, r5, r7	/* Fault address: only in reading */
	addi	r3, -1, r3	/* No real fixup */
	addi	r5, 1, r5
	beq	r3, ZERO, tr0	/* bound exhausted */
	bne	r7, ZERO, tr1	/* keep going until the NUL byte */
! The line below used to be active.  This led to a junk byte lying between each pair
! of entries in the argv & envp structures in memory.  Whilst the program saw the right data
! via the argv and envp arguments to main, it meant the 'flat' representation visible through
! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example.
!	addi	r5, 1, r5 /* Include '\0' */

___strnlen_user_set_reply:
	or	r5, ZERO, r6	/* If done, return counter */

___strnlen_user_exit:
	or	r6, ZERO, r2
	ptabs	LINK, tr0
	blink	tr0, ZERO
1687
1688/*
1689 * extern long __get_user_asm_?(void *val, long addr)
1690 *
1691 * Inputs:
1692 * (r2) dest address
1693 * (r3) source address (in User Space)
1694 *
 * Outputs:
1696 * (r2) -EFAULT (faulting)
1697 * 0 (not faulting)
1698 */
	.global __get_user_asm_b
__get_user_asm_b:
	/* Fetch one byte from user address r3 into kernel *r2; a fault on the
	   load leaves -EFAULT in r2 via the exception-table fixup. */
	or	r2, ZERO, r4		/* r4 = dest, freeing r2 for the reply */
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___get_user_asm_b1:
	ld.b	r3, 0, r5		/* r5 = data; may fault */
	st.b	r4, 0, r5
	or	ZERO, ZERO, r2		/* success: return 0 */

___get_user_asm_b_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO
1712
1713
	.global __get_user_asm_w
__get_user_asm_w:
	/* Word (16-bit) variant of __get_user_asm_b; same fault contract. */
	or	r2, ZERO, r4		/* r4 = dest, freeing r2 for the reply */
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___get_user_asm_w1:
	ld.w	r3, 0, r5		/* r5 = data; may fault */
	st.w	r4, 0, r5
	or	ZERO, ZERO, r2		/* success: return 0 */

___get_user_asm_w_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO
1727
1728
	.global __get_user_asm_l
__get_user_asm_l:
	/* Longword (32-bit) variant of __get_user_asm_b; same fault contract. */
	or	r2, ZERO, r4		/* r4 = dest, freeing r2 for the reply */
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___get_user_asm_l1:
	ld.l	r3, 0, r5		/* r5 = data; may fault */
	st.l	r4, 0, r5
	or	ZERO, ZERO, r2		/* success: return 0 */

___get_user_asm_l_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO
1742
1743
	.global __get_user_asm_q
__get_user_asm_q:
	/* Quadword (64-bit) variant of __get_user_asm_b; same fault contract. */
	or	r2, ZERO, r4		/* r4 = dest, freeing r2 for the reply */
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___get_user_asm_q1:
	ld.q	r3, 0, r5		/* r5 = data; may fault */
	st.q	r4, 0, r5
	or	ZERO, ZERO, r2		/* success: return 0 */

___get_user_asm_q_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO
1757
1758/*
1759 * extern long __put_user_asm_?(void *pval, long addr)
1760 *
1761 * Inputs:
1762 * (r2) kernel pointer to value
1763 * (r3) dest address (in User Space)
1764 *
 * Outputs:
1766 * (r2) -EFAULT (faulting)
1767 * 0 (not faulting)
1768 */
	.global __put_user_asm_b
__put_user_asm_b:
	/* Store the byte at kernel *r2 to user address r3; a fault on the
	   store leaves -EFAULT in r2 via the exception-table fixup. */
	ld.b	r2, 0, r4	/* r4 = data */
	movi	-(EFAULT), r2	/* r2 = reply, no real fixup */

___put_user_asm_b1:
	st.b	r3, 0, r4	/* may fault */
	or	ZERO, ZERO, r2	/* success: return 0 */

___put_user_asm_b_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO
1781
1782
	.global __put_user_asm_w
__put_user_asm_w:
	/* Word (16-bit) variant of __put_user_asm_b; same fault contract. */
	ld.w	r2, 0, r4	/* r4 = data */
	movi	-(EFAULT), r2	/* r2 = reply, no real fixup */

___put_user_asm_w1:
	st.w	r3, 0, r4	/* may fault */
	or	ZERO, ZERO, r2	/* success: return 0 */

___put_user_asm_w_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO
1795
1796
	.global __put_user_asm_l
__put_user_asm_l:
	/* Longword (32-bit) variant of __put_user_asm_b; same fault contract. */
	ld.l	r2, 0, r4	/* r4 = data */
	movi	-(EFAULT), r2	/* r2 = reply, no real fixup */

___put_user_asm_l1:
	st.l	r3, 0, r4	/* may fault */
	or	ZERO, ZERO, r2	/* success: return 0 */

___put_user_asm_l_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO
1809
1810
	.global __put_user_asm_q
__put_user_asm_q:
	/* Quadword (64-bit) variant of __put_user_asm_b; same fault contract. */
	ld.q	r2, 0, r4	/* r4 = data */
	movi	-(EFAULT), r2	/* r2 = reply, no real fixup */

___put_user_asm_q1:
	st.q	r3, 0, r4	/* may fault */
	or	ZERO, ZERO, r2	/* success: return 0 */

___put_user_asm_q_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO
1823
panic_stash_regs:
	/* The idea is : when we get an unhandled panic, we dump the registers
	   to a known memory location, then just sit in a tight loop.
	   This allows the human to look at the memory region through the GDB
	   session (assuming the debug module's SHwy initiator isn't locked up
	   or anything), to hopefully analyze the cause of the panic. */

	/* On entry, former r15 (SP) is in DCR
	   former r0  is at resvec_saved_area + 0
	   former r1  is at resvec_saved_area + 8
	   former tr0 is at resvec_saved_area + 32
	   DCR is the only register whose value is lost altogether.
	*/

	movi	0xffffffff80000000, r0 ! phy of dump area
	ld.q	SP, 0x000, r1 ! former r0
	st.q	r0, 0x000, r1
	ld.q	SP, 0x008, r1 ! former r1
	st.q	r0, 0x008, r1
	/* r2-r14 are still live, dump them directly */
	st.q	r0, 0x010, r2
	st.q	r0, 0x018, r3
	st.q	r0, 0x020, r4
	st.q	r0, 0x028, r5
	st.q	r0, 0x030, r6
	st.q	r0, 0x038, r7
	st.q	r0, 0x040, r8
	st.q	r0, 0x048, r9
	st.q	r0, 0x050, r10
	st.q	r0, 0x058, r11
	st.q	r0, 0x060, r12
	st.q	r0, 0x068, r13
	st.q	r0, 0x070, r14
	getcon	dcr, r14
	st.q	r0, 0x078, r14 ! r15 slot: former SP, stashed in DCR on entry
	st.q	r0, 0x080, r16
	st.q	r0, 0x088, r17
	st.q	r0, 0x090, r18
	st.q	r0, 0x098, r19
	st.q	r0, 0x0a0, r20
	st.q	r0, 0x0a8, r21
	st.q	r0, 0x0b0, r22
	st.q	r0, 0x0b8, r23
	st.q	r0, 0x0c0, r24
	st.q	r0, 0x0c8, r25
	st.q	r0, 0x0d0, r26
	st.q	r0, 0x0d8, r27
	st.q	r0, 0x0e0, r28
	st.q	r0, 0x0e8, r29
	st.q	r0, 0x0f0, r30
	st.q	r0, 0x0f8, r31
	st.q	r0, 0x100, r32
	st.q	r0, 0x108, r33
	st.q	r0, 0x110, r34
	st.q	r0, 0x118, r35
	st.q	r0, 0x120, r36
	st.q	r0, 0x128, r37
	st.q	r0, 0x130, r38
	st.q	r0, 0x138, r39
	st.q	r0, 0x140, r40
	st.q	r0, 0x148, r41
	st.q	r0, 0x150, r42
	st.q	r0, 0x158, r43
	st.q	r0, 0x160, r44
	st.q	r0, 0x168, r45
	st.q	r0, 0x170, r46
	st.q	r0, 0x178, r47
	st.q	r0, 0x180, r48
	st.q	r0, 0x188, r49
	st.q	r0, 0x190, r50
	st.q	r0, 0x198, r51
	st.q	r0, 0x1a0, r52
	st.q	r0, 0x1a8, r53
	st.q	r0, 0x1b0, r54
	st.q	r0, 0x1b8, r55
	st.q	r0, 0x1c0, r56
	st.q	r0, 0x1c8, r57
	st.q	r0, 0x1d0, r58
	st.q	r0, 0x1d8, r59
	st.q	r0, 0x1e0, r60
	st.q	r0, 0x1e8, r61
	st.q	r0, 0x1f0, r62
	st.q	r0, 0x1f8, r63 ! bogus, but for consistency's sake...

	/* Target registers: former tr0 was saved on entry, tr1-tr7 are live */
	ld.q	SP, 0x020, r1  ! former tr0
	st.q	r0, 0x200, r1
	gettr	tr1, r1
	st.q	r0, 0x208, r1
	gettr	tr2, r1
	st.q	r0, 0x210, r1
	gettr	tr3, r1
	st.q	r0, 0x218, r1
	gettr	tr4, r1
	st.q	r0, 0x220, r1
	gettr	tr5, r1
	st.q	r0, 0x228, r1
	gettr	tr6, r1
	st.q	r0, 0x230, r1
	gettr	tr7, r1
	st.q	r0, 0x238, r1

	/* Control registers, dumped at 0x240.. in the order read below */
	getcon	sr,  r1
	getcon	ssr, r2
	getcon	pssr, r3
	getcon	spc, r4
	getcon	pspc, r5
	getcon	intevt, r6
	getcon	expevt, r7
	getcon	pexpevt, r8
	getcon	tra, r9
	getcon	tea, r10
	getcon	kcr0, r11
	getcon	kcr1, r12
	getcon	vbr, r13
	getcon	resvec, r14

	st.q	r0, 0x240, r1
	st.q	r0, 0x248, r2
	st.q	r0, 0x250, r3
	st.q	r0, 0x258, r4
	st.q	r0, 0x260, r5
	st.q	r0, 0x268, r6
	st.q	r0, 0x270, r7
	st.q	r0, 0x278, r8
	st.q	r0, 0x280, r9
	st.q	r0, 0x288, r10
	st.q	r0, 0x290, r11
	st.q	r0, 0x298, r12
	st.q	r0, 0x2a0, r13
	st.q	r0, 0x2a8, r14

	/* Arguments for panic_handler: SPC, SSR, EXPEVT */
	getcon	SPC,r2
	getcon	SSR,r3
	getcon	EXPEVT,r4
	/* Prepare to jump to C - physical address */
	movi	panic_handler-CONFIG_PAGE_OFFSET, r1
	ori	r1, 1, r1
	ptabs	r1, tr0
	getcon	DCR, SP		/* restore the original stack pointer */
	blink	tr0, ZERO
	nop
	nop
	nop
	nop
1967
1968
1969
1970
1971/*
1972 * --- Signal Handling Section
1973 */
1974
1975/*
1976 * extern long long _sa_default_rt_restorer
1977 * extern long long _sa_default_restorer
1978 *
1979 * or, better,
1980 *
1981 * extern void _sa_default_rt_restorer(void)
1982 * extern void _sa_default_restorer(void)
1983 *
 * Code prototypes to do a sys_rt_sigreturn() or sys_sigreturn()
1985 * from user space. Copied into user space by signal management.
1986 * Both must be quad aligned and 2 quad long (4 instructions).
1987 *
1988 */
	.balign 8
	.global sa_default_rt_restorer
sa_default_rt_restorer:
	/* r9 = 0x10 << 16 | __NR_rt_sigreturn, then trap: the TRA encoding
	   for a syscall (see the system_call TRA comment above). */
	movi	0x10, r9
	shori	__NR_rt_sigreturn, r9
	trapa	r9
	nop
1996
	.balign 8
	.global sa_default_restorer
sa_default_restorer:
	/* r9 = 0x10 << 16 | __NR_sigreturn, then trap: the TRA encoding
	   for a syscall (see the system_call TRA comment above). */
	movi	0x10, r9
	shori	__NR_sigreturn, r9
	trapa	r9
	nop
2004
2005/*
2006 * --- __ex_table Section
2007 */
2008
2009/*
2010 * User Access Exception Table.
2011 */
	.section	__ex_table, "a"
	/* Each entry pairs a possibly-faulting uaccess instruction address
	   with the fixup address the fault handler resumes at. */

	.global asm_uaccess_start	/* Just a marker */
asm_uaccess_start:

	.long	___copy_user1, ___copy_user_exit
	.long	___copy_user2, ___copy_user_exit
	.long	___clear_user1, ___clear_user_exit
	.long	___strncpy_from_user1, ___strncpy_from_user_exit
	.long	___strnlen_user1, ___strnlen_user_exit
	.long	___get_user_asm_b1, ___get_user_asm_b_exit
	.long	___get_user_asm_w1, ___get_user_asm_w_exit
	.long	___get_user_asm_l1, ___get_user_asm_l_exit
	.long	___get_user_asm_q1, ___get_user_asm_q_exit
	.long	___put_user_asm_b1, ___put_user_asm_b_exit
	.long	___put_user_asm_w1, ___put_user_asm_w_exit
	.long	___put_user_asm_l1, ___put_user_asm_l_exit
	.long	___put_user_asm_q1, ___put_user_asm_q_exit

	.global asm_uaccess_end		/* Just a marker */
asm_uaccess_end:
2033
2034
2035
2036
2037/*
2038 * --- .text.init Section
2039 */
2040
	.section	.text.init, "ax"

/*
 * void trap_init (void)
 *
 * Install the VBR/RESVEC exception vector blocks, sanity-check the
 * vector block size, then unblock exceptions (clear SR.BL).
 */
	.global trap_init
trap_init:
	addi	SP, -24, SP			/* Room to save r28/r29/r30 */
	st.q	SP, 0, r28
	st.q	SP, 8, r29
	st.q	SP, 16, r30

	/* Set VBR and RESVEC */
	movi	LVBR_block, r19
	andi	r19, -4, r19			/* reset MMUOFF + reserved */
	/* For RESVEC exceptions we force the MMU off, which means we need the
	   physical address. */
	movi	LRESVEC_block-CONFIG_PAGE_OFFSET, r20
	andi	r20, -4, r20			/* reset reserved */
	ori	r20, 1, r20			/* set MMUOFF */
	putcon	r19, VBR
	putcon	r20, RESVEC

	/* Sanity check */
	movi	LVBR_block_end, r21
	andi	r21, -4, r21
	movi	BLOCK_SIZE, r29			/* r29 = expected size */
	or	r19, ZERO, r30
	add	r19, r29, r19			/* r19 = expected end of block */

	/*
	 * Ugly, but better loop forever now than crash afterwards.
	 * We should print a message, but if we touch LVBR or
	 * LRESVEC blocks we should not be surprised if we get stuck
	 * in trap_init().
	 */
	pta	trap_init_loop, tr1
	gettr	tr1, r28			/* r28 = trap_init_loop */
	sub	r21, r30, r30			/* r30 = actual size */

	/*
	 * VBR/RESVEC handlers overlap by being bigger than
	 * allowed. Very bad. Just loop forever.
	 * (r28) panic/loop address
	 * (r29) expected size
	 * (r30) actual size
	 */
trap_init_loop:
	bne	r19, r21, tr1			/* size mismatch: spin here */

	/* Now that exception vectors are set up reset SR.BL */
	getcon	SR, r22
	movi	SR_UNBLOCK_EXC, r23
	and	r22, r23, r22
	putcon	r22, SR

	addi	SP, 24, SP
	ptabs	LINK, tr0
	blink	tr0, ZERO
2101