author | Paul Mundt <lethal@linux-sh.org> | 2007-11-11 04:11:18 -0500
committer | Paul Mundt <lethal@linux-sh.org> | 2008-01-27 23:18:46 -0500
commit | 343ac72248d360f1fae72176aca1117be19189ec (patch)
tree | f9f100f3e16aea8986e5054a4da26d69ecd954ca /arch/sh/kernel/cpu/sh5
parent | 49e6c3e7460a718242dc11b801811f0ac6892154 (diff)
sh: Move over the SH-5 entry.S.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh/kernel/cpu/sh5')
-rw-r--r-- | arch/sh/kernel/cpu/sh5/Makefile | 1
-rw-r--r-- | arch/sh/kernel/cpu/sh5/entry.S | 2100
2 files changed, 2101 insertions, 0 deletions
diff --git a/arch/sh/kernel/cpu/sh5/Makefile b/arch/sh/kernel/cpu/sh5/Makefile
new file mode 100644
index 000000000000..9778f9bdff3a
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/Makefile
@@ -0,0 +1 @@
obj-y := entry.o
diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S
new file mode 100644
index 000000000000..2f505a7cb5f9
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/entry.S
@@ -0,0 +1,2100 @@
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * arch/sh64/kernel/entry.S | ||
7 | * | ||
8 | * Copyright (C) 2000, 2001 Paolo Alberelli | ||
9 | * Copyright (C) 2004, 2005 Paul Mundt | ||
10 | * Copyright (C) 2003, 2004 Richard Curnow | ||
11 | * | ||
12 | */ | ||
13 | #include <linux/errno.h> | ||
14 | #include <linux/sys.h> | ||
15 | #include <asm/cpu/registers.h> | ||
16 | #include <asm/processor.h> | ||
17 | #include <asm/unistd.h> | ||
18 | #include <asm/thread_info.h> | ||
19 | #include <asm/asm-offsets.h> | ||
20 | |||
21 | /* | ||
22 | * SR fields. | ||
23 | */ | ||
24 | #define SR_ASID_MASK 0x00ff0000 | ||
25 | #define SR_FD_MASK 0x00008000 | ||
26 | #define SR_SS 0x08000000 | ||
27 | #define SR_BL 0x10000000 | ||
28 | #define SR_MD 0x40000000 | ||
29 | |||
30 | /* | ||
31 | * Event code. | ||
32 | */ | ||
33 | #define EVENT_INTERRUPT 0 | ||
34 | #define EVENT_FAULT_TLB 1 | ||
35 | #define EVENT_FAULT_NOT_TLB 2 | ||
36 | #define EVENT_DEBUG 3 | ||
37 | |||
38 | /* EXPEVT values */ | ||
39 | #define RESET_CAUSE 0x20 | ||
40 | #define DEBUGSS_CAUSE 0x980 | ||
41 | |||
42 | /* | ||
43 | * Frame layout. Quad index. | ||
44 | */ | ||
45 | #define FRAME_T(x) FRAME_TBASE+(x*8) | ||
46 | #define FRAME_R(x) FRAME_RBASE+(x*8) | ||
47 | #define FRAME_S(x) FRAME_SBASE+(x*8) | ||
48 | #define FSPC 0 | ||
49 | #define FSSR 1 | ||
50 | #define FSYSCALL_ID 2 | ||
51 | |||
52 | /* Arrange the save frame to be a multiple of 32 bytes long */ | ||
53 | #define FRAME_SBASE 0 | ||
54 | #define FRAME_RBASE (FRAME_SBASE+(3*8)) /* SYSCALL_ID - SSR - SPC */ | ||
55 | #define FRAME_TBASE (FRAME_RBASE+(63*8)) /* r0 - r62 */ | ||
56 | #define FRAME_PBASE (FRAME_TBASE+(8*8)) /* tr0 -tr7 */ | ||
57 | #define FRAME_SIZE (FRAME_PBASE+(2*8)) /* pad0-pad1 */ | ||
58 | |||
59 | #define FP_FRAME_SIZE FP_FRAME_BASE+(33*8) /* dr0 - dr31 + fpscr */ | ||
60 | #define FP_FRAME_BASE 0 | ||
61 | |||
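A quick worked check of the arithmetic above (a summary, not part of the original source): with 3 status quads, 63 general registers, 8 target registers and 2 pad quads, the offsets come out as follows.

/* FRAME_SBASE = 0
 * FRAME_RBASE = 0   + 3*8  = 24
 * FRAME_TBASE = 24  + 63*8 = 528
 * FRAME_PBASE = 528 + 8*8  = 592
 * FRAME_SIZE  = 592 + 2*8  = 608 = 19*32, so the frame is indeed a multiple of 32 bytes.
 * FP_FRAME_SIZE = 33*8 = 264 bytes (dr0-dr31 plus fpscr).
 */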
62 | #define SAVED_R2 0*8 | ||
63 | #define SAVED_R3 1*8 | ||
64 | #define SAVED_R4 2*8 | ||
65 | #define SAVED_R5 3*8 | ||
66 | #define SAVED_R18 4*8 | ||
67 | #define SAVED_R6 5*8 | ||
68 | #define SAVED_TR0 6*8 | ||
69 | |||
70 | /* These are the registers saved in the TLB path that aren't saved in the first | ||
71 | level of the normal one. */ | ||
72 | #define TLB_SAVED_R25 7*8 | ||
73 | #define TLB_SAVED_TR1 8*8 | ||
74 | #define TLB_SAVED_TR2 9*8 | ||
75 | #define TLB_SAVED_TR3 10*8 | ||
76 | #define TLB_SAVED_TR4 11*8 | ||
77 | /* Save R0/R1 : PT-migrating compiler currently dishonours -ffixed-r0 and -ffixed-r1, causing | ||
78 | breakage otherwise. */ | ||
79 | #define TLB_SAVED_R0 12*8 | ||
80 | #define TLB_SAVED_R1 13*8 | ||
81 | |||
82 | #define CLI() \ | ||
83 | getcon SR, r6; \ | ||
84 | ori r6, 0xf0, r6; \ | ||
85 | putcon r6, SR; | ||
86 | |||
87 | #define STI() \ | ||
88 | getcon SR, r6; \ | ||
89 | andi r6, ~0xf0, r6; \ | ||
90 | putcon r6, SR; | ||
91 | |||
92 | #ifdef CONFIG_PREEMPT | ||
93 | # define preempt_stop() CLI() | ||
94 | #else | ||
95 | # define preempt_stop() | ||
96 | # define resume_kernel restore_all | ||
97 | #endif | ||
98 | |||
99 | .section .data, "aw" | ||
100 | |||
101 | #define FAST_TLBMISS_STACK_CACHELINES 4 | ||
102 | #define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES) | ||
103 | |||
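For reference, the arithmetic behind these two defines: 4 cache lines of 4 quadwords each gives 16 quadwords, i.e. 16 * 8 = 128 bytes of scratch space, which the .balign 32 below keeps cache-line aligned.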
104 | /* Register back-up area for all exceptions */ | ||
105 | .balign 32 | ||
106 | /* Allow for 16 quadwords to be pushed by fast tlbmiss handling | ||
107 | * register saves etc. */ | ||
108 | .fill FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0 | ||
109 | /* This is 32 byte aligned by construction */ | ||
110 | /* Register back-up area for all exceptions */ | ||
111 | reg_save_area: | ||
112 | .quad 0 | ||
113 | .quad 0 | ||
114 | .quad 0 | ||
115 | .quad 0 | ||
116 | |||
117 | .quad 0 | ||
118 | .quad 0 | ||
119 | .quad 0 | ||
120 | .quad 0 | ||
121 | |||
122 | .quad 0 | ||
123 | .quad 0 | ||
124 | .quad 0 | ||
125 | .quad 0 | ||
126 | |||
127 | .quad 0 | ||
128 | .quad 0 | ||
129 | |||
130 | /* Save area for RESVEC exceptions. We cannot use reg_save_area because of | ||
131 | * reentrancy. Note this area may be accessed via physical address. | ||
132 | * Align so this fits a whole single cache line, for ease of purging. | ||
133 | */ | ||
134 | .balign 32,0,32 | ||
135 | resvec_save_area: | ||
136 | .quad 0 | ||
137 | .quad 0 | ||
138 | .quad 0 | ||
139 | .quad 0 | ||
140 | .quad 0 | ||
141 | .balign 32,0,32 | ||
142 | |||
143 | /* Jump table of 3rd level handlers */ | ||
144 | trap_jtable: | ||
145 | .long do_exception_error /* 0x000 */ | ||
146 | .long do_exception_error /* 0x020 */ | ||
147 | .long tlb_miss_load /* 0x040 */ | ||
148 | .long tlb_miss_store /* 0x060 */ | ||
149 | ! ARTIFICIAL pseudo-EXPEVT setting | ||
150 | .long do_debug_interrupt /* 0x080 */ | ||
151 | .long tlb_miss_load /* 0x0A0 */ | ||
152 | .long tlb_miss_store /* 0x0C0 */ | ||
153 | .long do_address_error_load /* 0x0E0 */ | ||
154 | .long do_address_error_store /* 0x100 */ | ||
155 | #ifdef CONFIG_SH_FPU | ||
156 | .long do_fpu_error /* 0x120 */ | ||
157 | #else | ||
158 | .long do_exception_error /* 0x120 */ | ||
159 | #endif | ||
160 | .long do_exception_error /* 0x140 */ | ||
161 | .long system_call /* 0x160 */ | ||
162 | .long do_reserved_inst /* 0x180 */ | ||
163 | .long do_illegal_slot_inst /* 0x1A0 */ | ||
164 | .long do_NMI /* 0x1C0 */ | ||
165 | .long do_exception_error /* 0x1E0 */ | ||
166 | .rept 15 | ||
167 | .long do_IRQ /* 0x200 - 0x3C0 */ | ||
168 | .endr | ||
169 | .long do_exception_error /* 0x3E0 */ | ||
170 | .rept 32 | ||
171 | .long do_IRQ /* 0x400 - 0x7E0 */ | ||
172 | .endr | ||
173 | .long fpu_error_or_IRQA /* 0x800 */ | ||
174 | .long fpu_error_or_IRQB /* 0x820 */ | ||
175 | .long do_IRQ /* 0x840 */ | ||
176 | .long do_IRQ /* 0x860 */ | ||
177 | .rept 6 | ||
178 | .long do_exception_error /* 0x880 - 0x920 */ | ||
179 | .endr | ||
180 | .long do_software_break_point /* 0x940 */ | ||
181 | .long do_exception_error /* 0x960 */ | ||
182 | .long do_single_step /* 0x980 */ | ||
183 | |||
184 | .rept 3 | ||
185 | .long do_exception_error /* 0x9A0 - 0x9E0 */ | ||
186 | .endr | ||
187 | .long do_IRQ /* 0xA00 */ | ||
188 | .long do_IRQ /* 0xA20 */ | ||
189 | .long itlb_miss_or_IRQ /* 0xA40 */ | ||
190 | .long do_IRQ /* 0xA60 */ | ||
191 | .long do_IRQ /* 0xA80 */ | ||
192 | .long itlb_miss_or_IRQ /* 0xAA0 */ | ||
193 | .long do_exception_error /* 0xAC0 */ | ||
194 | .long do_address_error_exec /* 0xAE0 */ | ||
195 | .rept 8 | ||
196 | .long do_exception_error /* 0xB00 - 0xBE0 */ | ||
197 | .endr | ||
198 | .rept 18 | ||
199 | .long do_IRQ /* 0xC00 - 0xE20 */ | ||
200 | .endr | ||
201 | |||
202 | .section .text64, "ax" | ||
203 | |||
204 | /* | ||
205 | * --- Exception/Interrupt/Event Handling Section | ||
206 | */ | ||
207 | |||
208 | /* | ||
209 | * VBR and RESVEC blocks. | ||
210 | * | ||
211 | * First level handler for VBR-based exceptions. | ||
212 | * | ||
213 | * To avoid waste of space, align to the maximum text block size. | ||
214 | * This is assumed to be at most 128 bytes or 32 instructions. | ||
215 | * DO NOT EXCEED 32 instructions on the first level handlers ! | ||
216 | * | ||
217 | * Also note that RESVEC is contained within the VBR block | ||
218 | * where the room left (1KB - TEXT_SIZE) allows placing | ||
219 | * the RESVEC block (at most 512B + TEXT_SIZE). | ||
220 | * | ||
221 | * So the first (and only) level handlers for RESVEC-based exceptions live here too. | ||
222 | * | ||
223 | * Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss | ||
224 | * and interrupt) we are very tight on register space until the | ||
225 | * registers are saved onto the stack frame, which is done in handle_exception(). | ||
226 | * | ||
227 | */ | ||
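A rough map of where the handlers below land, derived from the .space/.balign directives and the VBR+0xNNN comments in the code (a summary only):

/* VBR + 0x000  power-on class (256 bytes reserved, not used here)
 * VBR + 0x100  not_a_tlb_miss
 * VBR + 0x400  tlb_miss
 * VBR + 0x600  interrupt
 * Each first-level handler must fit in TEXT_SIZE = 128 bytes (32 instructions);
 * the RESVEC handlers (reset_or_panic, debug_exception, debug_interrupt) follow
 * in the separate LRESVEC_block further down.
 */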
228 | |||
229 | #define TEXT_SIZE 128 | ||
230 | #define BLOCK_SIZE 1664 /* Dynamic check, 13*128 */ | ||
231 | |||
232 | .balign TEXT_SIZE | ||
233 | LVBR_block: | ||
234 | .space 256, 0 /* Power-on class handler, */ | ||
235 | /* not required here */ | ||
236 | not_a_tlb_miss: | ||
237 | synco /* TAKum03020 (but probably a good idea anyway.) */ | ||
238 | /* Save original stack pointer into KCR1 */ | ||
239 | putcon SP, KCR1 | ||
240 | |||
241 | /* Save other original registers into reg_save_area */ | ||
242 | movi reg_save_area, SP | ||
243 | st.q SP, SAVED_R2, r2 | ||
244 | st.q SP, SAVED_R3, r3 | ||
245 | st.q SP, SAVED_R4, r4 | ||
246 | st.q SP, SAVED_R5, r5 | ||
247 | st.q SP, SAVED_R6, r6 | ||
248 | st.q SP, SAVED_R18, r18 | ||
249 | gettr tr0, r3 | ||
250 | st.q SP, SAVED_TR0, r3 | ||
251 | |||
252 | /* Set args for Non-debug, Not a TLB miss class handler */ | ||
253 | getcon EXPEVT, r2 | ||
254 | movi ret_from_exception, r3 | ||
255 | ori r3, 1, r3 | ||
256 | movi EVENT_FAULT_NOT_TLB, r4 | ||
257 | or SP, ZERO, r5 | ||
258 | getcon KCR1, SP | ||
259 | pta handle_exception, tr0 | ||
260 | blink tr0, ZERO | ||
261 | |||
262 | .balign 256 | ||
263 | ! VBR+0x200 | ||
264 | nop | ||
265 | .balign 256 | ||
266 | ! VBR+0x300 | ||
267 | nop | ||
268 | .balign 256 | ||
269 | /* | ||
270 | * Instead of the natural .balign 1024 place RESVEC here | ||
271 | * respecting the final 1KB alignment. | ||
272 | */ | ||
273 | .balign TEXT_SIZE | ||
274 | /* | ||
275 | * Instead of '.space 1024-TEXT_SIZE' place the RESVEC | ||
276 | * block making sure the final alignment is correct. | ||
277 | */ | ||
278 | tlb_miss: | ||
279 | synco /* TAKum03020 (but probably a good idea anyway.) */ | ||
280 | putcon SP, KCR1 | ||
281 | movi reg_save_area, SP | ||
282 | /* SP is guaranteed 32-byte aligned. */ | ||
283 | st.q SP, TLB_SAVED_R0 , r0 | ||
284 | st.q SP, TLB_SAVED_R1 , r1 | ||
285 | st.q SP, SAVED_R2 , r2 | ||
286 | st.q SP, SAVED_R3 , r3 | ||
287 | st.q SP, SAVED_R4 , r4 | ||
288 | st.q SP, SAVED_R5 , r5 | ||
289 | st.q SP, SAVED_R6 , r6 | ||
290 | st.q SP, SAVED_R18, r18 | ||
291 | |||
292 | /* Save R25 for safety; as/ld may want to use it to achieve the call to | ||
293 | * the code in mm/tlbmiss.c */ | ||
294 | st.q SP, TLB_SAVED_R25, r25 | ||
295 | gettr tr0, r2 | ||
296 | gettr tr1, r3 | ||
297 | gettr tr2, r4 | ||
298 | gettr tr3, r5 | ||
299 | gettr tr4, r18 | ||
300 | st.q SP, SAVED_TR0 , r2 | ||
301 | st.q SP, TLB_SAVED_TR1 , r3 | ||
302 | st.q SP, TLB_SAVED_TR2 , r4 | ||
303 | st.q SP, TLB_SAVED_TR3 , r5 | ||
304 | st.q SP, TLB_SAVED_TR4 , r18 | ||
305 | |||
306 | pt do_fast_page_fault, tr0 | ||
307 | getcon SSR, r2 | ||
308 | getcon EXPEVT, r3 | ||
309 | getcon TEA, r4 | ||
310 | shlri r2, 30, r2 | ||
311 | andi r2, 1, r2 /* r2 = SSR.MD */ | ||
312 | blink tr0, LINK | ||
313 | |||
314 | pt fixup_to_invoke_general_handler, tr1 | ||
315 | |||
316 | /* If the fast path handler fixed the fault, just drop through quickly | ||
317 | to the restore code right away to return to the excepting context. | ||
318 | */ | ||
319 | beqi/u r2, 0, tr1 | ||
320 | |||
321 | fast_tlb_miss_restore: | ||
322 | ld.q SP, SAVED_TR0, r2 | ||
323 | ld.q SP, TLB_SAVED_TR1, r3 | ||
324 | ld.q SP, TLB_SAVED_TR2, r4 | ||
325 | |||
326 | ld.q SP, TLB_SAVED_TR3, r5 | ||
327 | ld.q SP, TLB_SAVED_TR4, r18 | ||
328 | |||
329 | ptabs r2, tr0 | ||
330 | ptabs r3, tr1 | ||
331 | ptabs r4, tr2 | ||
332 | ptabs r5, tr3 | ||
333 | ptabs r18, tr4 | ||
334 | |||
335 | ld.q SP, TLB_SAVED_R0, r0 | ||
336 | ld.q SP, TLB_SAVED_R1, r1 | ||
337 | ld.q SP, SAVED_R2, r2 | ||
338 | ld.q SP, SAVED_R3, r3 | ||
339 | ld.q SP, SAVED_R4, r4 | ||
340 | ld.q SP, SAVED_R5, r5 | ||
341 | ld.q SP, SAVED_R6, r6 | ||
342 | ld.q SP, SAVED_R18, r18 | ||
343 | ld.q SP, TLB_SAVED_R25, r25 | ||
344 | |||
345 | getcon KCR1, SP | ||
346 | rte | ||
347 | nop /* for safety, in case the code is run on sh5-101 cut1.x */ | ||
348 | |||
349 | fixup_to_invoke_general_handler: | ||
350 | |||
351 | /* OK, new method. Restore stuff that's not expected to get saved into | ||
352 | the 'first-level' reg save area, then just fall through to setting | ||
353 | up the registers and calling the second-level handler. */ | ||
354 | |||
355 | /* 2nd level expects r2,3,4,5,6,18,tr0 to be saved. So we must restore | ||
356 | r25,tr1-4 and save r6 to get into the right state. */ | ||
357 | |||
358 | ld.q SP, TLB_SAVED_TR1, r3 | ||
359 | ld.q SP, TLB_SAVED_TR2, r4 | ||
360 | ld.q SP, TLB_SAVED_TR3, r5 | ||
361 | ld.q SP, TLB_SAVED_TR4, r18 | ||
362 | ld.q SP, TLB_SAVED_R25, r25 | ||
363 | |||
364 | ld.q SP, TLB_SAVED_R0, r0 | ||
365 | ld.q SP, TLB_SAVED_R1, r1 | ||
366 | |||
367 | ptabs/u r3, tr1 | ||
368 | ptabs/u r4, tr2 | ||
369 | ptabs/u r5, tr3 | ||
370 | ptabs/u r18, tr4 | ||
371 | |||
372 | /* Set args for Non-debug, TLB miss class handler */ | ||
373 | getcon EXPEVT, r2 | ||
374 | movi ret_from_exception, r3 | ||
375 | ori r3, 1, r3 | ||
376 | movi EVENT_FAULT_TLB, r4 | ||
377 | or SP, ZERO, r5 | ||
378 | getcon KCR1, SP | ||
379 | pta handle_exception, tr0 | ||
380 | blink tr0, ZERO | ||
381 | |||
382 | /* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE | ||
383 | DOES END UP AT VBR+0x600 */ | ||
384 | nop | ||
385 | nop | ||
386 | nop | ||
387 | nop | ||
388 | nop | ||
389 | nop | ||
390 | |||
391 | .balign 256 | ||
392 | /* VBR + 0x600 */ | ||
393 | |||
394 | interrupt: | ||
395 | synco /* TAKum03020 (but probably a good idea anyway.) */ | ||
396 | /* Save original stack pointer into KCR1 */ | ||
397 | putcon SP, KCR1 | ||
398 | |||
399 | /* Save other original registers into reg_save_area */ | ||
400 | movi reg_save_area, SP | ||
401 | st.q SP, SAVED_R2, r2 | ||
402 | st.q SP, SAVED_R3, r3 | ||
403 | st.q SP, SAVED_R4, r4 | ||
404 | st.q SP, SAVED_R5, r5 | ||
405 | st.q SP, SAVED_R6, r6 | ||
406 | st.q SP, SAVED_R18, r18 | ||
407 | gettr tr0, r3 | ||
408 | st.q SP, SAVED_TR0, r3 | ||
409 | |||
410 | /* Set args for interrupt class handler */ | ||
411 | getcon INTEVT, r2 | ||
412 | movi ret_from_irq, r3 | ||
413 | ori r3, 1, r3 | ||
414 | movi EVENT_INTERRUPT, r4 | ||
415 | or SP, ZERO, r5 | ||
416 | getcon KCR1, SP | ||
417 | pta handle_exception, tr0 | ||
418 | blink tr0, ZERO | ||
419 | .balign TEXT_SIZE /* let's waste the bare minimum */ | ||
420 | |||
421 | LVBR_block_end: /* Marker. Used for total checking */ | ||
422 | |||
423 | .balign 256 | ||
424 | LRESVEC_block: | ||
425 | /* Panic handler. Called with MMU off. Possible causes/actions: | ||
426 | * - Reset: Jump to program start. | ||
427 | * - Single Step: Turn off Single Step & return. | ||
428 | * - Others: Call panic handler, passing PC as arg. | ||
429 | * (this may need to be extended...) | ||
430 | */ | ||
431 | reset_or_panic: | ||
432 | synco /* TAKum03020 (but probably a good idea anyway.) */ | ||
433 | putcon SP, DCR | ||
434 | /* First save r0-1 and tr0, as we need to use these */ | ||
435 | movi resvec_save_area-CONFIG_CACHED_MEMORY_OFFSET, SP | ||
436 | st.q SP, 0, r0 | ||
437 | st.q SP, 8, r1 | ||
438 | gettr tr0, r0 | ||
439 | st.q SP, 32, r0 | ||
440 | |||
441 | /* Check cause */ | ||
442 | getcon EXPEVT, r0 | ||
443 | movi RESET_CAUSE, r1 | ||
444 | sub r1, r0, r1 /* r1=0 if reset */ | ||
445 | movi _stext-CONFIG_CACHED_MEMORY_OFFSET, r0 | ||
446 | ori r0, 1, r0 | ||
447 | ptabs r0, tr0 | ||
448 | beqi r1, 0, tr0 /* Jump to start address if reset */ | ||
449 | |||
450 | getcon EXPEVT, r0 | ||
451 | movi DEBUGSS_CAUSE, r1 | ||
452 | sub r1, r0, r1 /* r1=0 if single step */ | ||
453 | pta single_step_panic, tr0 | ||
454 | beqi r1, 0, tr0 /* jump if single step */ | ||
455 | |||
456 | /* Now jump to where we save the registers. */ | ||
457 | movi panic_stash_regs-CONFIG_CACHED_MEMORY_OFFSET, r1 | ||
458 | ptabs r1, tr0 | ||
459 | blink tr0, r63 | ||
460 | |||
461 | single_step_panic: | ||
462 | /* We are in a handler with Single Step set. We need to resume the | ||
463 | * handler, by turning on MMU & turning off Single Step. */ | ||
464 | getcon SSR, r0 | ||
465 | movi SR_MMU, r1 | ||
466 | or r0, r1, r0 | ||
467 | movi ~SR_SS, r1 | ||
468 | and r0, r1, r0 | ||
469 | putcon r0, SSR | ||
470 | /* Restore EXPEVT, as the rte won't do this */ | ||
471 | getcon PEXPEVT, r0 | ||
472 | putcon r0, EXPEVT | ||
473 | /* Restore regs */ | ||
474 | ld.q SP, 32, r0 | ||
475 | ptabs r0, tr0 | ||
476 | ld.q SP, 0, r0 | ||
477 | ld.q SP, 8, r1 | ||
478 | getcon DCR, SP | ||
479 | synco | ||
480 | rte | ||
481 | |||
482 | |||
483 | .balign 256 | ||
484 | debug_exception: | ||
485 | synco /* TAKum03020 (but probably a good idea anyway.) */ | ||
486 | /* | ||
487 | * Single step/software_break_point first level handler. | ||
488 | * Called with MMU off, so the first thing we do is enable it | ||
489 | * by doing an rte with appropriate SSR. | ||
490 | */ | ||
491 | putcon SP, DCR | ||
492 | /* Save SSR & SPC, together with R0 & R1, as we need to use 2 regs. */ | ||
493 | movi resvec_save_area-CONFIG_CACHED_MEMORY_OFFSET, SP | ||
494 | |||
495 | /* With the MMU off, we are bypassing the cache, so purge any | ||
496 | * data that will be made stale by the following stores. | ||
497 | */ | ||
498 | ocbp SP, 0 | ||
499 | synco | ||
500 | |||
501 | st.q SP, 0, r0 | ||
502 | st.q SP, 8, r1 | ||
503 | getcon SPC, r0 | ||
504 | st.q SP, 16, r0 | ||
505 | getcon SSR, r0 | ||
506 | st.q SP, 24, r0 | ||
507 | |||
508 | /* Enable MMU, block exceptions, set priv mode, disable single step */ | ||
509 | movi SR_MMU | SR_BL | SR_MD, r1 | ||
510 | or r0, r1, r0 | ||
511 | movi ~SR_SS, r1 | ||
512 | and r0, r1, r0 | ||
513 | putcon r0, SSR | ||
514 | /* Force control to debug_exception_2 when rte is executed */ | ||
515 | movi debug_exeception_2, r0 | ||
516 | ori r0, 1, r0 /* force SHmedia, just in case */ | ||
517 | putcon r0, SPC | ||
518 | getcon DCR, SP | ||
519 | synco | ||
520 | rte | ||
521 | debug_exeception_2: | ||
522 | /* Restore saved regs */ | ||
523 | putcon SP, KCR1 | ||
524 | movi resvec_save_area, SP | ||
525 | ld.q SP, 24, r0 | ||
526 | putcon r0, SSR | ||
527 | ld.q SP, 16, r0 | ||
528 | putcon r0, SPC | ||
529 | ld.q SP, 0, r0 | ||
530 | ld.q SP, 8, r1 | ||
531 | |||
532 | /* Save other original registers into reg_save_area */ | ||
533 | movi reg_save_area, SP | ||
534 | st.q SP, SAVED_R2, r2 | ||
535 | st.q SP, SAVED_R3, r3 | ||
536 | st.q SP, SAVED_R4, r4 | ||
537 | st.q SP, SAVED_R5, r5 | ||
538 | st.q SP, SAVED_R6, r6 | ||
539 | st.q SP, SAVED_R18, r18 | ||
540 | gettr tr0, r3 | ||
541 | st.q SP, SAVED_TR0, r3 | ||
542 | |||
543 | /* Set args for debug class handler */ | ||
544 | getcon EXPEVT, r2 | ||
545 | movi ret_from_exception, r3 | ||
546 | ori r3, 1, r3 | ||
547 | movi EVENT_DEBUG, r4 | ||
548 | or SP, ZERO, r5 | ||
549 | getcon KCR1, SP | ||
550 | pta handle_exception, tr0 | ||
551 | blink tr0, ZERO | ||
552 | |||
553 | .balign 256 | ||
554 | debug_interrupt: | ||
555 | /* !!! WE COME HERE IN REAL MODE !!! */ | ||
556 | /* Hook-up debug interrupt to allow various debugging options to be | ||
557 | * hooked into its handler. */ | ||
558 | /* Save original stack pointer into KCR1 */ | ||
559 | synco | ||
560 | putcon SP, KCR1 | ||
561 | movi resvec_save_area-CONFIG_CACHED_MEMORY_OFFSET, SP | ||
562 | ocbp SP, 0 | ||
563 | ocbp SP, 32 | ||
564 | synco | ||
565 | |||
566 | /* Save other original registers into reg_save_area thru real addresses */ | ||
567 | st.q SP, SAVED_R2, r2 | ||
568 | st.q SP, SAVED_R3, r3 | ||
569 | st.q SP, SAVED_R4, r4 | ||
570 | st.q SP, SAVED_R5, r5 | ||
571 | st.q SP, SAVED_R6, r6 | ||
572 | st.q SP, SAVED_R18, r18 | ||
573 | gettr tr0, r3 | ||
574 | st.q SP, SAVED_TR0, r3 | ||
575 | |||
576 | /* move (spc,ssr)->(pspc,pssr). The rte will shift | ||
577 | them back again, so that they look like the originals | ||
578 | as far as the real handler code is concerned. */ | ||
579 | getcon spc, r6 | ||
580 | putcon r6, pspc | ||
581 | getcon ssr, r6 | ||
582 | putcon r6, pssr | ||
583 | |||
584 | ! construct useful SR for handle_exception | ||
585 | movi 3, r6 | ||
586 | shlli r6, 30, r6 | ||
587 | getcon sr, r18 | ||
588 | or r18, r6, r6 | ||
589 | putcon r6, ssr | ||
590 | |||
591 | ! SSR is now the current SR with the MD and MMU bits set | ||
592 | ! i.e. the rte will switch back to priv mode and put | ||
593 | ! the mmu back on | ||
594 | |||
595 | ! construct spc | ||
596 | movi handle_exception, r18 | ||
597 | ori r18, 1, r18 ! for safety (do we need this?) | ||
598 | putcon r18, spc | ||
599 | |||
600 | /* Set args for Non-debug, Not a TLB miss class handler */ | ||
601 | |||
602 | ! EXPEVT==0x80 is unused, so 'steal' this value to put the | ||
603 | ! debug interrupt handler in the vectoring table | ||
604 | movi 0x80, r2 | ||
605 | movi ret_from_exception, r3 | ||
606 | ori r3, 1, r3 | ||
607 | movi EVENT_FAULT_NOT_TLB, r4 | ||
608 | |||
609 | or SP, ZERO, r5 | ||
610 | movi CONFIG_CACHED_MEMORY_OFFSET, r6 | ||
611 | add r6, r5, r5 | ||
612 | getcon KCR1, SP | ||
613 | |||
614 | synco ! for safety | ||
615 | rte ! -> handle_exception, switch back to priv mode again | ||
616 | |||
617 | LRESVEC_block_end: /* Marker. Unused. */ | ||
618 | |||
619 | .balign TEXT_SIZE | ||
620 | |||
621 | /* | ||
622 | * Second level handler for VBR-based exceptions. Pre-handler. | ||
624 | * Common to all stack-frame-sensitive handlers. | ||
624 | * | ||
625 | * Inputs: | ||
626 | * (KCR0) Current [current task union] | ||
627 | * (KCR1) Original SP | ||
628 | * (r2) INTEVT/EXPEVT | ||
629 | * (r3) appropriate return address | ||
630 | * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault, 3=debug) | ||
631 | * (r5) Pointer to reg_save_area | ||
632 | * (SP) Original SP | ||
633 | * | ||
634 | * Available registers: | ||
635 | * (r6) | ||
636 | * (r18) | ||
637 | * (tr0) | ||
638 | * | ||
639 | */ | ||
640 | handle_exception: | ||
641 | /* Common 2nd level handler. */ | ||
642 | |||
643 | /* First thing we need an appropriate stack pointer */ | ||
644 | getcon SSR, r6 | ||
645 | shlri r6, 30, r6 | ||
646 | andi r6, 1, r6 | ||
647 | pta stack_ok, tr0 | ||
648 | bne r6, ZERO, tr0 /* Original stack pointer is fine */ | ||
649 | |||
650 | /* Set stack pointer for user fault */ | ||
651 | getcon KCR0, SP | ||
652 | movi THREAD_SIZE, r6 /* Point to the end */ | ||
653 | add SP, r6, SP | ||
654 | |||
655 | stack_ok: | ||
656 | |||
657 | /* DEBUG : check for underflow/overflow of the kernel stack */ | ||
658 | pta no_underflow, tr0 | ||
659 | getcon KCR0, r6 | ||
660 | movi 1024, r18 | ||
661 | add r6, r18, r6 | ||
662 | bge SP, r6, tr0 ! ? below 1k from bottom of stack : danger zone | ||
663 | |||
664 | /* Just panic to cause a crash. */ | ||
665 | bad_sp: | ||
666 | ld.b r63, 0, r6 | ||
667 | nop | ||
668 | |||
669 | no_underflow: | ||
670 | pta bad_sp, tr0 | ||
671 | getcon kcr0, r6 | ||
672 | movi THREAD_SIZE, r18 | ||
673 | add r18, r6, r6 | ||
674 | bgt SP, r6, tr0 ! sp above the stack | ||
675 | |||
676 | /* Make some room for the BASIC frame. */ | ||
677 | movi -(FRAME_SIZE), r6 | ||
678 | add SP, r6, SP | ||
679 | |||
680 | /* Could do this with no stalling if we had another spare register, but the | ||
681 | code below will be OK. */ | ||
682 | ld.q r5, SAVED_R2, r6 | ||
683 | ld.q r5, SAVED_R3, r18 | ||
684 | st.q SP, FRAME_R(2), r6 | ||
685 | ld.q r5, SAVED_R4, r6 | ||
686 | st.q SP, FRAME_R(3), r18 | ||
687 | ld.q r5, SAVED_R5, r18 | ||
688 | st.q SP, FRAME_R(4), r6 | ||
689 | ld.q r5, SAVED_R6, r6 | ||
690 | st.q SP, FRAME_R(5), r18 | ||
691 | ld.q r5, SAVED_R18, r18 | ||
692 | st.q SP, FRAME_R(6), r6 | ||
693 | ld.q r5, SAVED_TR0, r6 | ||
694 | st.q SP, FRAME_R(18), r18 | ||
695 | st.q SP, FRAME_T(0), r6 | ||
696 | |||
697 | /* Keep old SP around */ | ||
698 | getcon KCR1, r6 | ||
699 | |||
700 | /* Save the rest of the general purpose registers */ | ||
701 | st.q SP, FRAME_R(0), r0 | ||
702 | st.q SP, FRAME_R(1), r1 | ||
703 | st.q SP, FRAME_R(7), r7 | ||
704 | st.q SP, FRAME_R(8), r8 | ||
705 | st.q SP, FRAME_R(9), r9 | ||
706 | st.q SP, FRAME_R(10), r10 | ||
707 | st.q SP, FRAME_R(11), r11 | ||
708 | st.q SP, FRAME_R(12), r12 | ||
709 | st.q SP, FRAME_R(13), r13 | ||
710 | st.q SP, FRAME_R(14), r14 | ||
711 | |||
712 | /* SP is somewhere else */ | ||
713 | st.q SP, FRAME_R(15), r6 | ||
714 | |||
715 | st.q SP, FRAME_R(16), r16 | ||
716 | st.q SP, FRAME_R(17), r17 | ||
717 | /* r18 is saved earlier. */ | ||
718 | st.q SP, FRAME_R(19), r19 | ||
719 | st.q SP, FRAME_R(20), r20 | ||
720 | st.q SP, FRAME_R(21), r21 | ||
721 | st.q SP, FRAME_R(22), r22 | ||
722 | st.q SP, FRAME_R(23), r23 | ||
723 | st.q SP, FRAME_R(24), r24 | ||
724 | st.q SP, FRAME_R(25), r25 | ||
725 | st.q SP, FRAME_R(26), r26 | ||
726 | st.q SP, FRAME_R(27), r27 | ||
727 | st.q SP, FRAME_R(28), r28 | ||
728 | st.q SP, FRAME_R(29), r29 | ||
729 | st.q SP, FRAME_R(30), r30 | ||
730 | st.q SP, FRAME_R(31), r31 | ||
731 | st.q SP, FRAME_R(32), r32 | ||
732 | st.q SP, FRAME_R(33), r33 | ||
733 | st.q SP, FRAME_R(34), r34 | ||
734 | st.q SP, FRAME_R(35), r35 | ||
735 | st.q SP, FRAME_R(36), r36 | ||
736 | st.q SP, FRAME_R(37), r37 | ||
737 | st.q SP, FRAME_R(38), r38 | ||
738 | st.q SP, FRAME_R(39), r39 | ||
739 | st.q SP, FRAME_R(40), r40 | ||
740 | st.q SP, FRAME_R(41), r41 | ||
741 | st.q SP, FRAME_R(42), r42 | ||
742 | st.q SP, FRAME_R(43), r43 | ||
743 | st.q SP, FRAME_R(44), r44 | ||
744 | st.q SP, FRAME_R(45), r45 | ||
745 | st.q SP, FRAME_R(46), r46 | ||
746 | st.q SP, FRAME_R(47), r47 | ||
747 | st.q SP, FRAME_R(48), r48 | ||
748 | st.q SP, FRAME_R(49), r49 | ||
749 | st.q SP, FRAME_R(50), r50 | ||
750 | st.q SP, FRAME_R(51), r51 | ||
751 | st.q SP, FRAME_R(52), r52 | ||
752 | st.q SP, FRAME_R(53), r53 | ||
753 | st.q SP, FRAME_R(54), r54 | ||
754 | st.q SP, FRAME_R(55), r55 | ||
755 | st.q SP, FRAME_R(56), r56 | ||
756 | st.q SP, FRAME_R(57), r57 | ||
757 | st.q SP, FRAME_R(58), r58 | ||
758 | st.q SP, FRAME_R(59), r59 | ||
759 | st.q SP, FRAME_R(60), r60 | ||
760 | st.q SP, FRAME_R(61), r61 | ||
761 | st.q SP, FRAME_R(62), r62 | ||
762 | |||
763 | /* | ||
764 | * Save the S* registers. | ||
765 | */ | ||
766 | getcon SSR, r61 | ||
767 | st.q SP, FRAME_S(FSSR), r61 | ||
768 | getcon SPC, r62 | ||
769 | st.q SP, FRAME_S(FSPC), r62 | ||
770 | movi -1, r62 /* Reset syscall_nr */ | ||
771 | st.q SP, FRAME_S(FSYSCALL_ID), r62 | ||
772 | |||
773 | /* Save the rest of the target registers */ | ||
774 | gettr tr1, r6 | ||
775 | st.q SP, FRAME_T(1), r6 | ||
776 | gettr tr2, r6 | ||
777 | st.q SP, FRAME_T(2), r6 | ||
778 | gettr tr3, r6 | ||
779 | st.q SP, FRAME_T(3), r6 | ||
780 | gettr tr4, r6 | ||
781 | st.q SP, FRAME_T(4), r6 | ||
782 | gettr tr5, r6 | ||
783 | st.q SP, FRAME_T(5), r6 | ||
784 | gettr tr6, r6 | ||
785 | st.q SP, FRAME_T(6), r6 | ||
786 | gettr tr7, r6 | ||
787 | st.q SP, FRAME_T(7), r6 | ||
788 | |||
789 | ! setup FP so that unwinder can wind back through nested kernel mode | ||
790 | ! exceptions | ||
791 | add SP, ZERO, r14 | ||
792 | |||
793 | #ifdef CONFIG_POOR_MANS_STRACE | ||
794 | /* We've pushed all the registers now, so only r2-r4 hold anything | ||
795 | * useful. Move them into callee save registers */ | ||
796 | or r2, ZERO, r28 | ||
797 | or r3, ZERO, r29 | ||
798 | or r4, ZERO, r30 | ||
799 | |||
800 | /* Preserve r2 as the event code */ | ||
801 | movi evt_debug, r3 | ||
802 | ori r3, 1, r3 | ||
803 | ptabs r3, tr0 | ||
804 | |||
805 | or SP, ZERO, r6 | ||
806 | getcon TRA, r5 | ||
807 | blink tr0, LINK | ||
808 | |||
809 | or r28, ZERO, r2 | ||
810 | or r29, ZERO, r3 | ||
811 | or r30, ZERO, r4 | ||
812 | #endif | ||
813 | |||
814 | /* For syscall and debug race condition, get TRA now */ | ||
815 | getcon TRA, r5 | ||
816 | |||
817 | /* We are in a safe position to turn SR.BL off, but set IMASK=0xf | ||
818 | * Also set FD, to catch FPU usage in the kernel. | ||
819 | * | ||
820 | * benedict.gaster@superh.com 29/07/2002 | ||
821 | * | ||
822 | * On all SH5-101 revisions it is unsafe to raise the IMASK and at the | ||
823 | * same time change BL from 1->0, as any pending interrupt of a level | ||
824 | * higher than the previous value of IMASK will leak through and be | ||
825 | * taken unexpectedly. | ||
826 | * | ||
827 | * To avoid this we raise the IMASK and then issue another PUTCON to | ||
828 | * enable interrupts. | ||
829 | */ | ||
830 | getcon SR, r6 | ||
831 | movi SR_IMASK | SR_FD, r7 | ||
832 | or r6, r7, r6 | ||
833 | putcon r6, SR | ||
834 | movi SR_UNBLOCK_EXC, r7 | ||
835 | and r6, r7, r6 | ||
836 | putcon r6, SR | ||
837 | |||
838 | |||
839 | /* Now call the appropriate 3rd level handler */ | ||
840 | or r3, ZERO, LINK | ||
841 | movi trap_jtable, r3 | ||
842 | shlri r2, 3, r2 | ||
843 | ldx.l r2, r3, r3 | ||
844 | shlri r2, 2, r2 | ||
845 | ptabs r3, tr0 | ||
846 | or SP, ZERO, r3 | ||
847 | blink tr0, ZERO | ||
848 | |||
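To make the indexing above concrete, a worked example using values from the jump table earlier in this file:

/* EXPEVT = 0x160 (system call trap):
 *   shlri r2, 3, r2  ->  r2 = 0x160 >> 3 = 0x2c, the byte offset into trap_jtable
 *                        (entries are 4-byte .longs, so 0x2c / 4 = entry 11 = system_call)
 *   ldx.l r2, r3, r3 ->  r3 = address of system_call
 *   shlri r2, 2, r2  ->  r2 = 0x160 >> 5 = 11, the entry number handed to the handler
 */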
849 | /* | ||
850 | * Second level handler for VBR-based exceptions. Post-handlers. | ||
851 | * | ||
852 | * Post-handlers for interrupts (ret_from_irq), exceptions | ||
853 | * (ret_from_exception) and common reentrance doors (restore_all | ||
854 | * to get back to the original context, ret_from_syscall loop to | ||
855 | * check kernel exiting). | ||
856 | * | ||
857 | * ret_with_reschedule and work_notifysig are inner labels of | ||
858 | * the ret_from_syscall loop. | ||
859 | * | ||
860 | * Common to all stack-frame-sensitive handlers. | ||
861 | * | ||
862 | * Inputs: | ||
863 | * (SP) struct pt_regs *, original register's frame pointer (basic) | ||
864 | * | ||
865 | */ | ||
866 | .global ret_from_irq | ||
867 | ret_from_irq: | ||
868 | #ifdef CONFIG_POOR_MANS_STRACE | ||
869 | pta evt_debug_ret_from_irq, tr0 | ||
870 | ori SP, 0, r2 | ||
871 | blink tr0, LINK | ||
872 | #endif | ||
873 | ld.q SP, FRAME_S(FSSR), r6 | ||
874 | shlri r6, 30, r6 | ||
875 | andi r6, 1, r6 | ||
876 | pta resume_kernel, tr0 | ||
877 | bne r6, ZERO, tr0 /* no further checks */ | ||
878 | STI() | ||
879 | pta ret_with_reschedule, tr0 | ||
880 | blink tr0, ZERO /* Do not check softirqs */ | ||
881 | |||
882 | .global ret_from_exception | ||
883 | ret_from_exception: | ||
884 | preempt_stop() | ||
885 | |||
886 | #ifdef CONFIG_POOR_MANS_STRACE | ||
887 | pta evt_debug_ret_from_exc, tr0 | ||
888 | ori SP, 0, r2 | ||
889 | blink tr0, LINK | ||
890 | #endif | ||
891 | |||
892 | ld.q SP, FRAME_S(FSSR), r6 | ||
893 | shlri r6, 30, r6 | ||
894 | andi r6, 1, r6 | ||
895 | pta resume_kernel, tr0 | ||
896 | bne r6, ZERO, tr0 /* no further checks */ | ||
897 | |||
898 | /* Check softirqs */ | ||
899 | |||
900 | #ifdef CONFIG_PREEMPT | ||
901 | pta ret_from_syscall, tr0 | ||
902 | blink tr0, ZERO | ||
903 | |||
904 | resume_kernel: | ||
905 | pta restore_all, tr0 | ||
906 | |||
907 | getcon KCR0, r6 | ||
908 | ld.l r6, TI_PRE_COUNT, r7 | ||
909 | beq/u r7, ZERO, tr0 | ||
910 | |||
911 | need_resched: | ||
912 | ld.l r6, TI_FLAGS, r7 | ||
913 | movi (1 << TIF_NEED_RESCHED), r8 | ||
914 | and r8, r7, r8 | ||
915 | bne r8, ZERO, tr0 | ||
916 | |||
917 | getcon SR, r7 | ||
918 | andi r7, 0xf0, r7 | ||
919 | bne r7, ZERO, tr0 | ||
920 | |||
921 | movi ((PREEMPT_ACTIVE >> 16) & 65535), r8 | ||
922 | shori (PREEMPT_ACTIVE & 65535), r8 | ||
923 | st.l r6, TI_PRE_COUNT, r8 | ||
924 | |||
925 | STI() | ||
926 | movi schedule, r7 | ||
927 | ori r7, 1, r7 | ||
928 | ptabs r7, tr1 | ||
929 | blink tr1, LINK | ||
930 | |||
931 | st.l r6, TI_PRE_COUNT, ZERO | ||
932 | CLI() | ||
933 | |||
934 | pta need_resched, tr1 | ||
935 | blink tr1, ZERO | ||
936 | #endif | ||
937 | |||
938 | .global ret_from_syscall | ||
939 | ret_from_syscall: | ||
940 | |||
941 | ret_with_reschedule: | ||
942 | getcon KCR0, r6 ! r6 contains current_thread_info | ||
943 | ld.l r6, TI_FLAGS, r7 ! r7 contains current_thread_info->flags | ||
944 | |||
945 | ! FIXME:!!! | ||
946 | ! no handling of TIF_SYSCALL_TRACE yet!! | ||
947 | |||
948 | movi _TIF_NEED_RESCHED, r8 | ||
949 | and r8, r7, r8 | ||
950 | pta work_resched, tr0 | ||
951 | bne r8, ZERO, tr0 | ||
952 | |||
953 | pta restore_all, tr1 | ||
954 | |||
955 | movi (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r8 | ||
956 | and r8, r7, r8 | ||
957 | pta work_notifysig, tr0 | ||
958 | bne r8, ZERO, tr0 | ||
959 | |||
960 | blink tr1, ZERO | ||
961 | |||
962 | work_resched: | ||
963 | pta ret_from_syscall, tr0 | ||
964 | gettr tr0, LINK | ||
965 | movi schedule, r6 | ||
966 | ptabs r6, tr0 | ||
967 | blink tr0, ZERO /* Call schedule(), return on top */ | ||
968 | |||
969 | work_notifysig: | ||
970 | gettr tr1, LINK | ||
971 | |||
972 | movi do_signal, r6 | ||
973 | ptabs r6, tr0 | ||
974 | or SP, ZERO, r2 | ||
975 | or ZERO, ZERO, r3 | ||
976 | blink tr0, LINK /* Call do_signal(regs, 0), return here */ | ||
977 | |||
978 | restore_all: | ||
979 | /* Do prefetches */ | ||
980 | |||
981 | ld.q SP, FRAME_T(0), r6 | ||
982 | ld.q SP, FRAME_T(1), r7 | ||
983 | ld.q SP, FRAME_T(2), r8 | ||
984 | ld.q SP, FRAME_T(3), r9 | ||
985 | ptabs r6, tr0 | ||
986 | ptabs r7, tr1 | ||
987 | ptabs r8, tr2 | ||
988 | ptabs r9, tr3 | ||
989 | ld.q SP, FRAME_T(4), r6 | ||
990 | ld.q SP, FRAME_T(5), r7 | ||
991 | ld.q SP, FRAME_T(6), r8 | ||
992 | ld.q SP, FRAME_T(7), r9 | ||
993 | ptabs r6, tr4 | ||
994 | ptabs r7, tr5 | ||
995 | ptabs r8, tr6 | ||
996 | ptabs r9, tr7 | ||
997 | |||
998 | ld.q SP, FRAME_R(0), r0 | ||
999 | ld.q SP, FRAME_R(1), r1 | ||
1000 | ld.q SP, FRAME_R(2), r2 | ||
1001 | ld.q SP, FRAME_R(3), r3 | ||
1002 | ld.q SP, FRAME_R(4), r4 | ||
1003 | ld.q SP, FRAME_R(5), r5 | ||
1004 | ld.q SP, FRAME_R(6), r6 | ||
1005 | ld.q SP, FRAME_R(7), r7 | ||
1006 | ld.q SP, FRAME_R(8), r8 | ||
1007 | ld.q SP, FRAME_R(9), r9 | ||
1008 | ld.q SP, FRAME_R(10), r10 | ||
1009 | ld.q SP, FRAME_R(11), r11 | ||
1010 | ld.q SP, FRAME_R(12), r12 | ||
1011 | ld.q SP, FRAME_R(13), r13 | ||
1012 | ld.q SP, FRAME_R(14), r14 | ||
1013 | |||
1014 | ld.q SP, FRAME_R(16), r16 | ||
1015 | ld.q SP, FRAME_R(17), r17 | ||
1016 | ld.q SP, FRAME_R(18), r18 | ||
1017 | ld.q SP, FRAME_R(19), r19 | ||
1018 | ld.q SP, FRAME_R(20), r20 | ||
1019 | ld.q SP, FRAME_R(21), r21 | ||
1020 | ld.q SP, FRAME_R(22), r22 | ||
1021 | ld.q SP, FRAME_R(23), r23 | ||
1022 | ld.q SP, FRAME_R(24), r24 | ||
1023 | ld.q SP, FRAME_R(25), r25 | ||
1024 | ld.q SP, FRAME_R(26), r26 | ||
1025 | ld.q SP, FRAME_R(27), r27 | ||
1026 | ld.q SP, FRAME_R(28), r28 | ||
1027 | ld.q SP, FRAME_R(29), r29 | ||
1028 | ld.q SP, FRAME_R(30), r30 | ||
1029 | ld.q SP, FRAME_R(31), r31 | ||
1030 | ld.q SP, FRAME_R(32), r32 | ||
1031 | ld.q SP, FRAME_R(33), r33 | ||
1032 | ld.q SP, FRAME_R(34), r34 | ||
1033 | ld.q SP, FRAME_R(35), r35 | ||
1034 | ld.q SP, FRAME_R(36), r36 | ||
1035 | ld.q SP, FRAME_R(37), r37 | ||
1036 | ld.q SP, FRAME_R(38), r38 | ||
1037 | ld.q SP, FRAME_R(39), r39 | ||
1038 | ld.q SP, FRAME_R(40), r40 | ||
1039 | ld.q SP, FRAME_R(41), r41 | ||
1040 | ld.q SP, FRAME_R(42), r42 | ||
1041 | ld.q SP, FRAME_R(43), r43 | ||
1042 | ld.q SP, FRAME_R(44), r44 | ||
1043 | ld.q SP, FRAME_R(45), r45 | ||
1044 | ld.q SP, FRAME_R(46), r46 | ||
1045 | ld.q SP, FRAME_R(47), r47 | ||
1046 | ld.q SP, FRAME_R(48), r48 | ||
1047 | ld.q SP, FRAME_R(49), r49 | ||
1048 | ld.q SP, FRAME_R(50), r50 | ||
1049 | ld.q SP, FRAME_R(51), r51 | ||
1050 | ld.q SP, FRAME_R(52), r52 | ||
1051 | ld.q SP, FRAME_R(53), r53 | ||
1052 | ld.q SP, FRAME_R(54), r54 | ||
1053 | ld.q SP, FRAME_R(55), r55 | ||
1054 | ld.q SP, FRAME_R(56), r56 | ||
1055 | ld.q SP, FRAME_R(57), r57 | ||
1056 | ld.q SP, FRAME_R(58), r58 | ||
1057 | |||
1058 | getcon SR, r59 | ||
1059 | movi SR_BLOCK_EXC, r60 | ||
1060 | or r59, r60, r59 | ||
1061 | putcon r59, SR /* SR.BL = 1, keep nesting out */ | ||
1062 | ld.q SP, FRAME_S(FSSR), r61 | ||
1063 | ld.q SP, FRAME_S(FSPC), r62 | ||
1064 | movi SR_ASID_MASK, r60 | ||
1065 | and r59, r60, r59 | ||
1066 | andc r61, r60, r61 /* Clear out older ASID */ | ||
1067 | or r59, r61, r61 /* Retain current ASID */ | ||
1068 | putcon r61, SSR | ||
1069 | putcon r62, SPC | ||
1070 | |||
1071 | /* Ignore FSYSCALL_ID */ | ||
1072 | |||
1073 | ld.q SP, FRAME_R(59), r59 | ||
1074 | ld.q SP, FRAME_R(60), r60 | ||
1075 | ld.q SP, FRAME_R(61), r61 | ||
1076 | ld.q SP, FRAME_R(62), r62 | ||
1077 | |||
1078 | /* Last touch */ | ||
1079 | ld.q SP, FRAME_R(15), SP | ||
1080 | rte | ||
1081 | nop | ||
1082 | |||
1083 | /* | ||
1084 | * Third level handlers for VBR-based exceptions. Adapting args to | ||
1085 | * and/or deflecting to fourth level handlers. | ||
1086 | * | ||
1087 | * Fourth level handlers interface. | ||
1088 | * Most are C-coded handlers directly pointed by the trap_jtable. | ||
1089 | * (Third = Fourth level) | ||
1090 | * Inputs: | ||
1091 | * (r2) fault/interrupt code, entry number (e.g. NMI = 14, | ||
1092 | * IRL0-3 (0000) = 16, RTLBMISS = 2, SYSCALL = 11, etc ...) | ||
1093 | * (r3) struct pt_regs *, original register's frame pointer | ||
1094 | * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault) | ||
1095 | * (r5) TRA control register (for syscall/debug benefit only) | ||
1096 | * (LINK) return address | ||
1097 | * (SP) = r3 | ||
1098 | * | ||
1099 | * Kernel TLB fault handlers will get a slightly different interface. | ||
1100 | * (r2) struct pt_regs *, original register's frame pointer | ||
1101 | * (r3) writeaccess, whether it's a store fault as opposed to load fault | ||
1102 | * (r4) execaccess, whether it's a ITLB fault as opposed to DTLB fault | ||
1103 | * (r5) Effective Address of fault | ||
1104 | * (LINK) return address | ||
1105 | * (SP) = r2 | ||
1106 | * | ||
1107 | * fpu_error_or_IRQ? is a helper to deflect to the right cause. | ||
1108 | * | ||
1109 | */ | ||
1110 | tlb_miss_load: | ||
1111 | or SP, ZERO, r2 | ||
1112 | or ZERO, ZERO, r3 /* Read */ | ||
1113 | or ZERO, ZERO, r4 /* Data */ | ||
1114 | getcon TEA, r5 | ||
1115 | pta call_do_page_fault, tr0 | ||
1116 | beq ZERO, ZERO, tr0 | ||
1117 | |||
1118 | tlb_miss_store: | ||
1119 | or SP, ZERO, r2 | ||
1120 | movi 1, r3 /* Write */ | ||
1121 | or ZERO, ZERO, r4 /* Data */ | ||
1122 | getcon TEA, r5 | ||
1123 | pta call_do_page_fault, tr0 | ||
1124 | beq ZERO, ZERO, tr0 | ||
1125 | |||
1126 | itlb_miss_or_IRQ: | ||
1127 | pta its_IRQ, tr0 | ||
1128 | beqi/u r4, EVENT_INTERRUPT, tr0 | ||
1129 | or SP, ZERO, r2 | ||
1130 | or ZERO, ZERO, r3 /* Read */ | ||
1131 | movi 1, r4 /* Text */ | ||
1132 | getcon TEA, r5 | ||
1133 | /* Fall through */ | ||
1134 | |||
1135 | call_do_page_fault: | ||
1136 | movi do_page_fault, r6 | ||
1137 | ptabs r6, tr0 | ||
1138 | blink tr0, ZERO | ||
1139 | |||
1140 | fpu_error_or_IRQA: | ||
1141 | pta its_IRQ, tr0 | ||
1142 | beqi/l r4, EVENT_INTERRUPT, tr0 | ||
1143 | #ifdef CONFIG_SH_FPU | ||
1144 | movi do_fpu_state_restore, r6 | ||
1145 | #else | ||
1146 | movi do_exception_error, r6 | ||
1147 | #endif | ||
1148 | ptabs r6, tr0 | ||
1149 | blink tr0, ZERO | ||
1150 | |||
1151 | fpu_error_or_IRQB: | ||
1152 | pta its_IRQ, tr0 | ||
1153 | beqi/l r4, EVENT_INTERRUPT, tr0 | ||
1154 | #ifdef CONFIG_SH_FPU | ||
1155 | movi do_fpu_state_restore, r6 | ||
1156 | #else | ||
1157 | movi do_exception_error, r6 | ||
1158 | #endif | ||
1159 | ptabs r6, tr0 | ||
1160 | blink tr0, ZERO | ||
1161 | |||
1162 | its_IRQ: | ||
1163 | movi do_IRQ, r6 | ||
1164 | ptabs r6, tr0 | ||
1165 | blink tr0, ZERO | ||
1166 | |||
1167 | /* | ||
1168 | * system_call/unknown_trap third level handler: | ||
1169 | * | ||
1170 | * Inputs: | ||
1171 | * (r2) fault/interrupt code, entry number (TRAP = 11) | ||
1172 | * (r3) struct pt_regs *, original register's frame pointer | ||
1173 | * (r4) Not used. Event (0=interrupt, 1=TLB miss fault, 2=Not TLB miss fault) | ||
1174 | * (r5) TRA Control Reg (0x00xyzzzz: x=1 SYSCALL, y = #args, z=nr) | ||
1175 | * (SP) = r3 | ||
1176 | * (LINK) return address: ret_from_exception | ||
1177 | * (*r3) Syscall parms: SC#, arg0, arg1, ..., arg5 in order (Saved r2/r7) | ||
1178 | * | ||
1179 | * Outputs: | ||
1180 | * (*r3) Syscall reply (Saved r2) | ||
1181 | * (LINK) In case of syscall only it can be scrapped. | ||
1182 | * Common second level post handler will be ret_from_syscall. | ||
1183 | * Common (non-trace) exit point to that is syscall_ret (saving | ||
1184 | * result to r2). Common bad exit point is syscall_bad (returning | ||
1185 | * ENOSYS then saved to r2). | ||
1186 | * | ||
1187 | */ | ||
1188 | |||
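A worked decode of the TRA layout described above (the value itself is only illustrative):

/* TRA = 0x00140004:
 *   shlri r4, 20    ->  0x1, so this is a syscall (x = 1); any other value goes to unknown_trap
 *   y nibble        ->  4 arguments
 *   andi r5, 0x1ff  ->  0x004, i.e. syscall number 4 (the 9-bit mask allows numbers up to 511)
 */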
1189 | unknown_trap: | ||
1190 | /* Unknown Trap or User Trace */ | ||
1191 | movi do_unknown_trapa, r6 | ||
1192 | ptabs r6, tr0 | ||
1193 | ld.q r3, FRAME_R(9), r2 /* r2 = #arg << 16 | syscall # */ | ||
1194 | andi r2, 0x1ff, r2 /* r2 = syscall # */ | ||
1195 | blink tr0, LINK | ||
1196 | |||
1197 | pta syscall_ret, tr0 | ||
1198 | blink tr0, ZERO | ||
1199 | |||
1200 | /* New syscall implementation */ | ||
1201 | system_call: | ||
1202 | pta unknown_trap, tr0 | ||
1203 | or r5, ZERO, r4 /* TRA (=r5) -> r4 */ | ||
1204 | shlri r4, 20, r4 | ||
1205 | bnei r4, 1, tr0 /* unknown_trap if not 0x1yzzzz */ | ||
1206 | |||
1207 | /* It's a system call */ | ||
1208 | st.q r3, FRAME_S(FSYSCALL_ID), r5 /* ID (0x1yzzzz) -> stack */ | ||
1209 | andi r5, 0x1ff, r5 /* syscall # -> r5 */ | ||
1210 | |||
1211 | STI() | ||
1212 | |||
1213 | pta syscall_allowed, tr0 | ||
1214 | movi NR_syscalls - 1, r4 /* Last valid */ | ||
1215 | bgeu/l r4, r5, tr0 | ||
1216 | |||
1217 | syscall_bad: | ||
1218 | /* Return ENOSYS ! */ | ||
1219 | movi -(ENOSYS), r2 /* Fall-through */ | ||
1220 | |||
1221 | .global syscall_ret | ||
1222 | syscall_ret: | ||
1223 | st.q SP, FRAME_R(9), r2 /* Expecting SP back to BASIC frame */ | ||
1224 | |||
1225 | #ifdef CONFIG_POOR_MANS_STRACE | ||
1226 | /* nothing useful in registers at this point */ | ||
1227 | |||
1228 | movi evt_debug2, r5 | ||
1229 | ori r5, 1, r5 | ||
1230 | ptabs r5, tr0 | ||
1231 | ld.q SP, FRAME_R(9), r2 | ||
1232 | or SP, ZERO, r3 | ||
1233 | blink tr0, LINK | ||
1234 | #endif | ||
1235 | |||
1236 | ld.q SP, FRAME_S(FSPC), r2 | ||
1237 | addi r2, 4, r2 /* Move PC, being pre-execution event */ | ||
1238 | st.q SP, FRAME_S(FSPC), r2 | ||
1239 | pta ret_from_syscall, tr0 | ||
1240 | blink tr0, ZERO | ||
1241 | |||
1242 | |||
1243 | /* A different return path for ret_from_fork, because we now need | ||
1244 | * to call schedule_tail with later kernels. Since prev is | ||
1245 | * loaded into r2 by switch_to(), we can call it straight away. | ||
1246 | */ | ||
1247 | |||
1248 | .global ret_from_fork | ||
1249 | ret_from_fork: | ||
1250 | |||
1251 | movi schedule_tail,r5 | ||
1252 | ori r5, 1, r5 | ||
1253 | ptabs r5, tr0 | ||
1254 | blink tr0, LINK | ||
1255 | |||
1256 | #ifdef CONFIG_POOR_MANS_STRACE | ||
1257 | /* nothing useful in registers at this point */ | ||
1258 | |||
1259 | movi evt_debug2, r5 | ||
1260 | ori r5, 1, r5 | ||
1261 | ptabs r5, tr0 | ||
1262 | ld.q SP, FRAME_R(9), r2 | ||
1263 | or SP, ZERO, r3 | ||
1264 | blink tr0, LINK | ||
1265 | #endif | ||
1266 | |||
1267 | ld.q SP, FRAME_S(FSPC), r2 | ||
1268 | addi r2, 4, r2 /* Move PC, being pre-execution event */ | ||
1269 | st.q SP, FRAME_S(FSPC), r2 | ||
1270 | pta ret_from_syscall, tr0 | ||
1271 | blink tr0, ZERO | ||
1272 | |||
1273 | |||
1274 | |||
1275 | syscall_allowed: | ||
1276 | /* Use LINK to deflect the exit point, default is syscall_ret */ | ||
1277 | pta syscall_ret, tr0 | ||
1278 | gettr tr0, LINK | ||
1279 | pta syscall_notrace, tr0 | ||
1280 | |||
1281 | getcon KCR0, r2 | ||
1282 | ld.l r2, TI_FLAGS, r4 | ||
1283 | movi (1 << TIF_SYSCALL_TRACE), r6 | ||
1284 | and r6, r4, r6 | ||
1285 | beq/l r6, ZERO, tr0 | ||
1286 | |||
1287 | /* Trace it by calling syscall_trace before and after */ | ||
1288 | movi syscall_trace, r4 | ||
1289 | ptabs r4, tr0 | ||
1290 | blink tr0, LINK | ||
1291 | /* Reload syscall number as r5 is trashed by syscall_trace */ | ||
1292 | ld.q SP, FRAME_S(FSYSCALL_ID), r5 | ||
1293 | andi r5, 0x1ff, r5 | ||
1294 | |||
1295 | pta syscall_ret_trace, tr0 | ||
1296 | gettr tr0, LINK | ||
1297 | |||
1298 | syscall_notrace: | ||
1299 | /* Now point to the appropriate 4th level syscall handler */ | ||
1300 | movi sys_call_table, r4 | ||
1301 | shlli r5, 2, r5 | ||
1302 | ldx.l r4, r5, r5 | ||
1303 | ptabs r5, tr0 | ||
1304 | |||
1305 | /* Prepare original args */ | ||
1306 | ld.q SP, FRAME_R(2), r2 | ||
1307 | ld.q SP, FRAME_R(3), r3 | ||
1308 | ld.q SP, FRAME_R(4), r4 | ||
1309 | ld.q SP, FRAME_R(5), r5 | ||
1310 | ld.q SP, FRAME_R(6), r6 | ||
1311 | ld.q SP, FRAME_R(7), r7 | ||
1312 | |||
1313 | /* And now the trick for those syscalls requiring regs * ! */ | ||
1314 | or SP, ZERO, r8 | ||
1315 | |||
1316 | /* Call it */ | ||
1317 | blink tr0, ZERO /* LINK is already properly set */ | ||
1318 | |||
1319 | syscall_ret_trace: | ||
1320 | /* We get back here only if under trace */ | ||
1321 | st.q SP, FRAME_R(9), r2 /* Save return value */ | ||
1322 | |||
1323 | movi syscall_trace, LINK | ||
1324 | ptabs LINK, tr0 | ||
1325 | blink tr0, LINK | ||
1326 | |||
1327 | /* This needs to be done after any syscall tracing */ | ||
1328 | ld.q SP, FRAME_S(FSPC), r2 | ||
1329 | addi r2, 4, r2 /* Move PC, being pre-execution event */ | ||
1330 | st.q SP, FRAME_S(FSPC), r2 | ||
1331 | |||
1332 | pta ret_from_syscall, tr0 | ||
1333 | blink tr0, ZERO /* Resume normal return sequence */ | ||
1334 | |||
1335 | /* | ||
1336 | * --- Switch to running under a particular ASID and return the previous ASID value | ||
1337 | * --- The caller is assumed to have done a cli before calling this. | ||
1338 | * | ||
1339 | * Input r2 : new ASID | ||
1340 | * Output r2 : old ASID | ||
1341 | */ | ||
1342 | |||
1343 | .global switch_and_save_asid | ||
1344 | switch_and_save_asid: | ||
1345 | getcon sr, r0 | ||
1346 | movi 255, r4 | ||
1347 | shlli r4, 16, r4 /* r4 = mask to select ASID */ | ||
1348 | and r0, r4, r3 /* r3 = shifted old ASID */ | ||
1349 | andi r2, 255, r2 /* mask down new ASID */ | ||
1350 | shlli r2, 16, r2 /* align new ASID against SR.ASID */ | ||
1351 | andc r0, r4, r0 /* efface old ASID from SR */ | ||
1352 | or r0, r2, r0 /* insert the new ASID */ | ||
1353 | putcon r0, ssr | ||
1354 | movi 1f, r0 | ||
1355 | putcon r0, spc | ||
1356 | rte | ||
1357 | nop | ||
1358 | 1: | ||
1359 | ptabs LINK, tr0 | ||
1360 | shlri r3, 16, r2 /* r2 = old ASID */ | ||
1361 | blink tr0, r63 | ||
1362 | |||
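For reference, the mask built above is 255 << 16 = 0x00ff0000, the same field as SR_ASID_MASK defined at the top of this file, so the old ASID handed back in r2 is simply (old SR >> 16) & 0xff.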
1363 | .global route_to_panic_handler | ||
1364 | route_to_panic_handler: | ||
1365 | /* Switch to real mode, goto panic_handler, don't return. Useful for | ||
1366 | last-chance debugging, e.g. if no output wants to go to the console. | ||
1367 | */ | ||
1368 | |||
1369 | movi panic_handler - CONFIG_CACHED_MEMORY_OFFSET, r1 | ||
1370 | ptabs r1, tr0 | ||
1371 | pta 1f, tr1 | ||
1372 | gettr tr1, r0 | ||
1373 | putcon r0, spc | ||
1374 | getcon sr, r0 | ||
1375 | movi 1, r1 | ||
1376 | shlli r1, 31, r1 | ||
1377 | andc r0, r1, r0 | ||
1378 | putcon r0, ssr | ||
1379 | rte | ||
1380 | nop | ||
1381 | 1: /* Now in real mode */ | ||
1382 | blink tr0, r63 | ||
1383 | nop | ||
1384 | |||
1385 | .global peek_real_address_q | ||
1386 | peek_real_address_q: | ||
1387 | /* Two args: | ||
1388 | r2 : real mode address to peek | ||
1389 | r2(out) : result quadword | ||
1390 | |||
1391 | This is provided as a cheapskate way of manipulating device | ||
1392 | registers for debugging (to avoid the need to onchip_remap the debug | ||
1393 | module, and to avoid the need to onchip_remap the watchpoint | ||
1394 | controller in a way that identity maps sufficient bits to avoid the | ||
1395 | SH5-101 cut2 silicon defect). | ||
1396 | |||
1397 | This code is not performance critical | ||
1398 | */ | ||
1399 | |||
1400 | add.l r2, r63, r2 /* sign extend address */ | ||
1401 | getcon sr, r0 /* r0 = saved original SR */ | ||
1402 | movi 1, r1 | ||
1403 | shlli r1, 28, r1 | ||
1404 | or r0, r1, r1 /* r0 with block bit set */ | ||
1405 | putcon r1, sr /* now in critical section */ | ||
1406 | movi 1, r36 | ||
1407 | shlli r36, 31, r36 | ||
1408 | andc r1, r36, r1 /* turn sr.mmu off in real mode section */ | ||
1409 | |||
1410 | putcon r1, ssr | ||
1411 | movi .peek0 - CONFIG_CACHED_MEMORY_OFFSET, r36 /* real mode target address */ | ||
1412 | movi 1f, r37 /* virtual mode return addr */ | ||
1413 | putcon r36, spc | ||
1414 | |||
1415 | synco | ||
1416 | rte | ||
1417 | nop | ||
1418 | |||
1419 | .peek0: /* come here in real mode, don't touch caches!! | ||
1420 | still in critical section (sr.bl==1) */ | ||
1421 | putcon r0, ssr | ||
1422 | putcon r37, spc | ||
1423 | /* Here's the actual peek. If the address is bad, all bets are now off | ||
1424 | * what will happen (handlers invoked in real-mode = bad news) */ | ||
1425 | ld.q r2, 0, r2 | ||
1426 | synco | ||
1427 | rte /* Back to virtual mode */ | ||
1428 | nop | ||
1429 | |||
1430 | 1: | ||
1431 | ptabs LINK, tr0 | ||
1432 | blink tr0, r63 | ||
1433 | |||
1434 | .global poke_real_address_q | ||
1435 | poke_real_address_q: | ||
1436 | /* Two args: | ||
1437 | r2 : real mode address to poke | ||
1438 | r3 : quadword value to write. | ||
1439 | |||
1440 | This is provided as a cheapskate way of manipulating device | ||
1441 | registers for debugging (to avoid the need to onchip_remap the debug | ||
1442 | module, and to avoid the need to onchip_remap the watchpoint | ||
1443 | controller in a way that identity maps sufficient bits to avoid the | ||
1444 | SH5-101 cut2 silicon defect). | ||
1445 | |||
1446 | This code is not performance critical | ||
1447 | */ | ||
1448 | |||
1449 | add.l r2, r63, r2 /* sign extend address */ | ||
1450 | getcon sr, r0 /* r0 = saved original SR */ | ||
1451 | movi 1, r1 | ||
1452 | shlli r1, 28, r1 | ||
1453 | or r0, r1, r1 /* r0 with block bit set */ | ||
1454 | putcon r1, sr /* now in critical section */ | ||
1455 | movi 1, r36 | ||
1456 | shlli r36, 31, r36 | ||
1457 | andc r1, r36, r1 /* turn sr.mmu off in real mode section */ | ||
1458 | |||
1459 | putcon r1, ssr | ||
1460 | movi .poke0-CONFIG_CACHED_MEMORY_OFFSET, r36 /* real mode target address */ | ||
1461 | movi 1f, r37 /* virtual mode return addr */ | ||
1462 | putcon r36, spc | ||
1463 | |||
1464 | synco | ||
1465 | rte | ||
1466 | nop | ||
1467 | |||
1468 | .poke0: /* come here in real mode, don't touch caches!! | ||
1469 | still in critical section (sr.bl==1) */ | ||
1470 | putcon r0, ssr | ||
1471 | putcon r37, spc | ||
1472 | /* Here's the actual poke. If the address is bad, all bets are now off | ||
1473 | * what will happen (handlers invoked in real-mode = bad news) */ | ||
1474 | st.q r2, 0, r3 | ||
1475 | synco | ||
1476 | rte /* Back to virtual mode */ | ||
1477 | nop | ||
1478 | |||
1479 | 1: | ||
1480 | ptabs LINK, tr0 | ||
1481 | blink tr0, r63 | ||
1482 | |||
1483 | /* | ||
1484 | * --- User Access Handling Section | ||
1485 | */ | ||
1486 | |||
1487 | /* | ||
1488 | * User Access support. It all moved to non inlined Assembler | ||
1489 | * functions in here. | ||
1490 | * | ||
1491 | * __kernel_size_t __copy_user(void *__to, const void *__from, | ||
1492 | * __kernel_size_t __n) | ||
1493 | * | ||
1494 | * Inputs: | ||
1495 | * (r2) target address | ||
1496 | * (r3) source address | ||
1497 | * (r4) size in bytes | ||
1498 | * | ||
1499 | * Outputs: | ||
1500 | * (*r2) target data | ||
1501 | * (r2) non-copied bytes | ||
1502 | * | ||
1503 | * If a fault occurs on the user pointer, bail out early and return the | ||
1504 | * number of bytes not copied in r2. | ||
1505 | * Strategy : for large blocks, call a real memcpy function which can | ||
1506 | * move >1 byte at a time using unaligned ld/st instructions, and can | ||
1507 | * manipulate the cache using prefetch + alloco to improve the speed | ||
1508 | * further. If a fault occurs in that function, just revert to the | ||
1509 | * byte-by-byte approach used for small blocks; this is rare so the | ||
1510 | * performance hit for that case does not matter. | ||
1511 | * | ||
1512 | * For small blocks it's not worth the overhead of setting up and calling | ||
1513 | * the memcpy routine; do the copy a byte at a time. | ||
1514 | * | ||
1515 | */ | ||
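The strategy described above, as a rough C sketch (illustrative only: the 16-byte threshold mirrors the code below, while fast_memcpy_nofault, load_byte_nofault and store_byte_nofault are hypothetical stand-ins for copy_user_memcpy and the exception-table fixups the assembly really uses):

/* Illustrative sketch only -- real fault handling is via the exception table
 * and __copy_user_fixup, not via helpers that return an error code. */
static unsigned long sketch_copy_user(char *to, const char *from, unsigned long n)
{
	if (n > 16) {					/* large block: try the fast path */
		if (fast_memcpy_nofault(to, from, n))	/* hypothetical helper */
			return 0;			/* whole block copied */
		/* fast path faulted: retry byte by byte from the original arguments */
	}
	while (n) {					/* small block, or fallback after a fault */
		char c;
		if (load_byte_nofault(from, &c) ||	/* hypothetical helpers that fail */
		    store_byte_nofault(to, c))		/* instead of taking a fault */
			break;
		from++;
		to++;
		n--;
	}
	return n;					/* number of bytes NOT copied */
}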
1516 | .global __copy_user | ||
1517 | __copy_user: | ||
1518 | pta __copy_user_byte_by_byte, tr1 | ||
1519 | movi 16, r0 ! this value is a best guess, should tune it by benchmarking | ||
1520 | bge/u r0, r4, tr1 | ||
1521 | pta copy_user_memcpy, tr0 | ||
1522 | addi SP, -32, SP | ||
1523 | /* Save arguments in case we have to fix-up unhandled page fault */ | ||
1524 | st.q SP, 0, r2 | ||
1525 | st.q SP, 8, r3 | ||
1526 | st.q SP, 16, r4 | ||
1527 | st.q SP, 24, r35 ! r35 is callee-save | ||
1528 | /* Save LINK in a register to reduce RTS time later (otherwise | ||
1529 | ld SP,*,LINK;ptabs LINK;trn;blink trn,r63 becomes a critical path) */ | ||
1530 | ori LINK, 0, r35 | ||
1531 | blink tr0, LINK | ||
1532 | |||
1533 | /* Copy completed normally if we get back here */ | ||
1534 | ptabs r35, tr0 | ||
1535 | ld.q SP, 24, r35 | ||
1536 | /* don't restore r2-r4, pointless */ | ||
1537 | /* set result=r2 to zero as the copy must have succeeded. */ | ||
1538 | or r63, r63, r2 | ||
1539 | addi SP, 32, SP | ||
1540 | blink tr0, r63 ! RTS | ||
1541 | |||
1542 | .global __copy_user_fixup | ||
1543 | __copy_user_fixup: | ||
1544 | /* Restore stack frame */ | ||
1545 | ori r35, 0, LINK | ||
1546 | ld.q SP, 24, r35 | ||
1547 | ld.q SP, 16, r4 | ||
1548 | ld.q SP, 8, r3 | ||
1549 | ld.q SP, 0, r2 | ||
1550 | addi SP, 32, SP | ||
1551 | /* Fall through to original code, in the 'same' state we entered with */ | ||
1552 | |||
1553 | /* The slow byte-by-byte method is used if the fast copy traps due to a bad | ||
1554 | user address. In that rare case, the speed drop can be tolerated. */ | ||
1555 | __copy_user_byte_by_byte: | ||
1556 | pta ___copy_user_exit, tr1 | ||
1557 | pta ___copy_user1, tr0 | ||
1558 | beq/u r4, r63, tr1 /* early exit for zero length copy */ | ||
1559 | sub r2, r3, r0 | ||
1560 | addi r0, -1, r0 | ||
1561 | |||
1562 | ___copy_user1: | ||
1563 | ld.b r3, 0, r5 /* Fault address 1 */ | ||
1564 | |||
1565 | /* Could rewrite this to use just 1 add, but the second comes 'free' | ||
1566 | due to load latency */ | ||
1567 | addi r3, 1, r3 | ||
1568 | addi r4, -1, r4 /* No real fixup required */ | ||
1569 | ___copy_user2: | ||
1570 | stx.b r3, r0, r5 /* Fault address 2 */ | ||
1571 | bne r4, ZERO, tr0 | ||
1572 | |||
1573 | ___copy_user_exit: | ||
1574 | or r4, ZERO, r2 | ||
1575 | ptabs LINK, tr0 | ||
1576 | blink tr0, ZERO | ||
1577 | |||
1578 | /* | ||
1579 | * __kernel_size_t __clear_user(void *addr, __kernel_size_t size) | ||
1580 | * | ||
1581 | * Inputs: | ||
1582 | * (r2) target address | ||
1583 | * (r3) size in bytes | ||
1584 | * | ||
1585 | * Outputs: | ||
1586 | * (*r2) zero-ed target data | ||
1587 | * (r2) non-zero-ed bytes | ||
1588 | */ | ||
1589 | .global __clear_user | ||
1590 | __clear_user: | ||
1591 | pta ___clear_user_exit, tr1 | ||
1592 | pta ___clear_user1, tr0 | ||
1593 | beq/u r3, r63, tr1 | ||
1594 | |||
1595 | ___clear_user1: | ||
1596 | st.b r2, 0, ZERO /* Fault address */ | ||
1597 | addi r2, 1, r2 | ||
1598 | addi r3, -1, r3 /* No real fixup required */ | ||
1599 | bne r3, ZERO, tr0 | ||
1600 | |||
1601 | ___clear_user_exit: | ||
1602 | or r3, ZERO, r2 | ||
1603 | ptabs LINK, tr0 | ||
1604 | blink tr0, ZERO | ||
1605 | |||
1606 | |||
1607 | /* | ||
1608 | * int __strncpy_from_user(unsigned long __dest, unsigned long __src, | ||
1609 | * int __count) | ||
1610 | * | ||
1611 | * Inputs: | ||
1612 | * (r2) target address | ||
1613 | * (r3) source address | ||
1614 | * (r4) maximum size in bytes | ||
1615 | * | ||
1616 | * Outputs: | ||
1617 | * (*r2) copied data | ||
1618 | * (r2) -EFAULT (in case of faulting) | ||
1619 | * copied data (otherwise) | ||
1620 | */ | ||
1621 | .global __strncpy_from_user | ||
1622 | __strncpy_from_user: | ||
1623 | pta ___strncpy_from_user1, tr0 | ||
1624 | pta ___strncpy_from_user_done, tr1 | ||
1625 | or r4, ZERO, r5 /* r5 = original count */ | ||
1626 | beq/u r4, r63, tr1 /* early exit if r4==0 */ | ||
1627 | movi -(EFAULT), r6 /* r6 = reply, no real fixup */ | ||
1628 | or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */ | ||
1629 | |||
1630 | ___strncpy_from_user1: | ||
1631 | ld.b r3, 0, r7 /* Fault address: only in reading */ | ||
1632 | st.b r2, 0, r7 | ||
1633 | addi r2, 1, r2 | ||
1634 | addi r3, 1, r3 | ||
1635 | beq/u ZERO, r7, tr1 | ||
1636 | addi r4, -1, r4 /* return real number of copied bytes */ | ||
1637 | bne/l ZERO, r4, tr0 | ||
1638 | |||
1639 | ___strncpy_from_user_done: | ||
1640 | sub r5, r4, r6 /* If done, return copied */ | ||
1641 | |||
1642 | ___strncpy_from_user_exit: | ||
1643 | or r6, ZERO, r2 | ||
1644 | ptabs LINK, tr0 | ||
1645 | blink tr0, ZERO | ||
1646 | |||
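A hedged C sketch of the return-value convention implemented above, again with plain pointers standing in for user space; the function name is an illustration and the -EFAULT case, which the __ex_table fixup provides in the real code, is only described in a comment.

#include <stddef.h>

/* Copy at most 'count' bytes including the terminating NUL.  Return the
 * string length copied (excluding the NUL), or -EFAULT in the real code
 * if the source load faults. */
long strncpy_from_user_sketch(char *dst, const char *src, long count)
{
        long copied = 0;

        while (count) {
                char c = *src++;        /* ld.b r3, 0, r7: may fault */
                *dst++ = c;
                if (c == '\0')
                        return copied;  /* NUL copied but not counted */
                copied++;
                count--;
        }
        return copied;                  /* ran out of room before the NUL */
}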
1647 | /* | ||
1648 | * extern long __strnlen_user(const char *__s, long __n) | ||
1649 | * | ||
1650 | * Inputs: | ||
1651 | * (r2) source address | ||
1652 | * (r3) source size in bytes | ||
1653 | * | ||
1654 | * Outputs: | ||
1655 | * (r2) -EFAULT (in case of faulting) | ||
1656 | * string length (otherwise) | ||
1657 | */ | ||
1658 | .global __strnlen_user | ||
1659 | __strnlen_user: | ||
1660 | pta ___strnlen_user_set_reply, tr0 | ||
1661 | pta ___strnlen_user1, tr1 | ||
1662 | or ZERO, ZERO, r5 /* r5 = counter */ | ||
1663 | movi -(EFAULT), r6 /* r6 = reply, no real fixup */ | ||
1664 | or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */ | ||
1665 | beq r3, ZERO, tr0 | ||
1666 | |||
1667 | ___strnlen_user1: | ||
1668 | ldx.b r2, r5, r7 /* Fault address: only in reading */ | ||
1669 | addi r3, -1, r3 /* No real fixup */ | ||
1670 | addi r5, 1, r5 | ||
1671 | beq r3, ZERO, tr0 | ||
1672 | bne r7, ZERO, tr1 | ||
1673 | ! The line below used to be active. This led to a junk byte lying between each pair | ||
1674 | ! of entries in the argv & envp structures in memory. Whilst the program saw the right data | ||
1675 | ! via the argv and envp arguments to main, it meant the 'flat' representation visible through | ||
1676 | ! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example. | ||
1677 | ! addi r5, 1, r5 /* Include '\0' */ | ||
1678 | |||
1679 | ___strnlen_user_set_reply: | ||
1680 | or r5, ZERO, r6 /* If done, return counter */ | ||
1681 | |||
1682 | ___strnlen_user_exit: | ||
1683 | or r6, ZERO, r2 | ||
1684 | ptabs LINK, tr0 | ||
1685 | blink tr0, ZERO | ||
1686 | |||
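Likewise, a small C sketch of the length convention used above (the returned count includes the terminating NUL); the function name is illustrative and the -EFAULT fault case is omitted since it only arises through the exception-table fixup.

/* Return the string length including the terminating NUL, scanning at
 * most 'n' bytes; the real routine returns -EFAULT if a load faults. */
long strnlen_user_sketch(const char *s, long n)
{
        long len = 0;

        while (n) {
                char c = s[len];        /* ldx.b r2, r5, r7: may fault */
                n--;
                len++;
                if (n == 0 || c == '\0')
                        break;
        }
        return len;
}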
1687 | /* | ||
1688 | * extern long __get_user_asm_?(void *val, long addr) | ||
1689 | * | ||
1690 | * Inputs: | ||
1691 | * (r2) dest address | ||
1692 | * (r3) source address (in User Space) | ||
1693 | * | ||
1694 | * Outputs: | ||
1695 | * (r2) -EFAULT (faulting) | ||
1696 | * 0 (not faulting) | ||
1697 | */ | ||
1698 | .global __get_user_asm_b | ||
1699 | __get_user_asm_b: | ||
1700 | or r2, ZERO, r4 | ||
1701 | movi -(EFAULT), r2 /* r2 = reply, no real fixup */ | ||
1702 | |||
1703 | ___get_user_asm_b1: | ||
1704 | ld.b r3, 0, r5 /* r5 = data */ | ||
1705 | st.b r4, 0, r5 | ||
1706 | or ZERO, ZERO, r2 | ||
1707 | |||
1708 | ___get_user_asm_b_exit: | ||
1709 | ptabs LINK, tr0 | ||
1710 | blink tr0, ZERO | ||
1711 | |||
1712 | |||
1713 | .global __get_user_asm_w | ||
1714 | __get_user_asm_w: | ||
1715 | or r2, ZERO, r4 | ||
1716 | movi -(EFAULT), r2 /* r2 = reply, no real fixup */ | ||
1717 | |||
1718 | ___get_user_asm_w1: | ||
1719 | ld.w r3, 0, r5 /* r5 = data */ | ||
1720 | st.w r4, 0, r5 | ||
1721 | or ZERO, ZERO, r2 | ||
1722 | |||
1723 | ___get_user_asm_w_exit: | ||
1724 | ptabs LINK, tr0 | ||
1725 | blink tr0, ZERO | ||
1726 | |||
1727 | |||
1728 | .global __get_user_asm_l | ||
1729 | __get_user_asm_l: | ||
1730 | or r2, ZERO, r4 | ||
1731 | movi -(EFAULT), r2 /* r2 = reply, no real fixup */ | ||
1732 | |||
1733 | ___get_user_asm_l1: | ||
1734 | ld.l r3, 0, r5 /* r5 = data */ | ||
1735 | st.l r4, 0, r5 | ||
1736 | or ZERO, ZERO, r2 | ||
1737 | |||
1738 | ___get_user_asm_l_exit: | ||
1739 | ptabs LINK, tr0 | ||
1740 | blink tr0, ZERO | ||
1741 | |||
1742 | |||
1743 | .global __get_user_asm_q | ||
1744 | __get_user_asm_q: | ||
1745 | or r2, ZERO, r4 | ||
1746 | movi -(EFAULT), r2 /* r2 = reply, no real fixup */ | ||
1747 | |||
1748 | ___get_user_asm_q1: | ||
1749 | ld.q r3, 0, r5 /* r5 = data */ | ||
1750 | st.q r4, 0, r5 | ||
1751 | or ZERO, ZERO, r2 | ||
1752 | |||
1753 | ___get_user_asm_q_exit: | ||
1754 | ptabs LINK, tr0 | ||
1755 | blink tr0, ZERO | ||
1756 | |||
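For reference, a C sketch of the contract shared by the four __get_user_asm_* variants, shown for the 32-bit case; the function name and plain pointer types are simplifications for illustration.

#include <stdint.h>

/* Fetch one 32-bit value from 'addr' into '*val' and return 0;
 * if the load faults, the fixup path returns -EFAULT instead. */
long get_user_asm_l_sketch(void *val, const void *addr)
{
        uint32_t tmp = *(const uint32_t *)addr; /* ld.l r3, 0, r5: may fault */

        *(uint32_t *)val = tmp;                 /* st.l r4, 0, r5 */
        return 0;
}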
1757 | /* | ||
1758 | * extern long __put_user_asm_?(void *pval, long addr) | ||
1759 | * | ||
1760 | * Inputs: | ||
1761 | * (r2) kernel pointer to value | ||
1762 | * (r3) dest address (in User Space) | ||
1763 | * | ||
1764 | * Outputs: | ||
1765 | * (r2) -EFAULT (faulting) | ||
1766 | * 0 (not faulting) | ||
1767 | */ | ||
1768 | .global __put_user_asm_b | ||
1769 | __put_user_asm_b: | ||
1770 | ld.b r2, 0, r4 /* r4 = data */ | ||
1771 | movi -(EFAULT), r2 /* r2 = reply, no real fixup */ | ||
1772 | |||
1773 | ___put_user_asm_b1: | ||
1774 | st.b r3, 0, r4 | ||
1775 | or ZERO, ZERO, r2 | ||
1776 | |||
1777 | ___put_user_asm_b_exit: | ||
1778 | ptabs LINK, tr0 | ||
1779 | blink tr0, ZERO | ||
1780 | |||
1781 | |||
1782 | .global __put_user_asm_w | ||
1783 | __put_user_asm_w: | ||
1784 | ld.w r2, 0, r4 /* r4 = data */ | ||
1785 | movi -(EFAULT), r2 /* r2 = reply, no real fixup */ | ||
1786 | |||
1787 | ___put_user_asm_w1: | ||
1788 | st.w r3, 0, r4 | ||
1789 | or ZERO, ZERO, r2 | ||
1790 | |||
1791 | ___put_user_asm_w_exit: | ||
1792 | ptabs LINK, tr0 | ||
1793 | blink tr0, ZERO | ||
1794 | |||
1795 | |||
1796 | .global __put_user_asm_l | ||
1797 | __put_user_asm_l: | ||
1798 | ld.l r2, 0, r4 /* r4 = data */ | ||
1799 | movi -(EFAULT), r2 /* r2 = reply, no real fixup */ | ||
1800 | |||
1801 | ___put_user_asm_l1: | ||
1802 | st.l r3, 0, r4 | ||
1803 | or ZERO, ZERO, r2 | ||
1804 | |||
1805 | ___put_user_asm_l_exit: | ||
1806 | ptabs LINK, tr0 | ||
1807 | blink tr0, ZERO | ||
1808 | |||
1809 | |||
1810 | .global __put_user_asm_q | ||
1811 | __put_user_asm_q: | ||
1812 | ld.q r2, 0, r4 /* r4 = data */ | ||
1813 | movi -(EFAULT), r2 /* r2 = reply, no real fixup */ | ||
1814 | |||
1815 | ___put_user_asm_q1: | ||
1816 | st.q r3, 0, r4 | ||
1817 | or ZERO, ZERO, r2 | ||
1818 | |||
1819 | ___put_user_asm_q_exit: | ||
1820 | ptabs LINK, tr0 | ||
1821 | blink tr0, ZERO | ||
1822 | |||
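And the mirror-image sketch for the __put_user_asm_* variants, 64-bit case; again the name, pointer types, and lack of a real user-space access are assumptions made for illustration only.

#include <stdint.h>

/* Read the value through the kernel pointer 'pval', store it to the
 * destination and return 0; a faulting store returns -EFAULT via the
 * __ex_table fixup in the real code. */
long put_user_asm_q_sketch(const void *pval, void *addr)
{
        uint64_t tmp = *(const uint64_t *)pval; /* ld.q r2, 0, r4 */

        *(uint64_t *)addr = tmp;                /* st.q r3, 0, r4: may fault */
        return 0;
}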
1823 | panic_stash_regs: | ||
1824 | /* The idea is: when we get an unhandled panic, we dump the registers | ||
1825 | to a known memory location, then just sit in a tight loop. | ||
1826 | This allows a human to look at the memory region through the GDB | ||
1827 | session (assuming the debug module's SHwy initiator isn't locked up | ||
1828 | or anything), to hopefully analyze the cause of the panic. */ | ||
1829 | |||
1830 | /* On entry, former r15 (SP) is in DCR | ||
1831 | former r0 is at resvec_saved_area + 0 | ||
1832 | former r1 is at resvec_saved_area + 8 | ||
1833 | former tr0 is at resvec_saved_area + 32 | ||
1834 | DCR is the only register whose value is lost altogether. | ||
1835 | */ | ||
1836 | |||
1837 | movi 0xffffffff80000000, r0 ! phy of dump area | ||
1838 | ld.q SP, 0x000, r1 ! former r0 | ||
1839 | st.q r0, 0x000, r1 | ||
1840 | ld.q SP, 0x008, r1 ! former r1 | ||
1841 | st.q r0, 0x008, r1 | ||
1842 | st.q r0, 0x010, r2 | ||
1843 | st.q r0, 0x018, r3 | ||
1844 | st.q r0, 0x020, r4 | ||
1845 | st.q r0, 0x028, r5 | ||
1846 | st.q r0, 0x030, r6 | ||
1847 | st.q r0, 0x038, r7 | ||
1848 | st.q r0, 0x040, r8 | ||
1849 | st.q r0, 0x048, r9 | ||
1850 | st.q r0, 0x050, r10 | ||
1851 | st.q r0, 0x058, r11 | ||
1852 | st.q r0, 0x060, r12 | ||
1853 | st.q r0, 0x068, r13 | ||
1854 | st.q r0, 0x070, r14 | ||
1855 | getcon dcr, r14 | ||
1856 | st.q r0, 0x078, r14 | ||
1857 | st.q r0, 0x080, r16 | ||
1858 | st.q r0, 0x088, r17 | ||
1859 | st.q r0, 0x090, r18 | ||
1860 | st.q r0, 0x098, r19 | ||
1861 | st.q r0, 0x0a0, r20 | ||
1862 | st.q r0, 0x0a8, r21 | ||
1863 | st.q r0, 0x0b0, r22 | ||
1864 | st.q r0, 0x0b8, r23 | ||
1865 | st.q r0, 0x0c0, r24 | ||
1866 | st.q r0, 0x0c8, r25 | ||
1867 | st.q r0, 0x0d0, r26 | ||
1868 | st.q r0, 0x0d8, r27 | ||
1869 | st.q r0, 0x0e0, r28 | ||
1870 | st.q r0, 0x0e8, r29 | ||
1871 | st.q r0, 0x0f0, r30 | ||
1872 | st.q r0, 0x0f8, r31 | ||
1873 | st.q r0, 0x100, r32 | ||
1874 | st.q r0, 0x108, r33 | ||
1875 | st.q r0, 0x110, r34 | ||
1876 | st.q r0, 0x118, r35 | ||
1877 | st.q r0, 0x120, r36 | ||
1878 | st.q r0, 0x128, r37 | ||
1879 | st.q r0, 0x130, r38 | ||
1880 | st.q r0, 0x138, r39 | ||
1881 | st.q r0, 0x140, r40 | ||
1882 | st.q r0, 0x148, r41 | ||
1883 | st.q r0, 0x150, r42 | ||
1884 | st.q r0, 0x158, r43 | ||
1885 | st.q r0, 0x160, r44 | ||
1886 | st.q r0, 0x168, r45 | ||
1887 | st.q r0, 0x170, r46 | ||
1888 | st.q r0, 0x178, r47 | ||
1889 | st.q r0, 0x180, r48 | ||
1890 | st.q r0, 0x188, r49 | ||
1891 | st.q r0, 0x190, r50 | ||
1892 | st.q r0, 0x198, r51 | ||
1893 | st.q r0, 0x1a0, r52 | ||
1894 | st.q r0, 0x1a8, r53 | ||
1895 | st.q r0, 0x1b0, r54 | ||
1896 | st.q r0, 0x1b8, r55 | ||
1897 | st.q r0, 0x1c0, r56 | ||
1898 | st.q r0, 0x1c8, r57 | ||
1899 | st.q r0, 0x1d0, r58 | ||
1900 | st.q r0, 0x1d8, r59 | ||
1901 | st.q r0, 0x1e0, r60 | ||
1902 | st.q r0, 0x1e8, r61 | ||
1903 | st.q r0, 0x1f0, r62 | ||
1904 | st.q r0, 0x1f8, r63 ! bogus, but for consistency's sake... | ||
1905 | |||
1906 | ld.q SP, 0x020, r1 ! former tr0 | ||
1907 | st.q r0, 0x200, r1 | ||
1908 | gettr tr1, r1 | ||
1909 | st.q r0, 0x208, r1 | ||
1910 | gettr tr2, r1 | ||
1911 | st.q r0, 0x210, r1 | ||
1912 | gettr tr3, r1 | ||
1913 | st.q r0, 0x218, r1 | ||
1914 | gettr tr4, r1 | ||
1915 | st.q r0, 0x220, r1 | ||
1916 | gettr tr5, r1 | ||
1917 | st.q r0, 0x228, r1 | ||
1918 | gettr tr6, r1 | ||
1919 | st.q r0, 0x230, r1 | ||
1920 | gettr tr7, r1 | ||
1921 | st.q r0, 0x238, r1 | ||
1922 | |||
1923 | getcon sr, r1 | ||
1924 | getcon ssr, r2 | ||
1925 | getcon pssr, r3 | ||
1926 | getcon spc, r4 | ||
1927 | getcon pspc, r5 | ||
1928 | getcon intevt, r6 | ||
1929 | getcon expevt, r7 | ||
1930 | getcon pexpevt, r8 | ||
1931 | getcon tra, r9 | ||
1932 | getcon tea, r10 | ||
1933 | getcon kcr0, r11 | ||
1934 | getcon kcr1, r12 | ||
1935 | getcon vbr, r13 | ||
1936 | getcon resvec, r14 | ||
1937 | |||
1938 | st.q r0, 0x240, r1 | ||
1939 | st.q r0, 0x248, r2 | ||
1940 | st.q r0, 0x250, r3 | ||
1941 | st.q r0, 0x258, r4 | ||
1942 | st.q r0, 0x260, r5 | ||
1943 | st.q r0, 0x268, r6 | ||
1944 | st.q r0, 0x270, r7 | ||
1945 | st.q r0, 0x278, r8 | ||
1946 | st.q r0, 0x280, r9 | ||
1947 | st.q r0, 0x288, r10 | ||
1948 | st.q r0, 0x290, r11 | ||
1949 | st.q r0, 0x298, r12 | ||
1950 | st.q r0, 0x2a0, r13 | ||
1951 | st.q r0, 0x2a8, r14 | ||
1952 | |||
1953 | getcon SPC,r2 | ||
1954 | getcon SSR,r3 | ||
1955 | getcon EXPEVT,r4 | ||
1956 | /* Prepare to jump to C - physical address */ | ||
1957 | movi panic_handler-CONFIG_CACHED_MEMORY_OFFSET, r1 | ||
1958 | ori r1, 1, r1 | ||
1959 | ptabs r1, tr0 | ||
1960 | getcon DCR, SP | ||
1961 | blink tr0, ZERO | ||
1962 | nop | ||
1963 | nop | ||
1964 | nop | ||
1965 | nop | ||
1966 | |||
1967 | |||
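To make the store offsets above easier to follow, here is a rough C view of the dump area that panic_stash_regs fills at physical address 0xffffffff80000000; the struct name and field types are illustrative reconstructions from the offsets, not a definition the kernel actually uses.

#include <stdint.h>

struct panic_reg_dump_sketch {
        uint64_t gpr[64];                 /* 0x000-0x1f8: r0-r63; slot 15 holds
                                             the former SP read back from DCR,
                                             slot 63 stored only for consistency */
        uint64_t tr[8];                   /* 0x200-0x238: tr0-tr7 */
        uint64_t sr, ssr, pssr;           /* 0x240-0x250 */
        uint64_t spc, pspc;               /* 0x258-0x260 */
        uint64_t intevt, expevt, pexpevt; /* 0x268-0x278 */
        uint64_t tra, tea;                /* 0x280-0x288 */
        uint64_t kcr0, kcr1;              /* 0x290-0x298 */
        uint64_t vbr, resvec;             /* 0x2a0-0x2a8 */
};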
1968 | |||
1969 | |||
1970 | /* | ||
1971 | * --- Signal Handling Section | ||
1972 | */ | ||
1973 | |||
1974 | /* | ||
1975 | * extern long long _sa_default_rt_restorer | ||
1976 | * extern long long _sa_default_restorer | ||
1977 | * | ||
1978 | * or, better, | ||
1979 | * | ||
1980 | * extern void _sa_default_rt_restorer(void) | ||
1981 | * extern void _sa_default_restorer(void) | ||
1982 | * | ||
1983 | * Code prototypes to do a sys_rt_sigreturn() or sys_sigreturn() | ||
1984 | * from user space. Copied into user space by signal management. | ||
1985 | * Both must be quad aligned and 2 quads long (4 instructions). | ||
1986 | * | ||
1987 | */ | ||
1988 | .balign 8 | ||
1989 | .global sa_default_rt_restorer | ||
1990 | sa_default_rt_restorer: | ||
1991 | movi 0x10, r9 | ||
1992 | shori __NR_rt_sigreturn, r9 | ||
1993 | trapa r9 | ||
1994 | nop | ||
1995 | |||
1996 | .balign 8 | ||
1997 | .global sa_default_restorer | ||
1998 | sa_default_restorer: | ||
1999 | movi 0x10, r9 | ||
2000 | shori __NR_sigreturn, r9 | ||
2001 | trapa r9 | ||
2002 | nop | ||
2003 | |||
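As a loose illustration of the "copied into user space by signal management" remark above, the snippet below shows how a signal-frame setup routine might plant the default restorer when the handler supplies none; the function name, the frame argument, and the use of memcpy() in place of a user-copy primitive are all assumptions, not the actual sh64 signal code.

#include <string.h>

extern char sa_default_rt_restorer[];   /* 2 quads = 16 bytes of code */

/* Hypothetical helper: copy the default restorer stub into the user
 * signal frame's return-code area (real code would use a user-copy
 * primitive rather than memcpy). */
static void install_default_restorer_sketch(void *user_retcode)
{
        memcpy(user_retcode, sa_default_rt_restorer, 16);
}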
2004 | /* | ||
2005 | * --- __ex_table Section | ||
2006 | */ | ||
2007 | |||
2008 | /* | ||
2009 | * User Access Exception Table. | ||
2010 | */ | ||
2011 | .section __ex_table, "a" | ||
2012 | |||
2013 | .global asm_uaccess_start /* Just a marker */ | ||
2014 | asm_uaccess_start: | ||
2015 | |||
2016 | .long ___copy_user1, ___copy_user_exit | ||
2017 | .long ___copy_user2, ___copy_user_exit | ||
2018 | .long ___clear_user1, ___clear_user_exit | ||
2019 | .long ___strncpy_from_user1, ___strncpy_from_user_exit | ||
2020 | .long ___strnlen_user1, ___strnlen_user_exit | ||
2021 | .long ___get_user_asm_b1, ___get_user_asm_b_exit | ||
2022 | .long ___get_user_asm_w1, ___get_user_asm_w_exit | ||
2023 | .long ___get_user_asm_l1, ___get_user_asm_l_exit | ||
2024 | .long ___get_user_asm_q1, ___get_user_asm_q_exit | ||
2025 | .long ___put_user_asm_b1, ___put_user_asm_b_exit | ||
2026 | .long ___put_user_asm_w1, ___put_user_asm_w_exit | ||
2027 | .long ___put_user_asm_l1, ___put_user_asm_l_exit | ||
2028 | .long ___put_user_asm_q1, ___put_user_asm_q_exit | ||
2029 | |||
2030 | .global asm_uaccess_end /* Just a marker */ | ||
2031 | asm_uaccess_end: | ||
2032 | |||
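Each entry above pairs a possibly-faulting instruction address with the label to resume at; the following hedged C sketch shows how such a table is consumed when a fault arrives (the struct and lookup loop are illustrative, not the kernel's actual exception-table search).

#include <stddef.h>

struct uaccess_fixup_sketch {
        unsigned long insn;     /* address of the instruction that may fault */
        unsigned long fixup;    /* address to resume at after the fault */
};

/* Return the fixup address for 'fault_pc', or 0 if none is registered. */
static unsigned long find_fixup_sketch(const struct uaccess_fixup_sketch *tbl,
                                       size_t n, unsigned long fault_pc)
{
        size_t i;

        for (i = 0; i < n; i++)
                if (tbl[i].insn == fault_pc)
                        return tbl[i].fixup;
        return 0;               /* no fixup: the fault is fatal */
}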
2033 | |||
2034 | |||
2035 | |||
2036 | /* | ||
2037 | * --- .text.init Section | ||
2038 | */ | ||
2039 | |||
2040 | .section .text.init, "ax" | ||
2041 | |||
2042 | /* | ||
2043 | * void trap_init (void) | ||
2044 | * | ||
2045 | */ | ||
2046 | .global trap_init | ||
2047 | trap_init: | ||
2048 | addi SP, -24, SP /* Room to save r28/r29/r30 */ | ||
2049 | st.q SP, 0, r28 | ||
2050 | st.q SP, 8, r29 | ||
2051 | st.q SP, 16, r30 | ||
2052 | |||
2053 | /* Set VBR and RESVEC */ | ||
2054 | movi LVBR_block, r19 | ||
2055 | andi r19, -4, r19 /* reset MMUOFF + reserved */ | ||
2056 | /* For RESVEC exceptions we force the MMU off, which means we need the | ||
2057 | physical address. */ | ||
2058 | movi LRESVEC_block-CONFIG_CACHED_MEMORY_OFFSET, r20 | ||
2059 | andi r20, -4, r20 /* reset reserved */ | ||
2060 | ori r20, 1, r20 /* set MMUOFF */ | ||
2061 | putcon r19, VBR | ||
2062 | putcon r20, RESVEC | ||
2063 | |||
2064 | /* Sanity check */ | ||
2065 | movi LVBR_block_end, r21 | ||
2066 | andi r21, -4, r21 | ||
2067 | movi BLOCK_SIZE, r29 /* r29 = expected size */ | ||
2068 | or r19, ZERO, r30 | ||
2069 | add r19, r29, r19 | ||
2070 | |||
2071 | /* | ||
2072 | * Ugly, but better to loop forever now than crash afterwards. | ||
2073 | * We should print a message, but if we touch LVBR or | ||
2074 | * LRESVEC blocks we should not be surprised if we get stuck | ||
2075 | * in trap_init(). | ||
2076 | */ | ||
2077 | pta trap_init_loop, tr1 | ||
2078 | gettr tr1, r28 /* r28 = trap_init_loop */ | ||
2079 | sub r21, r30, r30 /* r30 = actual size */ | ||
2080 | |||
2081 | /* | ||
2082 | * VBR/RESVEC handlers overlap by being bigger than | ||
2083 | * allowed. Very bad. Just loop forever. | ||
2084 | * (r28) panic/loop address | ||
2085 | * (r29) expected size | ||
2086 | * (r30) actual size | ||
2087 | */ | ||
2088 | trap_init_loop: | ||
2089 | bne r19, r21, tr1 | ||
2090 | |||
2091 | /* Now that exception vectors are set up reset SR.BL */ | ||
2092 | getcon SR, r22 | ||
2093 | movi SR_UNBLOCK_EXC, r23 | ||
2094 | and r22, r23, r22 | ||
2095 | putcon r22, SR | ||
2096 | |||
2097 | addi SP, 24, SP | ||
2098 | ptabs LINK, tr0 | ||
2099 | blink tr0, ZERO | ||
2100 | |||
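Finally, a plain-C restatement of the size sanity check in trap_init above: if the assembled VBR handler block does not match the expected size it would overlap the following block, so the code deliberately spins forever instead of continuing. The symbol names and the BLOCK_SIZE value below are stand-ins for the assembly-side definitions, used only to illustrate the logic.

extern char LVBR_block[], LVBR_block_end[];
#define BLOCK_SIZE_SKETCH 0x600         /* assumed value, illustrative only */

static void trap_init_sanity_sketch(void)
{
        unsigned long actual = (unsigned long)(LVBR_block_end - LVBR_block);

        /* Mirrors "bne r19, r21, tr1": spin rather than run with
         * mis-sized exception vectors. */
        while (actual != BLOCK_SIZE_SKETCH)
                ;
}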