aboutsummaryrefslogtreecommitdiffstats
path: root/arch/sparc/kernel/rtrap_32.S
diff options
context:
space:
mode:
author: Sam Ravnborg <sam@ravnborg.org> 2008-12-03 06:08:37 -0500
committer: David S. Miller <davem@davemloft.net> 2008-12-04 12:17:20 -0500
commitd670bd4f803c8b646acd20f3ba21e65458293faf (patch)
treeeabc30aadce1556023c4aa445c649ba9e1d3f352 /arch/sparc/kernel/rtrap_32.S
parent478b8fecda511942404ac232897a718cecd13e48 (diff)
sparc: prepare kernel/ for unification
o sparc32 files with identical names to sparc64 renamed to <name>_32.S
o introduced a few Kconfig helpers to simplify Makefile logic
o refactored Makefile to prepare for unification
  - use obj-$(CONFIG_SPARC32) for sparc32 specific files
  - use <name>_$(BITS) for files where sparc64 has a _64 variant
  - sparc64 directly include a few files where sparc32 builds them,
    refer to these files directly (no BITS)
  - sneaked in -Werror as used by sparc64
o modified sparc/Makefile to use the new names for head/init_task
Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc/kernel/rtrap_32.S')
-rw-r--r--  arch/sparc/kernel/rtrap_32.S  322
1 files changed, 322 insertions, 0 deletions
diff --git a/arch/sparc/kernel/rtrap_32.S b/arch/sparc/kernel/rtrap_32.S
new file mode 100644
index 000000000000..4da2e1f66290
--- /dev/null
+++ b/arch/sparc/kernel/rtrap_32.S
@@ -0,0 +1,322 @@
1/*
2 * rtrap.S: Return from Sparc trap low-level code.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#include <asm/page.h>
8#include <asm/ptrace.h>
9#include <asm/psr.h>
10#include <asm/asi.h>
11#include <asm/smp.h>
12#include <asm/contregs.h>
13#include <asm/winmacro.h>
14#include <asm/asmmacro.h>
15#include <asm/thread_info.h>
16
/* Register aliases used throughout this file; cpp expands them inside
 * %-prefixed operands (e.g. %t_psr -> %l0).  The t_* locals hold the
 * trap-time privileged state loaded from pt_regs (PT_PSR/PT_PC/PT_NPC).
 */
17#define t_psr l0 /* trap-time %psr */
18#define t_pc l1 /* trap-time %pc (rett target) */
19#define t_npc l2 /* trap-time %npc */
20#define t_wim l3 /* trap-time %wim */
21#define twin_tmp1 l4 /* scratch local */
22#define glob_tmp g4 /* scratch global */
23#define curptr g6 /* current thread_info pointer (used with TI_* offsets) */
24
25 /* 7 WINDOW SPARC PATCH INSTRUCTIONS */
	/* Replacement instructions for CPUs with 7 register windows
	 * instead of 8: shift counts and masks (0x6/0x7f here) replace
	 * the 8-window forms (0x7/0xff) found at the rtrap_patch1..5
	 * sites below -- presumably copied over those sites at boot by
	 * the CPU setup code; confirm against the window-count probe.
	 */
26 .globl rtrap_7win_patch1, rtrap_7win_patch2, rtrap_7win_patch3
27 .globl rtrap_7win_patch4, rtrap_7win_patch5
28rtrap_7win_patch1: srl %t_wim, 0x6, %glob_tmp
29rtrap_7win_patch2: and %glob_tmp, 0x7f, %glob_tmp
30rtrap_7win_patch3: srl %g1, 7, %g2
31rtrap_7win_patch4: srl %g2, 6, %g2
32rtrap_7win_patch5: and %g1, 0x7f, %g1
33 /* END OF PATCH INSTRUCTIONS */
34
35 /* We need to check for a few things which are:
36 * 1) The need to call schedule() because this
37 * processes quantum is up.
38 * 2) Pending signals for this process, if any
39 * exist we need to call do_signal() to do
40 * the needy.
41 *
42 * Else we just check if the rett would land us
43 * in an invalid window, if so we need to grab
44 * it off the user/kernel stack first.
45 */
46
47 .globl ret_trap_entry, rtrap_patch1, rtrap_patch2
48 .globl rtrap_patch3, rtrap_patch4, rtrap_patch5
49 .globl ret_trap_lockless_ipi
50ret_trap_entry:
51ret_trap_lockless_ipi:
	/* On entry %t_psr holds the trap-time PSR, set up by the
	 * trap-entry path (not visible in this file).
	 */
52 andcc %t_psr, PSR_PS, %g0 ! trap taken from supervisor mode?
53 sethi %hi(PSR_SYSCALL), %g1
54 be 1f ! no: returning to user, check for work first
55 andn %t_psr, %g1, %t_psr ! (delay slot) strip PSR_SYSCALL bit
56
	/* Kernel-mode return: no schedule/signal work; go straight to
	 * the invalid-window check.
	 */
57 wr %t_psr, 0x0, %psr ! restore PSR, traps stay disabled
58 b ret_trap_kernel
59 nop
60
611:
	/* User-mode return: reschedule first if our quantum is up. */
62 ld [%curptr + TI_FLAGS], %g2
63 andcc %g2, (_TIF_NEED_RESCHED), %g0
64 be signal_p ! no resched needed; fall into signal check
65 nop
66
67 call schedule
68 nop
69
70 ld [%curptr + TI_FLAGS], %g2 ! flags may have changed across schedule()
71signal_p:
	/* Invariant: %g2 holds current thread_info flags here -- every
	 * branch to signal_p loads it in the branch delay slot.
	 */
72 andcc %g2, _TIF_DO_NOTIFY_RESUME_MASK, %g0
73 bz,a ret_trap_continue ! nothing pending; delay slot annulled if not taken
74 ld [%sp + STACKFRAME_SZ + PT_PSR], %t_psr ! executed only when branching
75
76 mov %g2, %o2 ! arg2: thread_info flags
77 mov %l5, %o1 ! arg1: %l5 -- presumably syscall/restart state from trap entry; TODO confirm
78 call do_notify_resume
79 add %sp, STACKFRAME_SZ, %o0 ! pt_regs ptr
80
81 /* Fall through. */
82 ld [%sp + STACKFRAME_SZ + PT_PSR], %t_psr ! reload: handlers may have altered pt_regs
83 clr %l6
84ret_trap_continue:
	/* Return to user space: strip PSR_SYSCALL and restore the
	 * trap-time PSR (traps disabled) before touching window state.
	 */
85 sethi %hi(PSR_SYSCALL), %g1
86 andn %t_psr, %g1, %t_psr
87 wr %t_psr, 0x0, %psr
88 WRITE_PAUSE
89
	/* If user windows were spilled into the thread's software save
	 * buffer (TI_W_SAVED != 0), push them out to the user stack
	 * first.  That can fault, so traps are re-enabled for the call.
	 */
90 ld [%curptr + TI_W_SAVED], %twin_tmp1
91 orcc %g0, %twin_tmp1, %g0 ! TI_W_SAVED == 0?
92 be ret_trap_nobufwins
93 nop
94
95 wr %t_psr, PSR_ET, %psr ! enable traps for the copy-out
96 WRITE_PAUSE
97
98 mov 1, %o1
99 call try_to_clear_window_buffer
100 add %sp, STACKFRAME_SZ, %o0 ! (delay slot) arg0: pt_regs ptr
101
	/* The copy-out may have generated new work; redo signal check. */
102 b signal_p
103 ld [%curptr + TI_FLAGS], %g2 ! (delay slot) flags for signal_p
104
105ret_trap_nobufwins:
106 /* Load up the user's out registers so we can pull
107 * a window from the stack, if necessary.
108 */
109 LOAD_PT_INS(sp)
110
111 /* If there are already live user windows in the
112 * set we can return from trap safely.
113 */
114 ld [%curptr + TI_UWINMASK], %twin_tmp1
115 orcc %g0, %twin_tmp1, %g0
116 bne ret_trap_userwins_ok
117 nop
118
119 /* Calculate new %wim, we have to pull a register
120 * window from the users stack.
121 */
122ret_trap_pull_one_window:
123 rd %wim, %t_wim
	/* Rotate WIM left one window: (wim << 1 | wim >> 7) & 0xff.
	 * Constants assume 8 windows; rtrap_7win_patch1/2 above are the
	 * 7-window replacements for these two patch sites.
	 */
124 sll %t_wim, 0x1, %twin_tmp1
125rtrap_patch1: srl %t_wim, 0x7, %glob_tmp
126 or %glob_tmp, %twin_tmp1, %glob_tmp
127rtrap_patch2: and %glob_tmp, 0xff, %glob_tmp
128
129 wr %glob_tmp, 0x0, %wim
130
131 /* Here comes the architecture specific
132 * branch to the user stack checking routine
133 * for return from traps.
134 */
	/* Branch target is presumably rewritten at boot for the MMU in
	 * use (sun4c_rett_stackchk vs srmmu_rett_stackchk below); TODO
	 * confirm against the boot patch code.  The delay-slot andcc
	 * tests user %fp for 8-byte alignment -- both stackchk routines
	 * branch on these condition codes as their first instruction.
	 */
135 .globl rtrap_mmu_patchme
136rtrap_mmu_patchme: b sun4c_rett_stackchk
137 andcc %fp, 0x7, %g0
138
139ret_trap_userwins_ok:
	/* Restore privileged state from pt_regs and rett to user. */
140 LOAD_PT_PRIV(sp, t_psr, t_pc, t_npc)
141 or %t_pc, %t_npc, %g2 ! misalignment in either address?
142 andcc %g2, 0x3, %g0
143 sethi %hi(PSR_SYSCALL), %g2
144 be 1f
145 andn %t_psr, %g2, %t_psr ! (delay slot) strip PSR_SYSCALL
146
147 b ret_trap_unaligned_pc
148 add %sp, STACKFRAME_SZ, %o0 ! (delay slot) arg0: pt_regs ptr
149
1501:
151 LOAD_PT_YREG(sp, g1)
152 LOAD_PT_GLOBALS(sp)
153
154 wr %t_psr, 0x0, %psr ! restore user PSR, traps still off
155 WRITE_PAUSE
156
157 jmp %t_pc ! return from trap via pc/npc pair
158 rett %t_npc
159
160ret_trap_unaligned_pc:
	/* pt_regs carries a misaligned pc/npc.  Hand it to the C
	 * handler (%o0 = pt_regs, set in our caller's branch delay
	 * slot), then redo the signal checks.
	 */
161 ld [%sp + STACKFRAME_SZ + PT_PC], %o1
162 ld [%sp + STACKFRAME_SZ + PT_NPC], %o2
163 ld [%sp + STACKFRAME_SZ + PT_PSR], %o3
164
165 wr %t_wim, 0x0, %wim ! or else...
166
167 wr %t_psr, PSR_ET, %psr ! C code ahead: re-enable traps
168 WRITE_PAUSE
169
170 call do_memaccess_unaligned
171 nop
172
173 b signal_p
174 ld [%curptr + TI_FLAGS], %g2 ! (delay slot) flags for signal_p
175
176ret_trap_kernel:
177 /* Will the rett land us in the invalid window? */
	/* The sll shift count uses only the low 5 bits of %t_psr, i.e.
	 * the CWP field, so %g1 = 2 << CWP: the mask of the window that
	 * rett will restore into (after the wrap-around below).
	 */
178 mov 2, %g1
179 sll %g1, %t_psr, %g1
180rtrap_patch3: srl %g1, 8, %g2 ! wrap rotate for 8 windows (7win patch above)
181 or %g1, %g2, %g1
182 rd %wim, %g2
183 andcc %g2, %g1, %g0
184 be 1f ! Nope, just return from the trap
185 sll %g2, 0x1, %g1
186
187 /* We have to grab a window before returning. */
	/* New WIM = old WIM rotated left one window; constants patched
	 * for 7-window CPUs via rtrap_7win_patch4/5.
	 */
188rtrap_patch4: srl %g2, 7, %g2
189 or %g1, %g2, %g1
190rtrap_patch5: and %g1, 0xff, %g1
191
192 wr %g1, 0x0, %wim
193
194 /* Grrr, make sure we load from the right %sp... */
195 LOAD_PT_ALL(sp, t_psr, t_pc, t_npc, g1)
196
	/* Slip into the window below, refill it from its stack frame,
	 * then step back up; rett can now restore into it safely.
	 */
197 restore %g0, %g0, %g0
198 LOAD_WINDOW(sp)
199 b 2f
200 save %g0, %g0, %g0
201
202 /* Reload the entire frame in case this is from a
203 * kernel system call or whatever...
204 */
2051:
206 LOAD_PT_ALL(sp, t_psr, t_pc, t_npc, g1)
2072:
208 sethi %hi(PSR_SYSCALL), %twin_tmp1
209 andn %t_psr, %twin_tmp1, %t_psr
210 wr %t_psr, 0x0, %psr
211 WRITE_PAUSE
212
213 jmp %t_pc ! return from trap
214 rett %t_npc
215
216ret_trap_user_stack_is_bolixed:
	/* The user stack is unusable (misaligned, in a hole, or bad
	 * permissions).  Let window_ret_fault() deal with it, then redo
	 * the signal checks.  NOTE: the stack checkers below branch to
	 * this label + 0x4, performing the wr-%wim themselves in their
	 * branch delay slot -- do not insert instructions here.
	 */
217 wr %t_wim, 0x0, %wim
218
219 wr %t_psr, PSR_ET, %psr ! C code ahead: re-enable traps
220 WRITE_PAUSE
221
222 call window_ret_fault
223 add %sp, STACKFRAME_SZ, %o0 ! (delay slot) arg0: pt_regs ptr
224
225 b signal_p
226 ld [%curptr + TI_FLAGS], %g2 ! (delay slot) flags for signal_p
227
228sun4c_rett_stackchk:
	/* Entered with condition codes from "andcc %fp, 0x7" set in the
	 * rtrap_mmu_patchme delay slot: non-zero => misaligned %fp.
	 * All failure paths branch to ret_trap_user_stack_is_bolixed +
	 * 0x4, doing its "wr %t_wim, 0x0, %wim" in the delay slot here.
	 */
229 be 1f
230 and %fp, 0xfff, %g1 ! delay slot
231
232 b ret_trap_user_stack_is_bolixed + 0x4
233 wr %t_wim, 0x0, %wim
234
235 /* See if we have to check the sanity of one page or two */
2361:
	/* %g1 = page offset of the last doubleword of the 16-word
	 * window save area (%fp + 0x38).
	 */
237 add %g1, 0x38, %g1
	/* Top three address bits must be all zeroes or all ones;
	 * anything else puts %fp in the sun4c VMA hole.
	 */
238 sra %fp, 29, %g2
239 add %g2, 0x1, %g2
240 andncc %g2, 0x1, %g0
241 be 1f
242 andncc %g1, 0xff8, %g0 ! (delay slot) window crosses a 4K page boundary?
243
244 /* %sp is in vma hole, yuck */
245 b ret_trap_user_stack_is_bolixed + 0x4
246 wr %t_wim, 0x0, %wim
247
2481:
249 be sun4c_rett_onepage /* Only one page to check */
250 lda [%fp] ASI_PTE, %g2 ! (delay slot) probe first page's PTE
251
252sun4c_rett_twopages:
253 add %fp, 0x38, %g1
	/* Same VMA-hole test as above, now on the second page. */
254 sra %g1, 29, %g2
255 add %g2, 0x1, %g2
256 andncc %g2, 0x1, %g0
257 be 1f
258 lda [%g1] ASI_PTE, %g2 ! (delay slot) probe second page's PTE
259
260 /* Second page is in vma hole */
261 b ret_trap_user_stack_is_bolixed + 0x4
262 wr %t_wim, 0x0, %wim
263
2641:
	/* Test bit 0x4 of the PTE's top three bits -- presumably the
	 * write-permission bit of the sun4c PTE; confirm against the
	 * sun4c PTE layout.
	 */
265 srl %g2, 29, %g2
266 andcc %g2, 0x4, %g0
267 bne sun4c_rett_onepage
268 lda [%fp] ASI_PTE, %g2 ! (delay slot) reload first page's PTE
269
270 /* Second page has bad perms */
271 b ret_trap_user_stack_is_bolixed + 0x4
272 wr %t_wim, 0x0, %wim
273
274sun4c_rett_onepage:
275 srl %g2, 29, %g2
276 andcc %g2, 0x4, %g0 ! same permission test, first page
277 bne,a 1f
278 restore %g0, %g0, %g0 ! (annulled slot) slip into window to be loaded
279
280 /* A page had bad page permissions, losing... */
281 b ret_trap_user_stack_is_bolixed + 0x4
282 wr %t_wim, 0x0, %wim
283
284 /* Whee, things are ok, load the window and continue. */
2851:
286 LOAD_WINDOW(sp)
287
288 b ret_trap_userwins_ok
289 save %g0, %g0, %g0 ! (delay slot) back up to the trap window
290
291 .globl srmmu_rett_stackchk
292srmmu_rett_stackchk:
	/* Entered with condition codes from "andcc %fp, 0x7" set in the
	 * rtrap_mmu_patchme delay slot.
	 */
293 bne ret_trap_user_stack_is_bolixed ! misaligned user %fp
294 sethi %hi(PAGE_OFFSET), %g1
295 cmp %g1, %fp
296 bleu ret_trap_user_stack_is_bolixed ! %fp points into kernel space
297 mov AC_M_SFSR, %g1 ! (delay slot) executes on both paths
	/* Read (and thereby clear) the fault status register, then set
	 * bit 0x2 -- the no-fault (NF) bit per the SRMMU spec -- in the
	 * MMU control register (register 0), so the window loads below
	 * record faults instead of trapping.
	 */
298 lda [%g1] ASI_M_MMUREGS, %g0
299
300 lda [%g0] ASI_M_MMUREGS, %g1
301 or %g1, 0x2, %g1
302 sta %g1, [%g0] ASI_M_MMUREGS
303
304 restore %g0, %g0, %g0 ! slip into the window to be loaded
305
306 LOAD_WINDOW(sp)
307
308 save %g0, %g0, %g0 ! back to the trap window
309
	/* Clear NF again; %g1 still holds the control register value
	 * (save/restore do not touch globals).
	 */
310 andn %g1, 0x2, %g1
311 sta %g1, [%g0] ASI_M_MMUREGS
312
	/* Did the loads fault?  Read SFAR then SFSR; if SFSR bit 0x2 is
	 * clear the window was loaded cleanly.
	 */
313 mov AC_M_SFAR, %g2
314 lda [%g2] ASI_M_MMUREGS, %g2
315
316 mov AC_M_SFSR, %g1
317 lda [%g1] ASI_M_MMUREGS, %g1
318 andcc %g1, 0x2, %g0
319 be ret_trap_userwins_ok
320 nop
321
322 b,a ret_trap_user_stack_is_bolixed