path: root/arch/sparc/kernel/rtrap_64.S
author    Sam Ravnborg <sam@ravnborg.org>    2008-12-03 06:11:52 -0500
committer David S. Miller <davem@davemloft.net>    2008-12-04 12:17:21 -0500
commit    a88b5ba8bd8ac18aad65ee6c6a254e2e74876db3 (patch)
tree      eb3d0ffaf53c3f7ec6083752c2097cecd1cb892a /arch/sparc/kernel/rtrap_64.S
parent    d670bd4f803c8b646acd20f3ba21e65458293faf (diff)
sparc,sparc64: unify kernel/
o Move all files from sparc64/kernel/ to sparc/kernel/, renaming as appropriate
o Update sparc/Makefile for the changes
o Update sparc/kernel/Makefile to include the sparc64 files

NOTE: This commit changes link order on sparc64! Link order had to change
for either sparc32 or sparc64, and since sparc64 sees more testing than
sparc32, the order was changed on sparc64, where issues will be caught
faster.

Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
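For readers unfamiliar with the kbuild side of such a move: unification by
renaming usually means a single kernel/Makefile that picks the _32 or _64
variant of each object by configuration, so link order follows the unified
list order. A minimal sketch of that idiom is below; it is illustrative
only, not this commit's literal Makefile text, and apart from rtrap_64.o the
object names are hypothetical.

    # arch/sparc/kernel/Makefile -- illustrative sketch, not the commit's
    # literal contents.
    obj-y                 += shared_helper.o  # hypothetical object built for both
    obj-$(CONFIG_SPARC32) += rtrap_32.o       # hypothetical 32-bit counterpart
    obj-$(CONFIG_SPARC64) += rtrap_64.o       # the file added by this diff

Because the sparc64 objects are now linked in the order of this unified
list, the link-order caveat in the commit message follows directly.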
Diffstat (limited to 'arch/sparc/kernel/rtrap_64.S')
-rw-r--r--  arch/sparc/kernel/rtrap_64.S | 450
1 files changed, 450 insertions, 0 deletions
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
new file mode 100644
index 000000000000..fd3cee4d117c
--- /dev/null
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -0,0 +1,450 @@
/*
 * rtrap.S: Preparing for return from trap on Sparc V9.
 *
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */


#include <asm/asi.h>
#include <asm/pstate.h>
#include <asm/ptrace.h>
#include <asm/spitfire.h>
#include <asm/head.h>
#include <asm/visasm.h>
#include <asm/processor.h>

#define RTRAP_PSTATE            (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
#define RTRAP_PSTATE_IRQOFF     (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
#define RTRAP_PSTATE_AG_IRQOFF  (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)

        .text
        .align 32
__handle_softirq:
        call do_softirq
        nop
        ba,a,pt %xcc, __handle_softirq_continue
        nop
__handle_preemption:
        call schedule
        wrpr %g0, RTRAP_PSTATE, %pstate
        ba,pt %xcc, __handle_preemption_continue
        wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate

__handle_user_windows:
        call fault_in_user_windows
        wrpr %g0, RTRAP_PSTATE, %pstate
        wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
        /* Redo sched+sig checks */
        ldx [%g6 + TI_FLAGS], %l0
        andcc %l0, _TIF_NEED_RESCHED, %g0

        be,pt %xcc, 1f
        nop
        call schedule
        wrpr %g0, RTRAP_PSTATE, %pstate
        wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
        ldx [%g6 + TI_FLAGS], %l0

1:      andcc %l0, _TIF_DO_NOTIFY_RESUME_MASK, %g0
        be,pt %xcc, __handle_user_windows_continue
        nop
        mov %l5, %o1
        add %sp, PTREGS_OFF, %o0
        mov %l0, %o2

        call do_notify_resume
        wrpr %g0, RTRAP_PSTATE, %pstate
        wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
        /* Signal delivery can modify pt_regs tstate, so we must
         * reload it.
         */
        ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
        sethi %hi(0xf << 20), %l4
        and %l1, %l4, %l4
        ba,pt %xcc, __handle_user_windows_continue

        andn %l1, %l4, %l1
__handle_perfctrs:
        call update_perfctrs
        wrpr %g0, RTRAP_PSTATE, %pstate
        wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
        ldub [%g6 + TI_WSAVED], %o2
        brz,pt %o2, 1f
        nop
        /* Redo userwin+sched+sig checks */
        call fault_in_user_windows

        wrpr %g0, RTRAP_PSTATE, %pstate
        wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
        ldx [%g6 + TI_FLAGS], %l0
        andcc %l0, _TIF_NEED_RESCHED, %g0
        be,pt %xcc, 1f

        nop
        call schedule
        wrpr %g0, RTRAP_PSTATE, %pstate
        wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
        ldx [%g6 + TI_FLAGS], %l0
1:      andcc %l0, _TIF_DO_NOTIFY_RESUME_MASK, %g0

        be,pt %xcc, __handle_perfctrs_continue
        sethi %hi(TSTATE_PEF), %o0
        mov %l5, %o1
        add %sp, PTREGS_OFF, %o0
        mov %l0, %o2
        call do_notify_resume

        wrpr %g0, RTRAP_PSTATE, %pstate
        wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
        /* Signal delivery can modify pt_regs tstate, so we must
         * reload it.
         */
        ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
        sethi %hi(0xf << 20), %l4
        and %l1, %l4, %l4
        andn %l1, %l4, %l1
        ba,pt %xcc, __handle_perfctrs_continue

        sethi %hi(TSTATE_PEF), %o0
__handle_userfpu:
        rd %fprs, %l5
        andcc %l5, FPRS_FEF, %g0
        sethi %hi(TSTATE_PEF), %o0
        be,a,pn %icc, __handle_userfpu_continue
        andn %l1, %o0, %l1
        ba,a,pt %xcc, __handle_userfpu_continue

__handle_signal:
        mov %l5, %o1
        add %sp, PTREGS_OFF, %o0
        mov %l0, %o2
        call do_notify_resume
        wrpr %g0, RTRAP_PSTATE, %pstate
        wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate

        /* Signal delivery can modify pt_regs tstate, so we must
         * reload it.
         */
        ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
        sethi %hi(0xf << 20), %l4
        and %l1, %l4, %l4
        ba,pt %xcc, __handle_signal_continue
        andn %l1, %l4, %l1

        /* When returning from a NMI (%pil==15) interrupt we want to
         * avoid running softirqs, doing IRQ tracing, preempting, etc.
         */
        .globl rtrap_nmi
rtrap_nmi: ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
        sethi %hi(0xf << 20), %l4
        and %l1, %l4, %l4
        andn %l1, %l4, %l1
        srl %l4, 20, %l4
        ba,pt %xcc, rtrap_no_irq_enable
        wrpr %l4, %pil

        .align 64
        .globl rtrap_irq, rtrap, irqsz_patchme, rtrap_xcall
rtrap_irq:
rtrap:
#ifndef CONFIG_SMP
        sethi %hi(per_cpu____cpu_data), %l0
        lduw [%l0 + %lo(per_cpu____cpu_data)], %l1
#else
        sethi %hi(per_cpu____cpu_data), %l0
        or %l0, %lo(per_cpu____cpu_data), %l0
        lduw [%l0 + %g5], %l1
#endif
        cmp %l1, 0

        /* mm/ultra.S:xcall_report_regs KNOWS about this load. */
        bne,pn %icc, __handle_softirq
        ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
__handle_softirq_continue:
rtrap_xcall:
        sethi %hi(0xf << 20), %l4
        and %l1, %l4, %l4
        andn %l1, %l4, %l1
        srl %l4, 20, %l4
#ifdef CONFIG_TRACE_IRQFLAGS
        brnz,pn %l4, rtrap_no_irq_enable
        nop
        call trace_hardirqs_on
        nop
        wrpr %l4, %pil
#endif
rtrap_no_irq_enable:
        andcc %l1, TSTATE_PRIV, %l3
        bne,pn %icc, to_kernel
        nop

        /* We must hold IRQs off and atomically test schedule+signal
         * state, then hold them off all the way back to userspace.
         * If we are returning to kernel, none of this matters.  Note
         * that we are disabling interrupts via PSTATE_IE, not using
         * %pil.
         *
         * If we do not do this, there is a window where we would do
         * the tests, later the signal/resched event arrives but we do
         * not process it since we are still in kernel mode.  It would
         * take until the next local IRQ before the signal/resched
         * event would be handled.
         *
         * This also means that if we have to deal with performance
         * counters or user windows, we have to redo all of these
         * sched+signal checks with IRQs disabled.
         */
to_user: wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
        wrpr 0, %pil
__handle_preemption_continue:
        ldx [%g6 + TI_FLAGS], %l0
        sethi %hi(_TIF_USER_WORK_MASK), %o0
        or %o0, %lo(_TIF_USER_WORK_MASK), %o0
        andcc %l0, %o0, %g0
        sethi %hi(TSTATE_PEF), %o0
        be,pt %xcc, user_nowork
        andcc %l1, %o0, %g0
        andcc %l0, _TIF_NEED_RESCHED, %g0
        bne,pn %xcc, __handle_preemption
        andcc %l0, _TIF_DO_NOTIFY_RESUME_MASK, %g0
        bne,pn %xcc, __handle_signal
__handle_signal_continue:
        ldub [%g6 + TI_WSAVED], %o2
        brnz,pn %o2, __handle_user_windows
        nop
__handle_user_windows_continue:
        ldx [%g6 + TI_FLAGS], %l5
        andcc %l5, _TIF_PERFCTR, %g0
        sethi %hi(TSTATE_PEF), %o0
        bne,pn %xcc, __handle_perfctrs
__handle_perfctrs_continue:
        andcc %l1, %o0, %g0

        /* This fpdepth clear is necessary for non-syscall rtraps only */
user_nowork:
        bne,pn %xcc, __handle_userfpu
        stb %g0, [%g6 + TI_FPDEPTH]
__handle_userfpu_continue:

rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
        ldx [%sp + PTREGS_OFF + PT_V9_G2], %g2

        ldx [%sp + PTREGS_OFF + PT_V9_G3], %g3
        ldx [%sp + PTREGS_OFF + PT_V9_G4], %g4
        ldx [%sp + PTREGS_OFF + PT_V9_G5], %g5
        brz,pt %l3, 1f
        mov %g6, %l2

        /* Must do this before thread reg is clobbered below. */
        LOAD_PER_CPU_BASE(%g5, %g6, %i0, %i1, %i2)
1:
        ldx [%sp + PTREGS_OFF + PT_V9_G6], %g6
        ldx [%sp + PTREGS_OFF + PT_V9_G7], %g7

        /* Normal globals are restored, go to trap globals. */
661:    wrpr %g0, RTRAP_PSTATE_AG_IRQOFF, %pstate
        nop
        .section .sun4v_2insn_patch, "ax"
        .word 661b
        wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
        SET_GL(1)
        .previous

        mov %l2, %g6

        ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0
        ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1

        ldx [%sp + PTREGS_OFF + PT_V9_I2], %i2
        ldx [%sp + PTREGS_OFF + PT_V9_I3], %i3
        ldx [%sp + PTREGS_OFF + PT_V9_I4], %i4
        ldx [%sp + PTREGS_OFF + PT_V9_I5], %i5
        ldx [%sp + PTREGS_OFF + PT_V9_I6], %i6
        ldx [%sp + PTREGS_OFF + PT_V9_I7], %i7
        ldx [%sp + PTREGS_OFF + PT_V9_TPC], %l2
        ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %o2

        ld [%sp + PTREGS_OFF + PT_V9_Y], %o3
        wr %o3, %g0, %y
        wrpr %l4, 0x0, %pil
        wrpr %g0, 0x1, %tl
        andn %l1, TSTATE_SYSCALL, %l1
        wrpr %l1, %g0, %tstate
        wrpr %l2, %g0, %tpc
        wrpr %o2, %g0, %tnpc

        brnz,pn %l3, kern_rtt
        mov PRIMARY_CONTEXT, %l7

661:    ldxa [%l7 + %l7] ASI_DMMU, %l0
        .section .sun4v_1insn_patch, "ax"
        .word 661b
        ldxa [%l7 + %l7] ASI_MMU, %l0
        .previous

        sethi %hi(sparc64_kern_pri_nuc_bits), %l1
        ldx [%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1
        or %l0, %l1, %l0

661:    stxa %l0, [%l7] ASI_DMMU
        .section .sun4v_1insn_patch, "ax"
        .word 661b
        stxa %l0, [%l7] ASI_MMU
        .previous

        sethi %hi(KERNBASE), %l7
        flush %l7
        rdpr %wstate, %l1
        rdpr %otherwin, %l2
        srl %l1, 3, %l1

        wrpr %l2, %g0, %canrestore
        wrpr %l1, %g0, %wstate
        brnz,pt %l2, user_rtt_restore
        wrpr %g0, %g0, %otherwin

        ldx [%g6 + TI_FLAGS], %g3
        wr %g0, ASI_AIUP, %asi
        rdpr %cwp, %g1
        andcc %g3, _TIF_32BIT, %g0
        sub %g1, 1, %g1
        bne,pt %xcc, user_rtt_fill_32bit
        wrpr %g1, %cwp
        ba,a,pt %xcc, user_rtt_fill_64bit

user_rtt_fill_fixup:
        rdpr %cwp, %g1
        add %g1, 1, %g1
        wrpr %g1, 0x0, %cwp

        rdpr %wstate, %g2
        sll %g2, 3, %g2
        wrpr %g2, 0x0, %wstate

        /* We know %canrestore and %otherwin are both zero. */

        sethi %hi(sparc64_kern_pri_context), %g2
        ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2
        mov PRIMARY_CONTEXT, %g1

661:    stxa %g2, [%g1] ASI_DMMU
        .section .sun4v_1insn_patch, "ax"
        .word 661b
        stxa %g2, [%g1] ASI_MMU
        .previous

        sethi %hi(KERNBASE), %g1
        flush %g1

        or %g4, FAULT_CODE_WINFIXUP, %g4
        stb %g4, [%g6 + TI_FAULT_CODE]
        stx %g5, [%g6 + TI_FAULT_ADDR]

        mov %g6, %l1
        wrpr %g0, 0x0, %tl

661:    nop
        .section .sun4v_1insn_patch, "ax"
        .word 661b
        SET_GL(0)
        .previous

        wrpr %g0, RTRAP_PSTATE, %pstate

        mov %l1, %g6
        ldx [%g6 + TI_TASK], %g4
        LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
        call do_sparc64_fault
        add %sp, PTREGS_OFF, %o0
        ba,pt %xcc, rtrap
        nop

user_rtt_pre_restore:
        add %g1, 1, %g1
        wrpr %g1, 0x0, %cwp

user_rtt_restore:
        restore
        rdpr %canrestore, %g1
        wrpr %g1, 0x0, %cleanwin
        retry
        nop

kern_rtt: rdpr %canrestore, %g1
        brz,pn %g1, kern_rtt_fill
        nop
kern_rtt_restore:
        stw %g0, [%sp + PTREGS_OFF + PT_V9_MAGIC]
        restore
        retry

to_kernel:
#ifdef CONFIG_PREEMPT
        ldsw [%g6 + TI_PRE_COUNT], %l5
        brnz %l5, kern_fpucheck
        ldx [%g6 + TI_FLAGS], %l5
        andcc %l5, _TIF_NEED_RESCHED, %g0
        be,pt %xcc, kern_fpucheck
        nop
        cmp %l4, 0
        bne,pn %xcc, kern_fpucheck
        sethi %hi(PREEMPT_ACTIVE), %l6
        stw %l6, [%g6 + TI_PRE_COUNT]
        call schedule
        nop
        ba,pt %xcc, rtrap
        stw %g0, [%g6 + TI_PRE_COUNT]
#endif
kern_fpucheck: ldub [%g6 + TI_FPDEPTH], %l5
        brz,pt %l5, rt_continue
        srl %l5, 1, %o0
        add %g6, TI_FPSAVED, %l6
        ldub [%l6 + %o0], %l2
        sub %l5, 2, %l5

        add %g6, TI_GSR, %o1
        andcc %l2, (FPRS_FEF|FPRS_DU), %g0
        be,pt %icc, 2f
        and %l2, FPRS_DL, %l6
        andcc %l2, FPRS_FEF, %g0
        be,pn %icc, 5f
        sll %o0, 3, %o5
        rd %fprs, %g1

        wr %g1, FPRS_FEF, %fprs
        ldx [%o1 + %o5], %g1
        add %g6, TI_XFSR, %o1
        sll %o0, 8, %o2
        add %g6, TI_FPREGS, %o3
        brz,pn %l6, 1f
        add %g6, TI_FPREGS+0x40, %o4

        membar #Sync
        ldda [%o3 + %o2] ASI_BLK_P, %f0
        ldda [%o4 + %o2] ASI_BLK_P, %f16
        membar #Sync
1:      andcc %l2, FPRS_DU, %g0
        be,pn %icc, 1f
        wr %g1, 0, %gsr
        add %o2, 0x80, %o2
        membar #Sync
        ldda [%o3 + %o2] ASI_BLK_P, %f32
        ldda [%o4 + %o2] ASI_BLK_P, %f48
1:      membar #Sync
        ldx [%o1 + %o5], %fsr
2:      stb %l5, [%g6 + TI_FPDEPTH]
        ba,pt %xcc, rt_continue
        nop
5:      wr %g0, FPRS_FEF, %fprs
        sll %o0, 8, %o2

        add %g6, TI_FPREGS+0x80, %o3
        add %g6, TI_FPREGS+0xc0, %o4
        membar #Sync
        ldda [%o3 + %o2] ASI_BLK_P, %f32
        ldda [%o4 + %o2] ASI_BLK_P, %f48
        membar #Sync
        wr %g0, FPRS_DU, %fprs
        ba,pt %xcc, rt_continue
        stb %l5, [%g6 + TI_FPDEPTH]