aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/kernel/entry_32.S
diff options
context:
space:
mode:
authorPaul Mackerras <paulus@samba.org>2005-10-10 08:36:14 -0400
committerPaul Mackerras <paulus@samba.org>2005-10-10 08:36:14 -0400
commit9994a33865f4d55c44c9731c01e1f891543278de (patch)
tree77d8fe580493dbf9ce1820a703c482fba291b6b9 /arch/powerpc/kernel/entry_32.S
parent06d67d54741a5bfefa31945ef195dfa748c29025 (diff)
powerpc: Introduce entry_{32,64}.S, misc_{32,64}.S, systbl.S
The system call table has been consolidated into systbl.S. We have separate 32-bit and 64-bit versions of entry.S and misc.S since the code is mostly sufficiently different to be not worth merging. There are some common bits that will be extracted in future. Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/kernel/entry_32.S')
-rw-r--r--arch/powerpc/kernel/entry_32.S1002
1 files changed, 1002 insertions, 0 deletions
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
new file mode 100644
index 000000000000..094eea6fbd69
--- /dev/null
+++ b/arch/powerpc/kernel/entry_32.S
@@ -0,0 +1,1002 @@
1/*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
5 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
6 * Adapted for Power Macintosh by Paul Mackerras.
7 * Low-level exception handlers and MMU support
8 * rewritten by Paul Mackerras.
9 * Copyright (C) 1996 Paul Mackerras.
10 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 * This file contains the system call entry code, context switch
13 * code, and exception/interrupt return code for PowerPC.
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 *
20 */
21
22#include <linux/config.h>
23#include <linux/errno.h>
24#include <linux/sys.h>
25#include <linux/threads.h>
26#include <asm/reg.h>
27#include <asm/page.h>
28#include <asm/mmu.h>
29#include <asm/cputable.h>
30#include <asm/thread_info.h>
31#include <asm/ppc_asm.h>
32#include <asm/asm-offsets.h>
33#include <asm/unistd.h>
34
35#undef SHOW_SYSCALLS
36#undef SHOW_SYSCALLS_TASK
37
38/*
39 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
40 */
/*
 * LOAD_MSR_KERNEL(r, x): load the constant x into register r.
 * A value >= 0x10000 does not fit in the 16-bit immediate of "li",
 * so it must be built with a lis/ori pair in that case.
 */
41#if MSR_KERNEL >= 0x10000
42#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
43#else
44#define LOAD_MSR_KERNEL(r, x)	li r,(x)
45#endif
46
47#ifdef CONFIG_BOOKE
48#include "head_booke.h"
/*
 * Finish the exception-frame fixup for a Book-E exception level
 * (critical / debug / machine check).  r8 is parked in the per-level
 * SPRG while BOOKE_LOAD_EXC_LEVEL_STACK runs, then the r10/r11 values
 * saved below the exception-level stack are copied into the regular
 * exception frame at r11, and r8 is recovered.
 */
49#define TRANSFER_TO_HANDLER_EXC_LEVEL(exc_level)	\
50	mtspr	exc_level##_SPRG,r8;			\
51	BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);		\
52	lwz	r0,GPR10-INT_FRAME_SIZE(r8);		\
53	stw	r0,GPR10(r11);				\
54	lwz	r0,GPR11-INT_FRAME_SIZE(r8);		\
55	stw	r0,GPR11(r11);				\
56	mfspr	r8,exc_level##_SPRG
57
58	.globl	mcheck_transfer_to_handler
59mcheck_transfer_to_handler:
60	TRANSFER_TO_HANDLER_EXC_LEVEL(MCHECK)
61	b	transfer_to_handler_full
62
63	.globl	debug_transfer_to_handler
64debug_transfer_to_handler:
65	TRANSFER_TO_HANDLER_EXC_LEVEL(DEBUG)
66	b	transfer_to_handler_full
67
68	.globl	crit_transfer_to_handler
69crit_transfer_to_handler:
70	TRANSFER_TO_HANDLER_EXC_LEVEL(CRIT)
71	/* fall through */
72#endif
73
74#ifdef CONFIG_40x
75	.globl	crit_transfer_to_handler
76crit_transfer_to_handler:
	/* 40x keeps the critical-exception r10/r11 at absolute low-memory
	 * locations (crit_r10/crit_r11, addressed off register 0 here);
	 * copy them into the regular exception frame. */
77	lwz	r0,crit_r10@l(0)
78	stw	r0,GPR10(r11)
79	lwz	r0,crit_r11@l(0)
80	stw	r0,GPR11(r11)
81	/* fall through */
82#endif
83
84/*
85 * This code finishes saving the registers to the exception frame
86 * and jumps to the appropriate handler for the exception, turning
87 * on address translation.
88 * Note that we rely on the caller having set cr0.eq iff the exception
89 * occurred in kernel mode (i.e. MSR:PR = 0).
90 */
/*
 * Entry state (set up by the exception prologs):
 *   r11 = pointer to the exception frame, r9 = saved MSR,
 *   r12 = saved NIP, r1 = kernel stack pointer, and LR points at a
 *   two-word table: handler address, then return address (read at
 *   label "3:" below).
 */
91	.globl	transfer_to_handler_full
92transfer_to_handler_full:
93	SAVE_NVGPRS(r11)
94	/* fall through */
95
96	.globl	transfer_to_handler
97transfer_to_handler:
98	stw	r2,GPR2(r11)
99	stw	r12,_NIP(r11)
100	stw	r9,_MSR(r11)
101	andi.	r2,r9,MSR_PR
102	mfctr	r12
103	mfspr	r2,SPRN_XER
104	stw	r12,_CTR(r11)
105	stw	r2,_XER(r11)
	/* SPRG3 holds the physical address of current's THREAD
	 * (kept up to date by _switch below). */
106	mfspr	r12,SPRN_SPRG3
107	addi	r2,r12,-THREAD
108	tovirt(r2,r2)			/* set r2 to current */
	/* cr0 still reflects "andi. r2,r9,MSR_PR" above:
	 * eq => the exception came from kernel mode, so skip the
	 * THREAD.regs fixup done on the user path just below. */
109	beq	2f			/* if from user, fix up THREAD.regs */
110	addi	r11,r1,STACK_FRAME_OVERHEAD
111	stw	r11,PT_REGS(r12)
112#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
113	/* Check to see if the dbcr0 register is set up to debug.  Use the
114	   single-step bit to do this. */
115	lwz	r12,THREAD_DBCR0(r12)
116	andis.	r12,r12,DBCR0_IC@h
117	beq+	3f
118	/* From user and task is ptraced - load up global dbcr0 */
119	li	r12,-1			/* clear all pending debug events */
120	mtspr	SPRN_DBSR,r12
121	lis	r11,global_dbcr0@ha
122	tophys(r11,r11)
123	addi	r11,r11,global_dbcr0@l
124	lwz	r12,0(r11)
125	mtspr	SPRN_DBCR0,r12
	/* decrement the count kept at global_dbcr0+4
	 * (it is incremented in load_dbcr0) */
126	lwz	r12,4(r11)
127	addi	r12,r12,-1
128	stw	r12,4(r11)
129#endif
130	b	3f
1312:	/* if from kernel, check interrupted DOZE/NAP mode and
132	 * check for stack overflow
133	 */
134#ifdef CONFIG_6xx
	/* copy HID0 into CR so the "bt-" tests below can check the
	 * doze/nap bits */
135	mfspr	r11,SPRN_HID0
136	mtcr	r11
137BEGIN_FTR_SECTION
138	bt-	8,power_save_6xx_restore	/* Check DOZE */
139END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
140BEGIN_FTR_SECTION
141	bt-	9,power_save_6xx_restore	/* Check NAP */
142END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
143#endif /* CONFIG_6xx */
144	.globl transfer_to_handler_cont
145transfer_to_handler_cont:
146	lwz	r11,THREAD_INFO-THREAD(r12)
147	cmplw	r1,r11			/* if r1 <= current->thread_info */
148	ble-	stack_ovf		/* then the kernel stack overflowed */
1493:
	/* LR points at a pair of words deposited after the call:
	 * word 0 = handler entry (virtual), word 4 = where to return */
150	mflr	r9
151	lwz	r11,0(r9)		/* virtual address of handler */
152	lwz	r9,4(r9)		/* where to go when done */
153	FIX_SRR1(r10,r12)
154	mtspr	SPRN_SRR0,r11
155	mtspr	SPRN_SRR1,r10
156	mtlr	r9
157	SYNC
158	RFI				/* jump to handler, enable MMU */
159
160/*
161 * On kernel stack overflow, load up an initial stack pointer
162 * and call StackOverflow(regs), which should not return.
163 */
164stack_ovf:
165	/* sometimes we use a statically-allocated stack, which is OK. */
166	lis	r11,_end@h
167	ori	r11,r11,_end@l
168	cmplw	r1,r11
169	ble	3b		/* r1 <= &_end is OK */
	/* switch r1 to the top of the init task's stack, point r3 at the
	 * saved pt_regs, and enter StackOverflow via SRR0/SRR1 + RFI */
170	SAVE_NVGPRS(r11)
171	addi	r3,r1,STACK_FRAME_OVERHEAD
172	lis	r1,init_thread_union@ha
173	addi	r1,r1,init_thread_union@l
174	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
175	lis	r9,StackOverflow@ha
176	addi	r9,r9,StackOverflow@l
177	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
178	FIX_SRR1(r10,r12)
179	mtspr	SPRN_SRR0,r9
180	mtspr	SPRN_SRR1,r10
181	SYNC
182	RFI
183
184/*
185 * Handle a system call.
186 */
187	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
188	.stabs	"entry_32.S",N_SO,0,0,0f
1890:
190
/*
 * On entry: r0 = syscall number, r3-r8 = arguments, r2 = current,
 * and an exception frame has already been built on the kernel stack
 * at r1.  The number is logged into THREAD.last_syscall below.
 */
191_GLOBAL(DoSyscall)
192	stw	r0,THREAD+LAST_SYSCALL(r2)
193	stw	r3,ORIG_GPR3(r1)
194	li	r12,0
195	stw	r12,RESULT(r1)
196	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
197	rlwinm	r11,r11,0,4,2
198	stw	r11,_CCR(r1)
199#ifdef SHOW_SYSCALLS
200	bl	do_show_syscall
201#endif /* SHOW_SYSCALLS */
	/* mask the low bits of r1 to reach thread_info (the kernel stack
	 * is presumed suitably aligned for this mask — 0..18 keeps the
	 * top 19 bits) */
202	rlwinm	r10,r1,0,0,18	/* current_thread_info() */
203	lwz	r11,TI_LOCAL_FLAGS(r10)
204	rlwinm	r11,r11,0,~_TIFL_FORCE_NOERROR
205	stw	r11,TI_LOCAL_FLAGS(r10)
206	lwz	r11,TI_FLAGS(r10)
207	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
208	bne-	syscall_dotrace
209syscall_dotrace_cont:
210	cmplwi	0,r0,NR_syscalls
211	lis	r10,sys_call_table@h
212	ori	r10,r10,sys_call_table@l
213	slwi	r0,r0,2
	/* out-of-range syscall number -> return -ENOSYS (66f below) */
214	bge-	66f
215	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
216	mtlr	r10
217	addi	r9,r1,STACK_FRAME_OVERHEAD
218	PPC440EP_ERR42
219	blrl			/* Call handler */
220	.globl	ret_from_syscall
221ret_from_syscall:
222#ifdef SHOW_SYSCALLS
223	bl	do_show_syscall_exit
224#endif
	/* Return values in [-_LAST_ERRNO, -1] are errors: negate r3 and
	 * set the CR SO bit, unless _TIFL_FORCE_NOERROR is set. */
225	mr	r6,r3
226	li	r11,-_LAST_ERRNO
227	cmplw	0,r3,r11
228	rlwinm	r12,r1,0,0,18	/* current_thread_info() */
229	blt+	30f
230	lwz	r11,TI_LOCAL_FLAGS(r12)
231	andi.	r11,r11,_TIFL_FORCE_NOERROR
232	bne	30f
233	neg	r3,r3
234	lwz	r10,_CCR(r1)	/* Set SO bit in CR */
235	oris	r10,r10,0x1000
236	stw	r10,_CCR(r1)
237
238	/* disable interrupts so current_thread_info()->flags can't change */
23930:	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
240	SYNC
241	MTMSRD(r10)
242	lwz	r9,TI_FLAGS(r12)
243	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
244	bne-	syscall_exit_work
245syscall_exit_cont:
246#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
247	/* If the process has its own DBCR0 value, load it up.  The single
248	   step bit tells us that dbcr0 should be loaded. */
249	lwz	r0,THREAD+THREAD_DBCR0(r2)
250	andis.	r10,r0,DBCR0_IC@h
251	bnel-	load_dbcr0
252#endif
253	stwcx.	r0,0,r1			/* to clear the reservation */
254	lwz	r4,_LINK(r1)
255	lwz	r5,_CCR(r1)
256	mtlr	r4
257	mtcr	r5
258	lwz	r7,_NIP(r1)
259	lwz	r8,_MSR(r1)
260	FIX_SRR1(r8, r0)
261	lwz	r2,GPR2(r1)
262	lwz	r1,GPR1(r1)
263	mtspr	SPRN_SRR0,r7
264	mtspr	SPRN_SRR1,r8
265	SYNC
266	RFI
267
26866:	li	r3,-ENOSYS
269	b	ret_from_syscall
270
271	.globl	ret_from_fork
272ret_from_fork:
273	REST_NVGPRS(r1)
274	bl	schedule_tail
	/* the child side of fork/clone returns 0 */
275	li	r3,0
276	b	ret_from_syscall
277
278/* Traced system call support */
279syscall_dotrace:
	/* save the full register set and mark TRAP as "full set saved"
	 * (trap 0xc00 with clear low bit) before calling the tracer */
280	SAVE_NVGPRS(r1)
281	li	r0,0xc00
282	stw	r0,TRAP(r1)
283	addi	r3,r1,STACK_FRAME_OVERHEAD
284	bl	do_syscall_trace_enter
285	lwz	r0,GPR0(r1)	/* Restore original registers */
286	lwz	r3,GPR3(r1)
287	lwz	r4,GPR4(r1)
288	lwz	r5,GPR5(r1)
289	lwz	r6,GPR6(r1)
290	lwz	r7,GPR7(r1)
291	lwz	r8,GPR8(r1)
292	REST_NVGPRS(r1)
293	b	syscall_dotrace_cont
294
/*
 * Slow path of syscall exit.  On entry (from ret_from_syscall):
 * r9 = TI_FLAGS, r6 = raw syscall result, r3 = (possibly negated)
 * return value, r10 = MSR_KERNEL, interrupts hard-disabled.
 */
295syscall_exit_work:
296	stw	r6,RESULT(r1)	/* Save result */
297	stw	r3,GPR3(r1)	/* Update return value */
298	andi.	r0,r9,_TIF_SYSCALL_T_OR_A
299	beq	5f
300	ori	r10,r10,MSR_EE
301	SYNC
302	MTMSRD(r10)		/* re-enable interrupts */
	/* TRAP low bit set means the non-volatile regs are not yet
	 * saved in the frame; save them before the tracer looks */
303	lwz	r4,TRAP(r1)
304	andi.	r4,r4,1
305	beq	4f
306	SAVE_NVGPRS(r1)
307	li	r4,0xc00
308	stw	r4,TRAP(r1)
3094:
310	addi	r3,r1,STACK_FRAME_OVERHEAD
311	bl	do_syscall_trace_leave
312	REST_NVGPRS(r1)
3132:
314	lwz	r3,GPR3(r1)
315	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
316	SYNC
317	MTMSRD(r10)		/* disable interrupts again */
318	rlwinm	r12,r1,0,0,18	/* current_thread_info() */
319	lwz	r9,TI_FLAGS(r12)
3205:
321	andi.	r0,r9,_TIF_NEED_RESCHED
322	bne	1f
	/* signals are only delivered when returning to user mode */
323	lwz	r5,_MSR(r1)
324	andi.	r5,r5,MSR_PR
325	beq	syscall_exit_cont
326	andi.	r0,r9,_TIF_SIGPENDING
327	beq	syscall_exit_cont
328	b	do_user_signal
3291:
330	ori	r10,r10,MSR_EE
331	SYNC
332	MTMSRD(r10)		/* re-enable interrupts */
333	bl	schedule
334	b	2b
335
336#ifdef SHOW_SYSCALLS
/*
 * Debug helper: printk the syscall number and arguments on entry.
 * Preserves r0/r3-r8 (reloaded from the frame) and LR (via r31).
 */
337do_show_syscall:
338#ifdef SHOW_SYSCALLS_TASK
	/* only trace the task whose task_struct equals show_syscalls_task */
339	lis	r11,show_syscalls_task@ha
340	lwz	r11,show_syscalls_task@l(r11)
341	cmp	0,r2,r11
342	bnelr
343#endif
344	stw	r31,GPR31(r1)
345	mflr	r31
346	lis	r3,7f@ha
347	addi	r3,r3,7f@l
348	lwz	r4,GPR0(r1)
349	lwz	r5,GPR3(r1)
350	lwz	r6,GPR4(r1)
351	lwz	r7,GPR5(r1)
352	lwz	r8,GPR6(r1)
353	lwz	r9,GPR7(r1)
354	bl	printk
355	lis	r3,77f@ha
356	addi	r3,r3,77f@l
357	lwz	r4,GPR8(r1)
358	mr	r5,r2
359	bl	printk
360	lwz	r0,GPR0(r1)
361	lwz	r3,GPR3(r1)
362	lwz	r4,GPR4(r1)
363	lwz	r5,GPR5(r1)
364	lwz	r6,GPR6(r1)
365	lwz	r7,GPR7(r1)
366	lwz	r8,GPR8(r1)
367	mtlr	r31
368	lwz	r31,GPR31(r1)
369	blr
370
/* Debug helper: printk the syscall return value (preserved in RESULT). */
371do_show_syscall_exit:
372#ifdef SHOW_SYSCALLS_TASK
373	lis	r11,show_syscalls_task@ha
374	lwz	r11,show_syscalls_task@l(r11)
375	cmp	0,r2,r11
376	bnelr
377#endif
378	stw	r31,GPR31(r1)
379	mflr	r31
380	stw	r3,RESULT(r1)	/* Save result */
381	mr	r4,r3
382	lis	r3,79f@ha
383	addi	r3,r3,79f@l
384	bl	printk
385	lwz	r3,RESULT(r1)
386	mtlr	r31
387	lwz	r31,GPR31(r1)
388	blr
389
/* printk format strings used above (7/77 together form one line) */
3907:	.string	"syscall %d(%x, %x, %x, %x, %x, "
39177:	.string	"%x), current=%p\n"
39279:	.string	" -> %x\n"
393	.align	2,0
394
395#ifdef SHOW_SYSCALLS_TASK
	/* -1 = trace no task until this is patched at run time */
396	.data
397	.globl	show_syscalls_task
398show_syscalls_task:
399	.long	-1
400	.text
401#endif
402#endif /* SHOW_SYSCALLS */
403
404/*
405 * The sigsuspend and rt_sigsuspend system calls can call do_signal
406 * and thus put the process into the stopped state where we might
407 * want to examine its user state with ptrace.  Therefore we need
408 * to save all the nonvolatile registers (r13 - r31) before calling
409 * the C code.
410 */
411	.globl	ppc_sigsuspend
412ppc_sigsuspend:
413	SAVE_NVGPRS(r1)
414	lwz	r0,TRAP(r1)
415	rlwinm	r0,r0,0,0,30	/* clear LSB to indicate full */
416	stw	r0,TRAP(r1)	/* register set saved */
417	b	sys_sigsuspend
418
419	.globl	ppc_rt_sigsuspend
420ppc_rt_sigsuspend:
421	SAVE_NVGPRS(r1)
422	lwz	r0,TRAP(r1)
423	rlwinm	r0,r0,0,0,30
424	stw	r0,TRAP(r1)
425	b	sys_rt_sigsuspend
426
/* The same full-register-set treatment is applied before the C
 * implementations of fork/vfork/clone/swapcontext. */
427	.globl	ppc_fork
428ppc_fork:
429	SAVE_NVGPRS(r1)
430	lwz	r0,TRAP(r1)
431	rlwinm	r0,r0,0,0,30	/* clear LSB to indicate full */
432	stw	r0,TRAP(r1)	/* register set saved */
433	b	sys_fork
434
435	.globl	ppc_vfork
436ppc_vfork:
437	SAVE_NVGPRS(r1)
438	lwz	r0,TRAP(r1)
439	rlwinm	r0,r0,0,0,30	/* clear LSB to indicate full */
440	stw	r0,TRAP(r1)	/* register set saved */
441	b	sys_vfork
442
443	.globl	ppc_clone
444ppc_clone:
445	SAVE_NVGPRS(r1)
446	lwz	r0,TRAP(r1)
447	rlwinm	r0,r0,0,0,30	/* clear LSB to indicate full */
448	stw	r0,TRAP(r1)	/* register set saved */
449	b	sys_clone
450
451	.globl	ppc_swapcontext
452ppc_swapcontext:
453	SAVE_NVGPRS(r1)
454	lwz	r0,TRAP(r1)
455	rlwinm	r0,r0,0,0,30	/* clear LSB to indicate full */
456	stw	r0,TRAP(r1)	/* register set saved */
457	b	sys_swapcontext
458
459/*
460 * Top-level page fault handling.
461 * This is in assembler because if do_page_fault tells us that
462 * it is a bad kernel page fault, we want to save the non-volatile
463 * registers before calling bad_page_fault.
464 */
/* On entry: r4 = faulting address (stored as DAR in the frame). */
465	.globl	handle_page_fault
466handle_page_fault:
467	stw	r4,_DAR(r1)
468	addi	r3,r1,STACK_FRAME_OVERHEAD
469	bl	do_page_fault
	/* nonzero return => bad kernel fault: save the NV regs, mark
	 * TRAP as "full register set saved" and call bad_page_fault
	 * with (regs, address, return code) */
470	cmpwi	r3,0
471	beq+	ret_from_except
472	SAVE_NVGPRS(r1)
473	lwz	r0,TRAP(r1)
474	clrrwi	r0,r0,1
475	stw	r0,TRAP(r1)
476	mr	r5,r3
477	addi	r3,r1,STACK_FRAME_OVERHEAD
478	lwz	r4,_DAR(r1)
479	bl	bad_page_fault
480	b	ret_from_except_full
481
482/*
483 * This routine switches between two different tasks.  The process
484 * state of one is saved on its kernel stack.  Then the state
485 * of the other is restored from its kernel stack.  The memory
486 * management hardware is updated to the second process's state.
487 * Finally, we can return to the second process.
488 * On entry, r3 points to the THREAD for the current task, r4
489 * points to the THREAD for the new task.
490 *
491 * This routine is always called with interrupts disabled.
492 *
493 * Note: there are two ways to get to the "going out" portion
494 * of this code; either by coming in via the entry (_switch)
495 * or via "fork" which must set up an environment equivalent
496 * to the "_switch" path.  If you change this , you'll have to
497 * change the fork code also.
498 *
499 * The code which creates the new task context is in 'copy_thread'
500 * in arch/ppc/kernel/process.c
501 */
502_GLOBAL(_switch)
503	stwu	r1,-INT_FRAME_SIZE(r1)
504	mflr	r0
505	stw	r0,INT_FRAME_SIZE+4(r1)
506	/* r3-r12 are caller saved -- Cort */
507	SAVE_NVGPRS(r1)
508	stw	r0,_NIP(r1)	/* Return to switch caller */
509	mfmsr	r11
	/* Build a mask (r0) of the lazily-managed MSR bits -- FP, plus
	 * AltiVec/SPE where configured -- saving the associated status
	 * SPRs first, then clear any of those bits that are set. */
510	li	r0,MSR_FP	/* Disable floating-point */
511#ifdef CONFIG_ALTIVEC
512BEGIN_FTR_SECTION
513	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
514	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
515	stw	r12,THREAD+THREAD_VRSAVE(r2)
516END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
517#endif /* CONFIG_ALTIVEC */
518#ifdef CONFIG_SPE
519	oris	r0,r0,MSR_SPE@h	/* Disable SPE */
520	mfspr	r12,SPRN_SPEFSCR	/* save spefscr register value */
521	stw	r12,THREAD+THREAD_SPEFSCR(r2)
522#endif /* CONFIG_SPE */
523	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
524	beq+	1f
525	andc	r11,r11,r0
526	MTMSRD(r11)
527	isync
5281:	stw	r11,_MSR(r1)
529	mfcr	r10
530	stw	r10,_CCR(r1)
531	stw	r1,KSP(r3)	/* Set old stack pointer */
532
533#ifdef CONFIG_SMP
534	/* We need a sync somewhere here to make sure that if the
535	 * previous task gets rescheduled on another CPU, it sees all
536	 * stores it has performed on this one.
537	 */
538	sync
539#endif /* CONFIG_SMP */
540
541	tophys(r0,r4)
542	CLR_TOP32(r0)
543	mtspr	SPRN_SPRG3,r0	/* Update current THREAD phys addr */
544	lwz	r1,KSP(r4)	/* Load new stack pointer */
545
546	/* save the old current 'last' for return value */
547	mr	r3,r2
548	addi	r2,r4,-THREAD	/* Update current */
549
550#ifdef CONFIG_ALTIVEC
551BEGIN_FTR_SECTION
552	lwz	r0,THREAD+THREAD_VRSAVE(r2)
553	mtspr	SPRN_VRSAVE,r0	/* if G4, restore VRSAVE reg */
554END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
555#endif /* CONFIG_ALTIVEC */
556#ifdef CONFIG_SPE
557	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
558	mtspr	SPRN_SPEFSCR,r0	/* restore SPEFSCR reg */
559#endif /* CONFIG_SPE */
560
561	lwz	r0,_CCR(r1)
562	mtcrf	0xFF,r0
563	/* r3-r12 are destroyed -- Cort */
564	REST_NVGPRS(r1)
565
566	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
567	mtlr	r4
568	addi	r1,r1,INT_FRAME_SIZE
569	blr
570
571	.globl	fast_exception_return
/*
 * Quick exception exit for handlers that left the frame pointed to
 * by r11 with only the volatile registers saved.  r9/r12 hold the
 * saved MSR/NIP and are moved into SRR1/SRR0 before being restored
 * themselves from the frame.
 */
572fast_exception_return:
573#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
574	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
575	beq	1f			/* if not, we've got problems */
576#endif
577
5782:	REST_4GPRS(3, r11)
579	lwz	r10,_CCR(r11)
580	REST_GPR(1, r11)
581	mtcr	r10
582	lwz	r10,_LINK(r11)
583	mtlr	r10
584	REST_GPR(10, r11)
585	mtspr	SPRN_SRR1,r9
586	mtspr	SPRN_SRR0,r12
587	REST_GPR(9, r11)
588	REST_GPR(12, r11)
589	lwz	r11,GPR11(r11)
590	SYNC
591	RFI
592
593#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
594/* check if the exception happened in a restartable section */
5951:	lis	r3,exc_exit_restart_end@ha
596	addi	r3,r3,exc_exit_restart_end@l
597	cmplw	r12,r3
598	bge	3f
599	lis	r4,exc_exit_restart@ha
600	addi	r4,r4,exc_exit_restart@l
601	cmplw	r12,r4
602	blt	3f
	/* inside the window: count the restart and resume the exit path */
603	lis	r3,fee_restarts@ha
604	tophys(r3,r3)
605	lwz	r5,fee_restarts@l(r3)
606	addi	r5,r5,1
607	stw	r5,fee_restarts@l(r3)
608	mr	r12,r4		/* restart at exc_exit_restart */
609	b	2b
610
611	.comm	fee_restarts,4
612
613/* aargh, a nonrecoverable interrupt, panic */
614/* aargh, we don't know which trap this is */
615/* but the 601 doesn't implement the RI bit, so assume it's OK */
6163:
617BEGIN_FTR_SECTION
618	b	2b
619END_FTR_SECTION_IFSET(CPU_FTR_601)
620	li	r10,-1
621	stw	r10,TRAP(r11)
622	addi	r3,r1,STACK_FRAME_OVERHEAD
623	lis	r10,MSR_KERNEL@h
624	ori	r10,r10,MSR_KERNEL@l
	/* the two .long words below form the handler / return-address
	 * pair that transfer_to_handler reads through LR */
625	bl	transfer_to_handler_full
626	.long	nonrecoverable_exception
627	.long	ret_from_except
628#endif
629
630	.globl	sigreturn_exit
/*
 * Exit path after sigreturn: r3 points at the pt_regs, from which
 * the kernel stack pointer is reconstructed.
 */
631sigreturn_exit:
632	subi	r1,r3,STACK_FRAME_OVERHEAD
633	rlwinm	r12,r1,0,0,18	/* current_thread_info() */
634	lwz	r9,TI_FLAGS(r12)
635	andi.	r0,r9,_TIF_SYSCALL_T_OR_A
636	bnel-	do_syscall_trace_leave
637	/* fall through */
638
639	.globl	ret_from_except_full
640ret_from_except_full:
641	REST_NVGPRS(r1)
642	/* fall through */
643
644	.globl	ret_from_except
645ret_from_except:
646	/* Hard-disable interrupts so that current_thread_info()->flags
647	 * can't change between when we test it and when we return
648	 * from the interrupt. */
649	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
650	SYNC			/* Some chip revs have problems here... */
651	MTMSRD(r10)		/* disable interrupts */
652
653	lwz	r3,_MSR(r1)	/* Returning to user mode? */
654	andi.	r0,r3,MSR_PR
655	beq	resume_kernel
656
657user_exc_return:		/* r10 contains MSR_KERNEL here */
658	/* Check current_thread_info()->flags */
659	rlwinm	r9,r1,0,0,18
660	lwz	r9,TI_FLAGS(r9)
661	andi.	r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED)
662	bne	do_work
663
664restore_user:
665#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
666	/* Check whether this process has its own DBCR0 value.  The single
667	   step bit tells us that dbcr0 should be loaded. */
668	lwz	r0,THREAD+THREAD_DBCR0(r2)
669	andis.	r10,r0,DBCR0_IC@h
670	bnel-	load_dbcr0
671#endif
672
673#ifdef CONFIG_PREEMPT
674	b	restore
675
676/* N.B. the only way to get here is from the beq following ret_from_except. */
677resume_kernel:
	/* only preempt if preempt_count is zero, NEED_RESCHED is set
	 * and the interrupted context had interrupts enabled (r3 still
	 * holds the saved MSR loaded in ret_from_except) */
678	/* check current_thread_info->preempt_count */
679	rlwinm	r9,r1,0,0,18
680	lwz	r0,TI_PREEMPT(r9)
681	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
682	bne	restore
683	lwz	r0,TI_FLAGS(r9)
684	andi.	r0,r0,_TIF_NEED_RESCHED
685	beq+	restore
686	andi.	r0,r3,MSR_EE	/* interrupts off? */
687	beq	restore		/* don't schedule if so */
6881:	bl	preempt_schedule_irq
689	rlwinm	r9,r1,0,0,18
690	lwz	r3,TI_FLAGS(r9)
691	andi.	r0,r3,_TIF_NEED_RESCHED
692	bne-	1b
693#else
694resume_kernel:
695#endif /* CONFIG_PREEMPT */
696
697	/* interrupts are hard-disabled at this point */
/* Common register-restore path for returning from an exception. */
698restore:
699	lwz	r0,GPR0(r1)
700	lwz	r2,GPR2(r1)
701	REST_4GPRS(3, r1)
702	REST_2GPRS(7, r1)
703
704	lwz	r10,_XER(r1)
705	lwz	r11,_CTR(r1)
706	mtspr	SPRN_XER,r10
707	mtctr	r11
708
709	PPC405_ERR77(0,r1)
710	stwcx.	r0,0,r1			/* to clear the reservation */
711
712#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
713	lwz	r9,_MSR(r1)
714	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
715	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */
716
717	lwz	r10,_CCR(r1)
718	lwz	r11,_LINK(r1)
719	mtcrf	0xFF,r10
720	mtlr	r11
721
722	/*
723	 * Once we put values in SRR0 and SRR1, we are in a state
724	 * where exceptions are not recoverable, since taking an
725	 * exception will trash SRR0 and SRR1.  Therefore we clear the
726	 * MSR:RI bit to indicate this.  If we do take an exception,
727	 * we can't return to the point of the exception but we
728	 * can restart the exception exit path at the label
729	 * exc_exit_restart below.  -- paulus
730	 */
731	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
732	SYNC
733	MTMSRD(r10)		/* clear the RI bit */
734	.globl exc_exit_restart
735exc_exit_restart:
736	lwz	r9,_MSR(r1)
737	lwz	r12,_NIP(r1)
738	FIX_SRR1(r9,r10)
739	mtspr	SPRN_SRR0,r12
740	mtspr	SPRN_SRR1,r9
741	REST_4GPRS(9, r1)
742	lwz	r1,GPR1(r1)
743	.globl exc_exit_restart_end
744exc_exit_restart_end:
745	SYNC
746	RFI
747
748#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
749	/*
750	 * This is a bit different on 4xx/Book-E because it doesn't have
751	 * the RI bit in the MSR.
752	 * The TLB miss handler checks if we have interrupted
753	 * the exception exit path and restarts it if so
754	 * (well maybe one day it will... :).
755	 */
756	lwz	r11,_LINK(r1)
757	mtlr	r11
758	lwz	r10,_CCR(r1)
759	mtcrf	0xff,r10
760	REST_2GPRS(9, r1)
761	.globl exc_exit_restart
762exc_exit_restart:
763	lwz	r11,_NIP(r1)
764	lwz	r12,_MSR(r1)
765exc_exit_start:
766	mtspr	SPRN_SRR0,r11
767	mtspr	SPRN_SRR1,r12
768	REST_2GPRS(11, r1)
769	lwz	r1,GPR1(r1)
770	.globl exc_exit_restart_end
771exc_exit_restart_end:
772	PPC405_ERR77_SYNC
773	rfi
774	b	.			/* prevent prefetch past rfi */
775
776/*
777 * Returning from a critical interrupt in user mode doesn't need
778 * to be any different from a normal exception.  For a critical
779 * interrupt in the kernel, we just return (without checking for
780 * preemption) since the interrupt may have happened at some crucial
781 * place (e.g. inside the TLB miss handler), and because we will be
782 * running with r1 pointing into critical_stack, not the current
783 * process's kernel stack (and therefore current_thread_info() will
784 * give the wrong answer).
785 * We have to restore various SPRs that may have been in use at the
786 * time of the critical interrupt.
787 *
788 */
789#ifdef CONFIG_40x
790#define PPC_40x_TURN_OFF_MSR_DR						    \
791	/* avoid any possible TLB misses here by turning off MSR.DR, we \
792	 * assume the instructions here are mapped by a pinned TLB entry */ \
793	li	r10,MSR_IR;						    \
794	mtmsr	r10;							    \
795	isync;								    \
796	tophys(r1, r1);
797#else
798#define PPC_40x_TURN_OFF_MSR_DR
799#endif
800
/*
 * Common return path for the critical/debug/machine-check exception
 * levels: restores the full register set plus DEAR/ESR, then returns
 * through the level's own SRR pair and rfi variant.
 */
801#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
802	REST_NVGPRS(r1);						\
803	lwz	r3,_MSR(r1);						\
804	andi.	r3,r3,MSR_PR;						\
805	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
806	bne	user_exc_return;					\
807	lwz	r0,GPR0(r1);						\
808	lwz	r2,GPR2(r1);						\
809	REST_4GPRS(3, r1);						\
810	REST_2GPRS(7, r1);						\
811	lwz	r10,_XER(r1);						\
812	lwz	r11,_CTR(r1);						\
813	mtspr	SPRN_XER,r10;						\
814	mtctr	r11;							\
815	PPC405_ERR77(0,r1);						\
816	stwcx.	r0,0,r1;		/* to clear the reservation */	\
817	lwz	r11,_LINK(r1);						\
818	mtlr	r11;							\
819	lwz	r10,_CCR(r1);						\
820	mtcrf	0xff,r10;						\
821	PPC_40x_TURN_OFF_MSR_DR;					\
822	lwz	r9,_DEAR(r1);						\
823	lwz	r10,_ESR(r1);						\
824	mtspr	SPRN_DEAR,r9;						\
825	mtspr	SPRN_ESR,r10;						\
826	lwz	r11,_NIP(r1);						\
827	lwz	r12,_MSR(r1);						\
828	mtspr	exc_lvl_srr0,r11;					\
829	mtspr	exc_lvl_srr1,r12;					\
830	lwz	r9,GPR9(r1);						\
831	lwz	r12,GPR12(r1);						\
832	lwz	r10,GPR10(r1);						\
833	lwz	r11,GPR11(r1);						\
834	lwz	r1,GPR1(r1);						\
835	PPC405_ERR77_SYNC;						\
836	exc_lvl_rfi;							\
837	b	.;		/* prevent prefetch past exc_lvl_rfi */
838
839	.globl	ret_from_crit_exc
840ret_from_crit_exc:
841	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)
842
843#ifdef CONFIG_BOOKE
844	.globl	ret_from_debug_exc
845ret_from_debug_exc:
846	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI)
847
848	.globl	ret_from_mcheck_exc
849ret_from_mcheck_exc:
850	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI)
851#endif /* CONFIG_BOOKE */
852
853/*
854 * Load the DBCR0 value for a task that is being ptraced,
855 * having first saved away the global DBCR0.  Note that r0
856 * has the dbcr0 value to set upon entry to this.
857 */
858load_dbcr0:
859	mfmsr	r10		/* first disable debug exceptions */
860	rlwinm	r10,r10,0,~MSR_DE
861	mtmsr	r10
862	isync
	/* global_dbcr0 layout: word 0 = saved DBCR0, word 1 = count
	 * (the count is decremented on the transfer_to_handler path) */
863	mfspr	r10,SPRN_DBCR0
864	lis	r11,global_dbcr0@ha
865	addi	r11,r11,global_dbcr0@l
866	stw	r10,0(r11)
867	mtspr	SPRN_DBCR0,r0
868	lwz	r10,4(r11)
869	addi	r10,r10,1
870	stw	r10,4(r11)
871	li	r11,-1
872	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
873	blr
874
875	.comm	global_dbcr0,8
876#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
877
878do_work:			/* r10 contains MSR_KERNEL here */
879	andi.	r0,r9,_TIF_NEED_RESCHED
880	beq	do_user_signal
881
882do_resched:			/* r10 contains MSR_KERNEL here */
883	ori	r10,r10,MSR_EE
884	SYNC
885	MTMSRD(r10)		/* hard-enable interrupts */
886	bl	schedule
887recheck:
	/* re-test the work flags with interrupts hard-disabled again */
888	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
889	SYNC
890	MTMSRD(r10)		/* disable interrupts */
891	rlwinm	r9,r1,0,0,18
892	lwz	r9,TI_FLAGS(r9)
893	andi.	r0,r9,_TIF_NEED_RESCHED
894	bne-	do_resched
895	andi.	r0,r9,_TIF_SIGPENDING
896	beq	restore_user
897do_user_signal:			/* r10 contains MSR_KERNEL here */
898	ori	r10,r10,MSR_EE
899	SYNC
900	MTMSRD(r10)		/* hard-enable interrupts */
901	/* save r13-r31 in the exception frame, if not already done */
902	lwz	r3,TRAP(r1)
903	andi.	r0,r3,1
904	beq	2f
905	SAVE_NVGPRS(r1)
906	rlwinm	r3,r3,0,0,30
907	stw	r3,TRAP(r1)
9082:	li	r3,0
909	addi	r4,r1,STACK_FRAME_OVERHEAD
910	bl	do_signal
911	REST_NVGPRS(r1)
912	b	recheck
913
914/*
915 * We come here when we are at the end of handling an exception
916 * that occurred at a place where taking an exception will lose
917 * state information, such as the contents of SRR0 and SRR1.
918 */
/* In: r12 = interrupted NIP.  If it lies inside the restartable
 * exception-exit window [exc_exit_restart, exc_exit_restart_end),
 * resume there; otherwise the interrupted state is lost. */
919nonrecoverable:
920	lis	r10,exc_exit_restart_end@ha
921	addi	r10,r10,exc_exit_restart_end@l
922	cmplw	r12,r10
923	bge	3f
924	lis	r11,exc_exit_restart@ha
925	addi	r11,r11,exc_exit_restart@l
926	cmplw	r12,r11
927	blt	3f
928	lis	r10,ee_restarts@ha
929	lwz	r12,ee_restarts@l(r10)
930	addi	r12,r12,1
931	stw	r12,ee_restarts@l(r10)
932	mr	r12,r11		/* restart at exc_exit_restart */
933	blr
9343:	/* OK, we can't recover, kill this process */
935	/* but the 601 doesn't implement the RI bit, so assume it's OK */
936BEGIN_FTR_SECTION
937	blr
938END_FTR_SECTION_IFSET(CPU_FTR_601)
	/* make sure the full register set is in the frame first */
939	lwz	r3,TRAP(r1)
940	andi.	r0,r3,1
941	beq	4f
942	SAVE_NVGPRS(r1)
943	rlwinm	r3,r3,0,0,30
944	stw	r3,TRAP(r1)
9454:	addi	r3,r1,STACK_FRAME_OVERHEAD
946	bl	nonrecoverable_exception
947	/* shouldn't return */
948	b	4b
949
950	.comm	ee_restarts,4
951
952/*
953 * PROM code for specific machines follows.  Put it
954 * here so it's easy to add arch-specific sections later.
955 * -- Cort
956 */
957#ifdef CONFIG_PPC_OF
958/*
959 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
960 * called with the MMU off.
961 */
/*
 * The caller's MSR is saved at 8(r1) and restored on the way out.
 * SRR0/SRR1 are loaded with the RTAS entry point and an MSR with
 * IR/DR cleared, so the RFI enters RTAS with translation off; LR is
 * the physical address of the "1:" return stub.  SPRG2 holds the
 * physical stack pointer while RTAS runs and is cleared afterwards
 * -- presumably so exception code can detect RTAS mode; confirm
 * against the exception prologs.
 */
962_GLOBAL(enter_rtas)
963	stwu	r1,-INT_FRAME_SIZE(r1)
964	mflr	r0
965	stw	r0,INT_FRAME_SIZE+4(r1)
966	lis	r4,rtas_data@ha
967	lwz	r4,rtas_data@l(r4)
968	lis	r6,1f@ha	/* physical return address for rtas */
969	addi	r6,r6,1f@l
970	tophys(r6,r6)
971	tophys(r7,r1)
972	lis	r8,rtas_entry@ha
973	lwz	r8,rtas_entry@l(r8)
974	mfmsr	r9
975	stw	r9,8(r1)
976	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
977	SYNC			/* disable interrupts so SRR0/1 */
978	MTMSRD(r0)		/* don't get trashed */
979	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
980	mtlr	r6
981	CLR_TOP32(r7)
982	mtspr	SPRN_SPRG2,r7
983	mtspr	SPRN_SRR0,r8
984	mtspr	SPRN_SRR1,r9
985	RFI
9861:	tophys(r9,r1)
987	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
988	lwz	r9,8(r9)	/* original msr value */
989	FIX_SRR1(r9,r0)
990	addi	r1,r1,INT_FRAME_SIZE
991	li	r0,0
992	mtspr	SPRN_SPRG2,r0
993	mtspr	SPRN_SRR0,r8
994	mtspr	SPRN_SRR1,r9
995	RFI			/* return to caller */
996
997	.globl	machine_check_in_rtas
998machine_check_in_rtas:
	/* twi 31,0,0 traps unconditionally: a machine check while in
	 * RTAS cannot be handled normally (MMU off, foreign stack) */
999	twi	31,0,0
1000	/* XXX load up BATs and panic */
1001
1002#endif /* CONFIG_PPC_OF */