author		Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
commit		1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree		0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/ppc64/kernel/entry.S
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'arch/ppc64/kernel/entry.S')
-rw-r--r--	arch/ppc64/kernel/entry.S	845
1 file changed, 845 insertions, 0 deletions
diff --git a/arch/ppc64/kernel/entry.S b/arch/ppc64/kernel/entry.S
new file mode 100644
index 000000000000..d3604056e1a9
--- /dev/null
+++ b/arch/ppc64/kernel/entry.S
@@ -0,0 +1,845 @@
/*
 * arch/ppc64/kernel/entry.S
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/offsets.h>
#include <asm/cputable.h>

#ifdef CONFIG_PPC_ISERIES
#define DO_SOFT_DISABLE
#endif

/*
 * System calls.
 */
	.section ".toc","aw"
.SYS_CALL_TABLE:
	.tc .sys_call_table[TC],.sys_call_table

.SYS_CALL_TABLE32:
	.tc .sys_call_table32[TC],.sys_call_table32

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc ID_72656773_68657265[TC],0x7265677368657265
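/* Assuming ASCII encoding, 0x7265677368657265 is the string "regshere"
 * (0x72 = 'r', 0x65 = 'e', 0x67 = 'g', 0x73 = 's', ...); the TC symbol
 * name above encodes the same bytes. */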

	.section ".text"
	.align 7

#undef SHOW_SYSCALLS

	.globl system_call_common
system_call_common:
	andi.	r10,r12,MSR_PR
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f
	ld	r1,PACAKSAVE(r13)
1:	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	crclr	so
	mfcr	r9
	mflr	r10
	li	r11,0xc01
	std	r9,_CCR(r1)
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
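	/* 0xc01 is exception vector 0xc00 (system call) with the low bit
	 * set; a set low bit in _TRAP records that the non-volatile GPRs
	 * have not been saved yet (see save_nvgprs below). */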
	mfxer	r9
	mfctr	r10
	std	r9,_XER(r1)
	std	r10,_CTR(r1)
	std	r3,ORIG_GPR3(r1)
	ld	r2,PACATOC(r13)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
#ifdef CONFIG_PPC_ISERIES
	/* Hack for handling interrupts when soft-enabling on iSeries */
	cmpdi	cr1,r0,0x5555		/* syscall 0x5555 */
	andi.	r10,r12,MSR_PR		/* from kernel */
	crand	4*cr0+eq,4*cr1+eq,4*cr0+eq
	beq	hardware_interrupt_entry
	lbz	r10,PACAPROCENABLED(r13)
	std	r10,SOFTE(r1)
#endif
	mfmsr	r11
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1

#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
#endif
	clrrdi	r11,r1,THREAD_SHIFT
	li	r12,0
	ld	r10,TI_FLAGS(r11)
	stb	r12,TI_SC_NOERR(r11)
	andi.	r11,r10,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmpldi	0,r0,NR_syscalls
	bge-	syscall_enosys

system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to the 32-bit or default sys_call_table here,
 * based on the caller's run-mode / personality.
 */
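/*
 * Roughly, in C (an illustrative sketch only -- these are not real C
 * symbols, they mirror the TOC entries above):
 *
 *	handler = (flags & _TIF_32BIT) ? sys_call_table32[num]
 *				       : sys_call_table[num];
 *	ret = handler(r3, r4, r5, r6, r7, r8);
 *
 * For 32-bit tasks the six argument registers are first truncated to
 * their low 32 bits, as the clrldi instructions below do.
 */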
	ld	r11,.SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	ld	r11,.SYS_CALL_TABLE32@toc(2)
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
	slwi	r0,r0,3
	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
	mtctr	r10
	bctrl			/* Call handler */

syscall_exit:
#ifdef SHOW_SYSCALLS
	std	r3,GPR3(r1)
	bl	.do_show_syscall_exit
	ld	r3,GPR3(r1)
#endif
	std	r3,RESULT(r1)
	ld	r5,_CCR(r1)
	li	r10,-_LAST_ERRNO
	cmpld	r3,r10
	clrrdi	r12,r1,THREAD_SHIFT
	bge-	syscall_error
syscall_error_cont:

	/* check for syscall tracing or audit */
	ld	r9,TI_FLAGS(r12)
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	bne-	syscall_exit_trace
syscall_exit_trace_cont:

	/* disable interrupts so current_thread_info()->flags can't change,
	   and so that we don't get interrupted after loading SRR0/1. */
	ld	r8,_MSR(r1)
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore
	mfmsr	r10
	rldicl	r10,r10,48,1
	rotldi	r10,r10,16
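	/* The rldicl/rotldi pair above is the standard idiom for clearing
	 * MSR_EE: rotate the MSR left by 48 bits so EE becomes the most
	 * significant bit, let rldicl's mask clear it, then rotate by the
	 * remaining 16 bits to restore the original bit positions. */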
	mtmsrd	r10,1
	ld	r9,TI_FLAGS(r12)
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
	bne-	syscall_exit_work
	ld	r7,_NIP(r1)
	stdcx.	r0,0,r1			/* to clear the reservation */
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)
	beq-	1f			/* only restore r13 if */
	ld	r13,GPR13(r1)		/* returning to usermode */
1:	ld	r2,GPR2(r1)
	li	r12,MSR_RI
	andc	r10,r10,r12
	mtmsrd	r10,1			/* clear MSR.RI */
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SRR0,r7
	mtspr	SRR1,r8
	rfid
	b	.	/* prevent speculative execution */

syscall_enosys:
	li	r3,-ENOSYS
	std	r3,RESULT(r1)
	clrrdi	r12,r1,THREAD_SHIFT
	ld	r5,_CCR(r1)

syscall_error:
	lbz	r11,TI_SC_NOERR(r12)
	cmpwi	0,r11,0
	bne-	syscall_error_cont
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
	b	syscall_error_cont

/* Traced system call support */
syscall_dotrace:
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_enter
	ld	r0,GPR0(r1)	/* Restore original registers */
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	clrrdi	r10,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r10)
	b	syscall_dotrace_cont

syscall_exit_trace:
	std	r3,GPR3(r1)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_leave
	REST_NVGPRS(r1)
	ld	r3,GPR3(r1)
	ld	r5,_CCR(r1)
	clrrdi	r12,r1,THREAD_SHIFT
	b	syscall_exit_trace_cont

/* Stuff to do on exit from a system call. */
syscall_exit_work:
	std	r3,GPR3(r1)
	std	r5,_CCR(r1)
	b	.ret_from_except_lite

/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr
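	/* The low bit of _TRAP acts as a "non-volatiles not yet saved"
	 * flag: beqlr- returns early when the bit is already clear, and
	 * clrrdi clears it once r14-r31 have been written to the frame. */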

/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace. Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code. Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */
_GLOBAL(ppc32_sigsuspend)
	bl	.save_nvgprs
	bl	.sys32_sigsuspend
	b	70f

_GLOBAL(ppc64_rt_sigsuspend)
	bl	.save_nvgprs
	bl	.sys_rt_sigsuspend
	b	70f

_GLOBAL(ppc32_rt_sigsuspend)
	bl	.save_nvgprs
	bl	.sys32_rt_sigsuspend
	/* If sigsuspend() returns zero, we are going into a signal handler */
70:	cmpdi	0,r3,0
	beq	.ret_from_except
	/* If it returned -EINTR, we need to return via syscall_exit to set
	   the SO bit in cr0 and potentially stop for ptrace. */
	b	syscall_exit

_GLOBAL(ppc_fork)
	bl	.save_nvgprs
	bl	.sys_fork
	b	syscall_exit

_GLOBAL(ppc_vfork)
	bl	.save_nvgprs
	bl	.sys_vfork
	b	syscall_exit

_GLOBAL(ppc_clone)
	bl	.save_nvgprs
	bl	.sys_clone
	b	syscall_exit

_GLOBAL(ppc32_swapcontext)
	bl	.save_nvgprs
	bl	.sys32_swapcontext
	b	80f

_GLOBAL(ppc64_swapcontext)
	bl	.save_nvgprs
	bl	.sys_swapcontext
	b	80f

_GLOBAL(ppc32_sigreturn)
	bl	.sys32_sigreturn
	b	80f

_GLOBAL(ppc32_rt_sigreturn)
	bl	.sys32_rt_sigreturn
	b	80f

_GLOBAL(ppc64_rt_sigreturn)
	bl	.sys_rt_sigreturn

80:	cmpdi	0,r3,0
	blt	syscall_exit
	clrrdi	r4,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r4)
	andi.	r4,r4,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq+	81f
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_leave
81:	b	.ret_from_except

_GLOBAL(ret_from_fork)
	bl	.schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	syscall_exit

/*
 * This routine switches between two different tasks. The process
 * state of one is saved on its kernel stack. Then the state
 * of the other is restored from its kernel stack. The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path. If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/ppc64/kernel/process.c
 */
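/*
 * For orientation, the caller's view from C is roughly (an illustrative
 * sketch of the usual switch_to() contract, not the exact macro):
 *
 *	struct task_struct *last;
 *	last = _switch(&prev->thread, &next->thread);
 *
 * The return value is the previously-running task, recovered from its
 * THREAD pointer at the end of this routine.
 */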
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mflr	r20		/* Return to switch caller */
	mfmsr	r22
	li	r0, MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
	std	r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	and.	r0,r0,r22
	beq+	1f
	andc	r22,r22,r0
	mtmsrd	r22
	isync
1:	std	r20,_NIP(r1)
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
BEGIN_FTR_SECTION
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */
	oris	r0,r6,0x0800	/* set C (class) bit */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r6,r6,(SLB_ESID_V)@h
	ori	r6,r6,(SLB_NUM_BOLTED-1)@l
	slbie	r0
	slbie	r0		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r6
	isync

2:
END_FTR_SECTION_IFSET(CPU_FTR_SLB)
	clrrdi	r7,r8,THREAD_SHIFT	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE
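	/* i.e. PACAKSAVE will point one full switch frame below the top
	 * of the new task's kernel stack, so the next kernel entry from
	 * userspace (see system_call_common) finds its frame space
	 * already reserved. */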

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r0,THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0	/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

#ifdef CONFIG_PPC_ISERIES
	clrrdi	r7,r1,THREAD_SHIFT	/* get current_thread_info() */
	ld	r7,TI_FLAGS(r7)		/* Get run light flag */
	mfspr	r9,CTRLF
	srdi	r7,r7,TIF_RUN_LIGHT
	insrdi	r9,r7,1,63		/* Insert run light into CTRL */
	mtspr	CTRLT,r9
#endif

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	.ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
	mfmsr	r10		/* Get current interrupt state */
	rldicl	r9,r10,48,1	/* clear MSR_EE */
	rotldi	r9,r9,16
	mtmsrd	r9,1		/* Update machine state */

#ifdef CONFIG_PREEMPT
	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
	li	r0,_TIF_NEED_RESCHED	/* bits to check */
	ld	r3,_MSR(r1)
	ld	r4,TI_FLAGS(r9)
	/* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
	rlwimi	r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
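	/* After the rlwimi, r0 has _TIF_SIGPENDING set only when MSR_PR
	 * was set, so pending signals are only considered when returning
	 * to user mode; _TIF_NEED_RESCHED is checked either way. */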
	and.	r0,r4,r0	/* check NEED_RESCHED and maybe SIGPENDING */
	bne	do_work

#else /* !CONFIG_PREEMPT */
	ld	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r3,r3,MSR_PR
	beq	restore		/* if not, just restore regs and return */

	/* Check current_thread_info()->flags */
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	do_work
#endif

restore:
#ifdef CONFIG_PPC_ISERIES
	ld	r5,SOFTE(r1)
	cmpdi	0,r5,0
	beq	4f
	/* Check for pending interrupts (iSeries) */
	ld	r3,PACALPPACA+LPPACAANYINT(r13)
	cmpdi	r3,0
	beq+	4f		/* skip do_IRQ if no interrupts */

	li	r3,0
	stb	r3,PACAPROCENABLED(r13)	/* ensure we are soft-disabled */
	ori	r10,r10,MSR_EE
	mtmsrd	r10		/* hard-enable again */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite	/* loop back and handle more */

4:	stb	r5,PACAPROCENABLED(r13)
#endif

	ld	r3,_MSR(r1)
	andi.	r0,r3,MSR_RI
	beq-	unrecov_restore

	andi.	r0,r3,MSR_PR

	/*
	 * r13 is our per-cpu area; only restore it if we are returning to
	 * userspace.
	 */
	beq	1f
	REST_GPR(13, r1)
1:
	ld	r3,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r3
	mtlr	r0
	ld	r3,_XER(r1)
	mtspr	XER,r3

	REST_8GPRS(5, r1)

	stdcx.	r0,0,r1		/* to clear the reservation */

	mfmsr	r0
	li	r2, MSR_RI
	andc	r0,r0,r2
	mtmsrd	r0,1

	ld	r0,_MSR(r1)
	mtspr	SRR1,r0

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)

	rfid
	b	.	/* prevent speculative execution */

/* Note: this must change if we start using the TIF_NOTIFY_RESUME bit */
do_work:
#ifdef CONFIG_PREEMPT
	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
	bne	user_work
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
#ifdef CONFIG_PPC_ISERIES
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
#else
	andi.	r0,r3,MSR_EE
#endif
	crandc	eq,cr1*4+eq,eq
	bne	restore
	/* here we are preempting the current task */
1:
#ifdef CONFIG_PPC_ISERIES
	li	r0,1
	stb	r0,PACAPROCENABLED(r13)
#endif
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1		/* reenable interrupts */
	bl	.preempt_schedule
	mfmsr	r10
	clrrdi	r9,r1,THREAD_SHIFT
	rldicl	r10,r10,48,1	/* disable interrupts again */
	rotldi	r10,r10,16
	mtmsrd	r10,1
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b
	b	restore

user_work:
#endif
	/* Enable interrupts */
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1

	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	1f
	bl	.schedule
	b	.ret_from_except_lite

1:	bl	.save_nvgprs
	li	r3,0
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	.do_signal
	b	.ret_from_except

unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	unrecov_restore

#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
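/*
 * For context (a hedged sketch, not code from this file): the C callers
 * in arch/ppc64/kernel/rtas.c fill in an RTAS argument block and pass
 * its physical address down, roughly:
 *
 *	rtas.args.token = token;
 *	rtas.args.nargs = nargs;
 *	enter_rtas(__pa(&rtas.args));
 *
 * The field names above are illustrative.
 */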
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves. We therefore save those registers
	 * RTAS might touch to the stack. (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)
	mfsrr0	r9
	std	r9,_SRR0(r1)
	mfsrr1	r10
	std	r10,_SRR1(r1)

	/* There is no way it is acceptable to get here with interrupts enabled;
	 * check it with the asm equivalent of WARN_ON
	 */
	mfmsr	r6
	andi.	r0,r6,MSR_EE
1:	tdnei	r0,0
.section __bug_table,"a"
	.llong	1b,__LINE__ + 0x1000000, 1f, 2f
.previous
.section .rodata,"a"
1:	.asciz	__FILE__
2:	.asciz	"enter_rtas"
.previous
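	/* The __bug_table entry above mirrors what the C WARN_ON() emits:
	 * the trapping address, the line number (offset by 0x1000000 to
	 * flag a warning rather than a BUG), the file name and a function
	 * name. tdnei traps only if r0 (the MSR_EE bit) is non-zero. */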

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	SET_REG_TO_LABEL(r4,.rtas_return_loc)
	SET_REG_TO_CONST(r9,KERNELBASE)
	sub	r4,r4,r9
	mtlr	r4

	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP
	andc	r6,r0,r9
	ori	r6,r6,MSR_RI
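	/* At this point r0 is the current MSR with EE/SE/BE/RI cleared
	 * (used below to disable interrupts), while r6 is the MSR RTAS
	 * will run with: 32-bit (SF clear), real mode (IR/DR clear), FP
	 * off, and RI set so the state is marked recoverable. */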
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	SET_REG_TO_LABEL(r4,rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SRR0,r5
	mtspr	SRR1,r6
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_return_loc)
	/* relocation is off at this point */
	mfspr	r4,SPRG3	/* Get PACA */
	SET_REG_TO_CONST(r5, KERNELBASE)
	sub	r4,r4,r5	/* RELOC the PACA base pointer */

	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sync
	mtmsrd	r6

	ld	r1,PACAR1(r4)	/* Restore our SP */
	LOADADDR(r3,.rtas_restore_regs)
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SRR0,r3
	mtspr	SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_restore_regs)
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	mfspr	r13,SPRG3

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8
	ld	r9,_SRR0(r1)
	mtsrr0	r9
	ld	r10,_SRR1(r1)
	mtsrr1	r10

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

#ifdef CONFIG_PPC_MULTIPLATFORM

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves. We therefore save those registers
	 * PROM might touch to the stack. (r0, r3-r13 are caller saved)
	 */
	SAVE_8GPRS(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)
	mfsrr0	r9
	std	r9,_SRR0(r1)
	mfsrr1	r10
	std	r10,_SRR1(r1)
	mfmsr	r11
	std	r11,_MSR(r1)

	/* Get the PROM entrypoint */
	ld	r0,GPR4(r1)
	mtlr	r0

	/* Switch the MSR to 32-bit mode */
	mfmsr	r11
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
	andc	r11,r11,r12
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
	andc	r11,r11,r12
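	/* r11 now has both MSR_SF and MSR_ISF cleared: 64-bit mode is off
	 * for execution and for interrupt entry before calling into the
	 * 32-bit Open Firmware client interface. */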
	mtmsrd	r11
	isync

	/* Restore arguments & enter PROM here... */
	ld	r3,GPR3(r1)
	blrl

	/* Just make sure that r1's top 32 bits didn't get
	 * corrupted by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	mtmsrd	r0
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8
	ld	r9,_SRR0(r1)
	mtsrr0	r9
	ld	r10,_SRR1(r1)
	mtsrr1	r10

	addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr

#endif /* CONFIG_PPC_MULTIPLATFORM */