Diffstat (limited to 'arch/powerpc/kernel/exceptions-64e.S')
-rw-r--r-- | arch/powerpc/kernel/exceptions-64e.S | 1001
1 file changed, 1001 insertions, 0 deletions
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
new file mode 100644
index 000000000000..9048f96237f6
--- /dev/null
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -0,0 +1,1001 @@
1 | /* | ||
2 | * Boot code and exception vectors for Book3E processors | ||
3 | * | ||
4 | * Copyright (C) 2007 Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/threads.h> | ||
13 | #include <asm/reg.h> | ||
14 | #include <asm/page.h> | ||
15 | #include <asm/ppc_asm.h> | ||
16 | #include <asm/asm-offsets.h> | ||
17 | #include <asm/cputable.h> | ||
18 | #include <asm/setup.h> | ||
19 | #include <asm/thread_info.h> | ||
20 | #include <asm/reg.h> | ||
21 | #include <asm/exception-64e.h> | ||
22 | #include <asm/bug.h> | ||
23 | #include <asm/irqflags.h> | ||
24 | #include <asm/ptrace.h> | ||
25 | #include <asm/ppc-opcode.h> | ||
26 | #include <asm/mmu.h> | ||
27 | |||
28 | /* XXX This will ultimately add space for a special exception save | ||
29 | * structure used to save things like SRR0/SRR1, SPRGs, MAS, etc... | ||
30 | * when taking special interrupts. For now we don't support that; | ||
31 | * taking a special interrupt from within a non-standard level will | ||
32 | * probably blow you up. | ||
33 | */ | ||
34 | #define SPECIAL_EXC_FRAME_SIZE INT_FRAME_SIZE | ||
35 | |||
36 | /* Exception prolog code for all exceptions */ | ||
37 | #define EXCEPTION_PROLOG(n, type, addition) \ | ||
38 | mtspr SPRN_SPRG_##type##_SCRATCH,r13; /* get spare registers */ \ | ||
39 | mfspr r13,SPRN_SPRG_PACA; /* get PACA */ \ | ||
40 | std r10,PACA_EX##type+EX_R10(r13); \ | ||
41 | std r11,PACA_EX##type+EX_R11(r13); \ | ||
42 | mfcr r10; /* save CR */ \ | ||
43 | addition; /* additional code for that exc. */ \ | ||
44 | std r1,PACA_EX##type+EX_R1(r13); /* save old r1 in the PACA */ \ | ||
45 | stw r10,PACA_EX##type+EX_CR(r13); /* save old CR in the PACA */ \ | ||
46 | mfspr r11,SPRN_##type##_SRR1;/* what are we coming from */ \ | ||
47 | type##_SET_KSTACK; /* get special stack if necessary */\ | ||
48 | andi. r10,r11,MSR_PR; /* coming from userspace? */ \ | ||
49 | beq 1f; /* branch around if supervisor */ \ | ||
50 | ld r1,PACAKSAVE(r13); /* get kernel stack coming from usr */\ | ||
51 | 1: cmpdi cr1,r1,0; /* check if SP makes sense */ \ | ||
52 | bge- cr1,exc_##n##_bad_stack;/* bad stack (TODO: out of line) */ \ | ||
53 | mfspr r10,SPRN_##type##_SRR0; /* read SRR0 before touching stack */ | ||
54 | |||
55 | /* Exception type-specific macros */ | ||
56 | #define GEN_SET_KSTACK \ | ||
57 | subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ | ||
58 | #define SPRN_GEN_SRR0 SPRN_SRR0 | ||
59 | #define SPRN_GEN_SRR1 SPRN_SRR1 | ||
60 | |||
61 | #define CRIT_SET_KSTACK \ | ||
62 | ld r1,PACA_CRIT_STACK(r13); \ | ||
63 | subi r1,r1,SPECIAL_EXC_FRAME_SIZE; | ||
64 | #define SPRN_CRIT_SRR0 SPRN_CSRR0 | ||
65 | #define SPRN_CRIT_SRR1 SPRN_CSRR1 | ||
66 | |||
67 | #define DBG_SET_KSTACK \ | ||
68 | ld r1,PACA_DBG_STACK(r13); \ | ||
69 | subi r1,r1,SPECIAL_EXC_FRAME_SIZE; | ||
70 | #define SPRN_DBG_SRR0 SPRN_DSRR0 | ||
71 | #define SPRN_DBG_SRR1 SPRN_DSRR1 | ||
72 | |||
73 | #define MC_SET_KSTACK \ | ||
74 | ld r1,PACA_MC_STACK(r13); \ | ||
75 | subi r1,r1,SPECIAL_EXC_FRAME_SIZE; | ||
76 | #define SPRN_MC_SRR0 SPRN_MCSRR0 | ||
77 | #define SPRN_MC_SRR1 SPRN_MCSRR1 | ||
78 | |||
79 | #define NORMAL_EXCEPTION_PROLOG(n, addition) \ | ||
80 | EXCEPTION_PROLOG(n, GEN, addition##_GEN) | ||
81 | |||
82 | #define CRIT_EXCEPTION_PROLOG(n, addition) \ | ||
83 | EXCEPTION_PROLOG(n, CRIT, addition##_CRIT) | ||
84 | |||
85 | #define DBG_EXCEPTION_PROLOG(n, addition) \ | ||
86 | EXCEPTION_PROLOG(n, DBG, addition##_DBG) | ||
87 | |||
88 | #define MC_EXCEPTION_PROLOG(n, addition) \ | ||
89 | EXCEPTION_PROLOG(n, MC, addition##_MC) | ||
90 | |||
91 | |||
92 | /* Variants of the "addition" argument for the prolog | ||
93 | */ | ||
94 | #define PROLOG_ADDITION_NONE_GEN | ||
95 | #define PROLOG_ADDITION_NONE_CRIT | ||
96 | #define PROLOG_ADDITION_NONE_DBG | ||
97 | #define PROLOG_ADDITION_NONE_MC | ||
98 | |||
99 | #define PROLOG_ADDITION_MASKABLE_GEN \ | ||
100 | lbz r11,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */ \ | ||
101 | cmpwi cr0,r11,0; /* yes -> go out of line */ \ | ||
102 | beq masked_interrupt_book3e; | ||
103 | |||
104 | #define PROLOG_ADDITION_2REGS_GEN \ | ||
105 | std r14,PACA_EXGEN+EX_R14(r13); \ | ||
106 | std r15,PACA_EXGEN+EX_R15(r13) | ||
107 | |||
108 | #define PROLOG_ADDITION_1REG_GEN \ | ||
109 | std r14,PACA_EXGEN+EX_R14(r13); | ||
110 | |||
111 | #define PROLOG_ADDITION_2REGS_CRIT \ | ||
112 | std r14,PACA_EXCRIT+EX_R14(r13); \ | ||
113 | std r15,PACA_EXCRIT+EX_R15(r13) | ||
114 | |||
115 | #define PROLOG_ADDITION_2REGS_DBG \ | ||
116 | std r14,PACA_EXDBG+EX_R14(r13); \ | ||
117 | std r15,PACA_EXDBG+EX_R15(r13) | ||
118 | |||
119 | #define PROLOG_ADDITION_2REGS_MC \ | ||
120 | std r14,PACA_EXMC+EX_R14(r13); \ | ||
121 | std r15,PACA_EXMC+EX_R15(r13) | ||
122 | |||
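/* For illustration: with the token pasting above,
 * NORMAL_EXCEPTION_PROLOG(0x300, PROLOG_ADDITION_2REGS) expands to
 * EXCEPTION_PROLOG(0x300, GEN, PROLOG_ADDITION_2REGS_GEN), i.e. the
 * generic prolog using SPRN_SPRG_GEN_SCRATCH, the PACA_EXGEN save area,
 * GEN_SET_KSTACK and SPRN_GEN_SRR0/SRR1, with r14/r15 additionally
 * stashed in the PACA by the "addition" code.
 */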
123 | /* Core exception code for all exceptions except TLB misses. | ||
124 | * XXX: Needs to make SPRN_SPRG_GEN depend on exception type | ||
125 | */ | ||
126 | #define EXCEPTION_COMMON(n, excf, ints) \ | ||
127 | std r0,GPR0(r1); /* save r0 in stackframe */ \ | ||
128 | std r2,GPR2(r1); /* save r2 in stackframe */ \ | ||
129 | SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \ | ||
130 | SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \ | ||
131 | std r9,GPR9(r1); /* save r9 in stackframe */ \ | ||
132 | std r10,_NIP(r1); /* save SRR0 to stackframe */ \ | ||
133 | std r11,_MSR(r1); /* save SRR1 to stackframe */ \ | ||
134 | ACCOUNT_CPU_USER_ENTRY(r10,r11);/* accounting (uses cr0+eq) */ \ | ||
135 | ld r3,excf+EX_R10(r13); /* get back r10 */ \ | ||
136 | ld r4,excf+EX_R11(r13); /* get back r11 */ \ | ||
137 | mfspr r5,SPRN_SPRG_GEN_SCRATCH;/* get back r13 */ \ | ||
138 | std r12,GPR12(r1); /* save r12 in stackframe */ \ | ||
139 | ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \ | ||
140 | mflr r6; /* save LR in stackframe */ \ | ||
141 | mfctr r7; /* save CTR in stackframe */ \ | ||
142 | mfspr r8,SPRN_XER; /* save XER in stackframe */ \ | ||
143 | ld r9,excf+EX_R1(r13); /* load orig r1 back from PACA */ \ | ||
144 | lwz r10,excf+EX_CR(r13); /* load orig CR back from PACA */ \ | ||
145 | lbz r11,PACASOFTIRQEN(r13); /* get current IRQ softe */ \ | ||
146 | ld r12,exception_marker@toc(r2); \ | ||
147 | li r0,0; \ | ||
148 | std r3,GPR10(r1); /* save r10 to stackframe */ \ | ||
149 | std r4,GPR11(r1); /* save r11 to stackframe */ \ | ||
150 | std r5,GPR13(r1); /* save it to stackframe */ \ | ||
151 | std r6,_LINK(r1); \ | ||
152 | std r7,_CTR(r1); \ | ||
153 | std r8,_XER(r1); \ | ||
154 | li r3,(n)+1; /* indicate partial regs in trap */ \ | ||
155 | std r9,0(r1); /* store stack frame back link */ \ | ||
156 | std r10,_CCR(r1); /* store orig CR in stackframe */ \ | ||
157 | std r9,GPR1(r1); /* store original r1 in stackframe */ \ | ||
158 | std r11,SOFTE(r1); /* and save it to stackframe */ \ | ||
159 | std r12,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */ \ | ||
160 | std r3,_TRAP(r1); /* set trap number */ \ | ||
161 | std r0,RESULT(r1); /* clear regs->result */ \ | ||
162 | ints; | ||
163 | |||
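/* The "(n)+1" value stored in _TRAP above marks the frame as holding
 * only the volatile GPRs; handlers below that need the full set
 * (program check, alignment, debug, ...) call .save_nvgprs before
 * dispatching to C code.
 */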
164 | /* Variants for the "ints" argument */ | ||
165 | #define INTS_KEEP | ||
166 | #define INTS_DISABLE_SOFT \ | ||
167 | stb r0,PACASOFTIRQEN(r13); /* mark interrupts soft-disabled */ \ | ||
168 | TRACE_DISABLE_INTS; | ||
169 | #define INTS_DISABLE_HARD \ | ||
170 | stb r0,PACAHARDIRQEN(r13); /* and hard disabled */ | ||
171 | #define INTS_DISABLE_ALL \ | ||
172 | INTS_DISABLE_SOFT \ | ||
173 | INTS_DISABLE_HARD | ||
174 | |||
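/* Note that the INTS_DISABLE_* variants rely on r0 still being 0 from
 * the "li r0,0" in EXCEPTION_COMMON, since they are expanded as its
 * "ints" argument right at its end.
 */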
175 | /* This is called by exceptions that used INTS_KEEP (that is, did not clear | ||
176 | * either the soft or the hard IRQ indicator in the PACA). This will restore | ||
177 | * MSR:EE to its previous value. | ||
178 | * | ||
179 | * XXX In the long run, we may want to open-code it in order to separate the | ||
180 | * load from the wrtee, thus limiting the latency caused by the dependency, | ||
181 | * but at this point I'll favor code clarity until we have a near-final | ||
182 | * implementation. | ||
183 | */ | ||
184 | #define INTS_RESTORE_HARD \ | ||
185 | ld r11,_MSR(r1); \ | ||
186 | wrtee r11; | ||
187 | |||
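/* wrtee only copies the EE bit of r11 into the MSR and leaves the rest
 * alone, so the full SRR1 image saved in _MSR(r1) can be handed to it
 * as-is.
 */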
188 | /* XXX FIXME: Restore r14/r15 when necessary */ | ||
189 | #define BAD_STACK_TRAMPOLINE(n) \ | ||
190 | exc_##n##_bad_stack: \ | ||
191 | li r1,(n); /* get exception number */ \ | ||
192 | sth r1,PACA_TRAP_SAVE(r13); /* store trap */ \ | ||
193 | b bad_stack_book3e; /* bad stack error */ | ||
194 | |||
195 | #define EXCEPTION_STUB(loc, label) \ | ||
196 | . = interrupt_base_book3e + loc; \ | ||
197 | nop; /* To make debug interrupts happy */ \ | ||
198 | b exc_##label##_book3e; | ||
199 | |||
200 | #define ACK_NONE(r) | ||
201 | #define ACK_DEC(r) \ | ||
202 | lis r,TSR_DIS@h; \ | ||
203 | mtspr SPRN_TSR,r | ||
204 | #define ACK_FIT(r) \ | ||
205 | lis r,TSR_FIS@h; \ | ||
206 | mtspr SPRN_TSR,r | ||
207 | |||
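/* The TSR status bits are write-one-to-clear, so writing TSR_DIS or
 * TSR_FIS back acknowledges the decrementer or fixed-interval interrupt
 * without disturbing the other status bits.
 */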
208 | #define MASKABLE_EXCEPTION(trapnum, label, hdlr, ack) \ | ||
209 | START_EXCEPTION(label); \ | ||
210 | NORMAL_EXCEPTION_PROLOG(trapnum, PROLOG_ADDITION_MASKABLE) \ | ||
211 | EXCEPTION_COMMON(trapnum, PACA_EXGEN, INTS_DISABLE_ALL) \ | ||
212 | ack(r8); \ | ||
213 | addi r3,r1,STACK_FRAME_OVERHEAD; \ | ||
214 | bl hdlr; \ | ||
215 | b .ret_from_except_lite; | ||
216 | |||
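/* As a concrete example, MASKABLE_EXCEPTION(0x900, decrementer,
 * .timer_interrupt, ACK_DEC) below produces exc_decrementer_book3e:
 * a GEN prolog with the soft-disable check, EXCEPTION_COMMON with
 * interrupts soft and hard disabled, a TSR[DIS] acknowledge, a call
 * to .timer_interrupt and a return via .ret_from_except_lite.
 */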
217 | /* This value is used to mark exception frames on the stack. */ | ||
218 | .section ".toc","aw" | ||
219 | exception_marker: | ||
220 | .tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER | ||
221 | |||
222 | |||
223 | /* | ||
224 | * And here we have the exception vectors ! | ||
225 | */ | ||
226 | |||
227 | .text | ||
228 | .balign 0x1000 | ||
229 | .globl interrupt_base_book3e | ||
230 | interrupt_base_book3e: /* fake trap */ | ||
231 | /* Note: If real debug exceptions are supported by the HW, the vector | ||
232 | * below will have to be patched up to point to an appropriate handler | ||
233 | */ | ||
234 | EXCEPTION_STUB(0x000, machine_check) /* 0x0200 */ | ||
235 | EXCEPTION_STUB(0x020, critical_input) /* 0x0580 */ | ||
236 | EXCEPTION_STUB(0x040, debug_crit) /* 0x0d00 */ | ||
237 | EXCEPTION_STUB(0x060, data_storage) /* 0x0300 */ | ||
238 | EXCEPTION_STUB(0x080, instruction_storage) /* 0x0400 */ | ||
239 | EXCEPTION_STUB(0x0a0, external_input) /* 0x0500 */ | ||
240 | EXCEPTION_STUB(0x0c0, alignment) /* 0x0600 */ | ||
241 | EXCEPTION_STUB(0x0e0, program) /* 0x0700 */ | ||
242 | EXCEPTION_STUB(0x100, fp_unavailable) /* 0x0800 */ | ||
243 | EXCEPTION_STUB(0x120, system_call) /* 0x0c00 */ | ||
244 | EXCEPTION_STUB(0x140, ap_unavailable) /* 0x0f20 */ | ||
245 | EXCEPTION_STUB(0x160, decrementer) /* 0x0900 */ | ||
246 | EXCEPTION_STUB(0x180, fixed_interval) /* 0x0980 */ | ||
247 | EXCEPTION_STUB(0x1a0, watchdog) /* 0x09f0 */ | ||
248 | EXCEPTION_STUB(0x1c0, data_tlb_miss) | ||
249 | EXCEPTION_STUB(0x1e0, instruction_tlb_miss) | ||
250 | |||
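/* Each stub slot is 0x20 bytes, plenty for the two-instruction (8-byte)
 * EXCEPTION_STUB body; the bracketed numbers are the trap values the
 * corresponding handlers below generally pass to their prologs.
 */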
251 | #if 0 | ||
252 | EXCEPTION_STUB(0x280, processor_doorbell) | ||
253 | EXCEPTION_STUB(0x220, processor_doorbell_crit) | ||
254 | #endif | ||
255 | .globl interrupt_end_book3e | ||
256 | interrupt_end_book3e: | ||
257 | |||
258 | /* Critical Input Interrupt */ | ||
259 | START_EXCEPTION(critical_input); | ||
260 | CRIT_EXCEPTION_PROLOG(0x100, PROLOG_ADDITION_NONE) | ||
261 | // EXCEPTION_COMMON(0x100, PACA_EXCRIT, INTS_DISABLE_ALL) | ||
262 | // bl special_reg_save_crit | ||
263 | // addi r3,r1,STACK_FRAME_OVERHEAD | ||
264 | // bl .critical_exception | ||
265 | // b ret_from_crit_except | ||
266 | b . | ||
267 | |||
268 | /* Machine Check Interrupt */ | ||
269 | START_EXCEPTION(machine_check); | ||
270 | CRIT_EXCEPTION_PROLOG(0x200, PROLOG_ADDITION_NONE) | ||
271 | // EXCEPTION_COMMON(0x200, PACA_EXMC, INTS_DISABLE_ALL) | ||
272 | // bl special_reg_save_mc | ||
273 | // addi r3,r1,STACK_FRAME_OVERHEAD | ||
274 | // bl .machine_check_exception | ||
275 | // b ret_from_mc_except | ||
276 | b . | ||
277 | |||
278 | /* Data Storage Interrupt */ | ||
279 | START_EXCEPTION(data_storage) | ||
280 | NORMAL_EXCEPTION_PROLOG(0x300, PROLOG_ADDITION_2REGS) | ||
281 | mfspr r14,SPRN_DEAR | ||
282 | mfspr r15,SPRN_ESR | ||
283 | EXCEPTION_COMMON(0x300, PACA_EXGEN, INTS_KEEP) | ||
284 | b storage_fault_common | ||
285 | |||
286 | /* Instruction Storage Interrupt */ | ||
287 | START_EXCEPTION(instruction_storage); | ||
288 | NORMAL_EXCEPTION_PROLOG(0x400, PROLOG_ADDITION_2REGS) | ||
289 | li r15,0 | ||
290 | mr r14,r10 | ||
291 | EXCEPTION_COMMON(0x400, PACA_EXGEN, INTS_KEEP) | ||
292 | b storage_fault_common | ||
293 | |||
294 | /* External Input Interrupt */ | ||
295 | MASKABLE_EXCEPTION(0x500, external_input, .do_IRQ, ACK_NONE) | ||
296 | |||
297 | /* Alignment */ | ||
298 | START_EXCEPTION(alignment); | ||
299 | NORMAL_EXCEPTION_PROLOG(0x600, PROLOG_ADDITION_2REGS) | ||
300 | mfspr r14,SPRN_DEAR | ||
301 | mfspr r15,SPRN_ESR | ||
302 | EXCEPTION_COMMON(0x600, PACA_EXGEN, INTS_KEEP) | ||
303 | b alignment_more /* no room, go out of line */ | ||
304 | |||
305 | /* Program Interrupt */ | ||
306 | START_EXCEPTION(program); | ||
307 | NORMAL_EXCEPTION_PROLOG(0x700, PROLOG_ADDITION_1REG) | ||
308 | mfspr r14,SPRN_ESR | ||
309 | EXCEPTION_COMMON(0x700, PACA_EXGEN, INTS_DISABLE_SOFT) | ||
310 | std r14,_DSISR(r1) | ||
311 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
312 | ld r14,PACA_EXGEN+EX_R14(r13) | ||
313 | bl .save_nvgprs | ||
314 | INTS_RESTORE_HARD | ||
315 | bl .program_check_exception | ||
316 | b .ret_from_except | ||
317 | |||
318 | /* Floating Point Unavailable Interrupt */ | ||
319 | START_EXCEPTION(fp_unavailable); | ||
320 | NORMAL_EXCEPTION_PROLOG(0x800, PROLOG_ADDITION_NONE) | ||
321 | /* we can probably do a shorter exception entry for that one... */ | ||
322 | EXCEPTION_COMMON(0x800, PACA_EXGEN, INTS_KEEP) | ||
323 | bne 1f /* if from user, just load it up */ | ||
324 | bl .save_nvgprs | ||
325 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
326 | INTS_RESTORE_HARD | ||
327 | bl .kernel_fp_unavailable_exception | ||
328 | BUG_OPCODE | ||
329 | 1: ld r12,_MSR(r1) | ||
330 | bl .load_up_fpu | ||
331 | b fast_exception_return | ||
332 | |||
333 | /* Decrementer Interrupt */ | ||
334 | MASKABLE_EXCEPTION(0x900, decrementer, .timer_interrupt, ACK_DEC) | ||
335 | |||
336 | /* Fixed Interval Timer Interrupt */ | ||
337 | MASKABLE_EXCEPTION(0x980, fixed_interval, .unknown_exception, ACK_FIT) | ||
338 | |||
339 | /* Watchdog Timer Interrupt */ | ||
340 | START_EXCEPTION(watchdog); | ||
341 | CRIT_EXCEPTION_PROLOG(0x9f0, PROLOG_ADDITION_NONE) | ||
342 | // EXCEPTION_COMMON(0x9f0, PACA_EXCRIT, INTS_DISABLE_ALL) | ||
343 | // bl special_reg_save_crit | ||
344 | // addi r3,r1,STACK_FRAME_OVERHEAD | ||
345 | // bl .unknown_exception | ||
346 | // b ret_from_crit_except | ||
347 | b . | ||
348 | |||
349 | /* System Call Interrupt */ | ||
350 | START_EXCEPTION(system_call) | ||
351 | mr r9,r13 /* keep a copy of userland r13 */ | ||
352 | mfspr r11,SPRN_SRR0 /* get return address */ | ||
353 | mfspr r12,SPRN_SRR1 /* get previous MSR */ | ||
354 | mfspr r13,SPRN_SPRG_PACA /* get our PACA */ | ||
355 | b system_call_common | ||
356 | |||
357 | /* Auxiliary Processor Unavailable Interrupt */ | ||
358 | START_EXCEPTION(ap_unavailable); | ||
359 | NORMAL_EXCEPTION_PROLOG(0xf20, PROLOG_ADDITION_NONE) | ||
360 | EXCEPTION_COMMON(0xf20, PACA_EXGEN, INTS_KEEP) | ||
361 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
362 | bl .save_nvgprs | ||
363 | INTS_RESTORE_HARD | ||
364 | bl .unknown_exception | ||
365 | b .ret_from_except | ||
366 | |||
367 | /* Debug exception as a critical interrupt */ | ||
368 | START_EXCEPTION(debug_crit); | ||
369 | CRIT_EXCEPTION_PROLOG(0xd00, PROLOG_ADDITION_2REGS) | ||
370 | |||
371 | /* | ||
372 | * If there is a single step or branch-taken exception in an | ||
373 | * exception entry sequence, it was probably meant to apply to | ||
374 | * the code where the exception occurred (since exception entry | ||
375 | * doesn't turn off DE automatically). We simulate the effect | ||
376 | * of turning off DE on entry to an exception handler by turning | ||
377 | * off DE in the CSRR1 value and clearing the debug status. | ||
378 | */ | ||
379 | |||
380 | mfspr r14,SPRN_DBSR /* check single-step/branch taken */ | ||
381 | andis. r15,r14,DBSR_IC@h | ||
382 | beq+ 1f | ||
383 | |||
384 | LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e) | ||
385 | LOAD_REG_IMMEDIATE(r15,interrupt_end_book3e) | ||
386 | cmpld cr0,r10,r14 | ||
387 | cmpld cr1,r10,r15 | ||
388 | blt+ cr0,1f | ||
389 | bge+ cr1,1f | ||
390 | |||
391 | /* here it looks like we got an inappropriate debug exception. */ | ||
392 | lis r14,DBSR_IC@h /* clear the IC event */ | ||
393 | rlwinm r11,r11,0,~MSR_DE /* clear DE in the CSRR1 value */ | ||
394 | mtspr SPRN_DBSR,r14 | ||
395 | mtspr SPRN_CSRR1,r11 | ||
396 | lwz r10,PACA_EXCRIT+EX_CR(r13) /* restore registers */ | ||
397 | ld r1,PACA_EXCRIT+EX_R1(r13) | ||
398 | ld r14,PACA_EXCRIT+EX_R14(r13) | ||
399 | ld r15,PACA_EXCRIT+EX_R15(r13) | ||
400 | mtcr r10 | ||
401 | ld r10,PACA_EXCRIT+EX_R10(r13) /* restore registers */ | ||
402 | ld r11,PACA_EXCRIT+EX_R11(r13) | ||
403 | mfspr r13,SPRN_SPRG_CRIT_SCRATCH | ||
404 | rfci | ||
405 | |||
406 | /* Normal debug exception */ | ||
407 | /* XXX We only handle coming from userspace for now since we can't | ||
408 | * quite properly save an interrupted kernel state yet | ||
409 | */ | ||
410 | 1: andi. r14,r11,MSR_PR; /* check for userspace again */ | ||
411 | beq kernel_dbg_exc; /* if from kernel mode */ | ||
412 | |||
413 | /* Now we mash things up to make it look like we came in via a | ||
414 | * normal exception | ||
415 | */ | ||
416 | mfspr r15,SPRN_SPRG_CRIT_SCRATCH | ||
417 | mtspr SPRN_SPRG_GEN_SCRATCH,r15 | ||
418 | mfspr r14,SPRN_DBSR | ||
419 | EXCEPTION_COMMON(0xd00, PACA_EXCRIT, INTS_DISABLE_ALL) | ||
420 | std r14,_DSISR(r1) | ||
421 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
422 | mr r4,r14 | ||
423 | ld r14,PACA_EXCRIT+EX_R14(r13) | ||
424 | ld r15,PACA_EXCRIT+EX_R15(r13) | ||
425 | bl .save_nvgprs | ||
426 | bl .DebugException | ||
427 | b .ret_from_except | ||
428 | |||
429 | kernel_dbg_exc: | ||
430 | b . /* NYI */ | ||
431 | |||
432 | |||
433 | /* | ||
434 | * An interrupt came in while soft-disabled; clear EE in SRR1, | ||
435 | * clear paca->hard_enabled and return. | ||
436 | */ | ||
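/* MSR_EE sits at bit 48 of the 64-bit MSR image, so the rldicl by 48
 * below rotates it into bit 0 and masks it off, and the following
 * rotldi by 16 completes the full 64-bit rotation, leaving everything
 * else where it was with EE cleared.
 */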
437 | masked_interrupt_book3e: | ||
438 | mtcr r10 | ||
439 | stb r11,PACAHARDIRQEN(r13) | ||
440 | mfspr r10,SPRN_SRR1 | ||
441 | rldicl r11,r10,48,1 /* clear MSR_EE */ | ||
442 | rotldi r10,r11,16 | ||
443 | mtspr SPRN_SRR1,r10 | ||
444 | ld r10,PACA_EXGEN+EX_R10(r13); /* restore registers */ | ||
445 | ld r11,PACA_EXGEN+EX_R11(r13); | ||
446 | mfspr r13,SPRN_SPRG_GEN_SCRATCH; | ||
447 | rfi | ||
448 | b . | ||
449 | |||
450 | /* | ||
451 | * This is called from 0x300 and 0x400 handlers after the prologs with | ||
452 | * r14 and r15 containing the fault address and error code, with the | ||
453 | * original values stashed away in the PACA | ||
454 | */ | ||
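/* A zero return from .do_page_fault below means the fault was handled;
 * any other value is passed on (via r5) to .bad_page_fault as the
 * signal number.
 */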
455 | storage_fault_common: | ||
456 | std r14,_DAR(r1) | ||
457 | std r15,_DSISR(r1) | ||
458 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
459 | mr r4,r14 | ||
460 | mr r5,r15 | ||
461 | ld r14,PACA_EXGEN+EX_R14(r13) | ||
462 | ld r15,PACA_EXGEN+EX_R15(r13) | ||
463 | INTS_RESTORE_HARD | ||
464 | bl .do_page_fault | ||
465 | cmpdi r3,0 | ||
466 | bne- 1f | ||
467 | b .ret_from_except_lite | ||
468 | 1: bl .save_nvgprs | ||
469 | mr r5,r3 | ||
470 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
471 | ld r4,_DAR(r1) | ||
472 | bl .bad_page_fault | ||
473 | b .ret_from_except | ||
474 | |||
475 | /* | ||
476 | * Alignment exception doesn't fit entirely in the 0x100 bytes so it | ||
477 | * continues here. | ||
478 | */ | ||
479 | alignment_more: | ||
480 | std r14,_DAR(r1) | ||
481 | std r15,_DSISR(r1) | ||
482 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
483 | ld r14,PACA_EXGEN+EX_R14(r13) | ||
484 | ld r15,PACA_EXGEN+EX_R15(r13) | ||
485 | bl .save_nvgprs | ||
486 | INTS_RESTORE_HARD | ||
487 | bl .alignment_exception | ||
488 | b .ret_from_except | ||
489 | |||
490 | /* | ||
491 | * We branch here from entry_64.S for the last stage of the exception | ||
492 | * return code path. MSR:EE is expected to be off at that point | ||
493 | */ | ||
494 | _GLOBAL(exception_return_book3e) | ||
495 | b 1f | ||
496 | |||
497 | /* This is the return from the load_up_fpu fast path, which could in | ||
498 | * fact do with fewer GPR restores, but for now we have a single return path | ||
499 | */ | ||
500 | .globl fast_exception_return | ||
501 | fast_exception_return: | ||
502 | wrteei 0 | ||
503 | 1: mr r0,r13 | ||
504 | ld r10,_MSR(r1) | ||
505 | REST_4GPRS(2, r1) | ||
506 | andi. r6,r10,MSR_PR | ||
507 | REST_2GPRS(6, r1) | ||
508 | beq 1f | ||
509 | ACCOUNT_CPU_USER_EXIT(r10, r11) | ||
510 | ld r0,GPR13(r1) | ||
511 | |||
512 | 1: stdcx. r0,0,r1 /* to clear the reservation */ | ||
513 | |||
514 | ld r8,_CCR(r1) | ||
515 | ld r9,_LINK(r1) | ||
516 | ld r10,_CTR(r1) | ||
517 | ld r11,_XER(r1) | ||
518 | mtcr r8 | ||
519 | mtlr r9 | ||
520 | mtctr r10 | ||
521 | mtxer r11 | ||
522 | REST_2GPRS(8, r1) | ||
523 | ld r10,GPR10(r1) | ||
524 | ld r11,GPR11(r1) | ||
525 | ld r12,GPR12(r1) | ||
526 | mtspr SPRN_SPRG_GEN_SCRATCH,r0 | ||
527 | |||
528 | std r10,PACA_EXGEN+EX_R10(r13); | ||
529 | std r11,PACA_EXGEN+EX_R11(r13); | ||
530 | ld r10,_NIP(r1) | ||
531 | ld r11,_MSR(r1) | ||
532 | ld r0,GPR0(r1) | ||
533 | ld r1,GPR1(r1) | ||
534 | mtspr SPRN_SRR0,r10 | ||
535 | mtspr SPRN_SRR1,r11 | ||
536 | ld r10,PACA_EXGEN+EX_R10(r13) | ||
537 | ld r11,PACA_EXGEN+EX_R11(r13) | ||
538 | mfspr r13,SPRN_SPRG_GEN_SCRATCH | ||
539 | rfi | ||
540 | |||
541 | /* | ||
542 | * Trampolines used when spotting a bad kernel stack pointer in | ||
543 | * the exception entry code. | ||
544 | * | ||
545 | * TODO: move some bits like SRR0 read to trampoline, pass PACA | ||
546 | * index around, etc... to handle crit & mcheck | ||
547 | */ | ||
548 | BAD_STACK_TRAMPOLINE(0x000) | ||
549 | BAD_STACK_TRAMPOLINE(0x100) | ||
550 | BAD_STACK_TRAMPOLINE(0x200) | ||
551 | BAD_STACK_TRAMPOLINE(0x300) | ||
552 | BAD_STACK_TRAMPOLINE(0x400) | ||
553 | BAD_STACK_TRAMPOLINE(0x500) | ||
554 | BAD_STACK_TRAMPOLINE(0x600) | ||
555 | BAD_STACK_TRAMPOLINE(0x700) | ||
556 | BAD_STACK_TRAMPOLINE(0x800) | ||
557 | BAD_STACK_TRAMPOLINE(0x900) | ||
558 | BAD_STACK_TRAMPOLINE(0x980) | ||
559 | BAD_STACK_TRAMPOLINE(0x9f0) | ||
560 | BAD_STACK_TRAMPOLINE(0xa00) | ||
561 | BAD_STACK_TRAMPOLINE(0xb00) | ||
562 | BAD_STACK_TRAMPOLINE(0xc00) | ||
563 | BAD_STACK_TRAMPOLINE(0xd00) | ||
564 | BAD_STACK_TRAMPOLINE(0xe00) | ||
565 | BAD_STACK_TRAMPOLINE(0xf00) | ||
566 | BAD_STACK_TRAMPOLINE(0xf20) | ||
567 | |||
568 | .globl bad_stack_book3e | ||
569 | bad_stack_book3e: | ||
570 | /* XXX: Needs to make SPRN_SPRG_GEN depend on exception type */ | ||
571 | mfspr r10,SPRN_SRR0; /* read SRR0 before touching stack */ | ||
572 | ld r1,PACAEMERGSP(r13) | ||
573 | subi r1,r1,64+INT_FRAME_SIZE | ||
574 | std r10,_NIP(r1) | ||
575 | std r11,_MSR(r1) | ||
576 | ld r10,PACA_EXGEN+EX_R1(r13) /* FIXME for crit & mcheck */ | ||
577 | lwz r11,PACA_EXGEN+EX_CR(r13) /* FIXME for crit & mcheck */ | ||
578 | std r10,GPR1(r1) | ||
579 | std r11,_CCR(r1) | ||
580 | mfspr r10,SPRN_DEAR | ||
581 | mfspr r11,SPRN_ESR | ||
582 | std r10,_DAR(r1) | ||
583 | std r11,_DSISR(r1) | ||
584 | std r0,GPR0(r1); /* save r0 in stackframe */ \ | ||
585 | std r2,GPR2(r1); /* save r2 in stackframe */ \ | ||
586 | SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \ | ||
587 | SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \ | ||
588 | std r9,GPR9(r1); /* save r9 in stackframe */ \ | ||
589 | ld r3,PACA_EXGEN+EX_R10(r13);/* get back r10 */ \ | ||
590 | ld r4,PACA_EXGEN+EX_R11(r13);/* get back r11 */ \ | ||
591 | mfspr r5,SPRN_SPRG_GEN_SCRATCH;/* get back r13 XXX can be wrong */ \ | ||
592 | std r3,GPR10(r1); /* save r10 to stackframe */ \ | ||
593 | std r4,GPR11(r1); /* save r11 to stackframe */ \ | ||
594 | std r12,GPR12(r1); /* save r12 in stackframe */ \ | ||
595 | std r5,GPR13(r1); /* save it to stackframe */ \ | ||
596 | mflr r10 | ||
597 | mfctr r11 | ||
598 | mfxer r12 | ||
599 | std r10,_LINK(r1) | ||
600 | std r11,_CTR(r1) | ||
601 | std r12,_XER(r1) | ||
602 | SAVE_10GPRS(14,r1) | ||
603 | SAVE_8GPRS(24,r1) | ||
604 | lhz r12,PACA_TRAP_SAVE(r13) | ||
605 | std r12,_TRAP(r1) | ||
606 | addi r11,r1,INT_FRAME_SIZE | ||
607 | std r11,0(r1) | ||
608 | li r12,0 | ||
609 | std r12,0(r11) | ||
610 | ld r2,PACATOC(r13) | ||
611 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | ||
612 | bl .kernel_bad_stack | ||
613 | b 1b | ||
614 | |||
615 | /* | ||
616 | * Setup the initial TLB for a core. This current implementation | ||
617 | * assumes that whatever we are running off of will not conflict with | ||
618 | * the new mapping at PAGE_OFFSET. | ||
619 | */ | ||
620 | _GLOBAL(initial_tlb_book3e) | ||
621 | |||
622 | /* Look for the first TLB with IPROT set */ | ||
623 | mfspr r4,SPRN_TLB0CFG | ||
624 | andi. r3,r4,TLBnCFG_IPROT | ||
625 | lis r3,MAS0_TLBSEL(0)@h | ||
626 | bne found_iprot | ||
627 | |||
628 | mfspr r4,SPRN_TLB1CFG | ||
629 | andi. r3,r4,TLBnCFG_IPROT | ||
630 | lis r3,MAS0_TLBSEL(1)@h | ||
631 | bne found_iprot | ||
632 | |||
633 | mfspr r4,SPRN_TLB2CFG | ||
634 | andi. r3,r4,TLBnCFG_IPROT | ||
635 | lis r3,MAS0_TLBSEL(2)@h | ||
636 | bne found_iprot | ||
637 | |||
638 | lis r3,MAS0_TLBSEL(3)@h | ||
639 | mfspr r4,SPRN_TLB3CFG | ||
640 | /* fall through */ | ||
641 | |||
642 | found_iprot: | ||
643 | andi. r5,r4,TLBnCFG_HES | ||
644 | bne have_hes | ||
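/* TLBnCFG[HES] set means this array supports hardware entry select,
 * handled by the much shorter have_hes path further down; otherwise we
 * go through the full find/invalidate/remap sequence below.
 */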
645 | |||
646 | mflr r8 /* save LR */ | ||
647 | /* 1. Find the index of the entry we're executing in | ||
648 | * | ||
649 | * r3 = MAS0_TLBSEL (for the iprot array) | ||
650 | * r4 = SPRN_TLBnCFG | ||
651 | */ | ||
652 | bl invstr /* Find our address */ | ||
653 | invstr: mflr r6 /* Make it accessible */ | ||
654 | mfmsr r7 | ||
655 | rlwinm r5,r7,27,31,31 /* extract MSR[IS] */ | ||
656 | mfspr r7,SPRN_PID | ||
657 | slwi r7,r7,16 | ||
658 | or r7,r7,r5 | ||
659 | mtspr SPRN_MAS6,r7 | ||
660 | tlbsx 0,r6 /* search MSR[IS], SPID=PID */ | ||
661 | |||
662 | mfspr r3,SPRN_MAS0 | ||
663 | rlwinm r5,r3,16,20,31 /* Extract MAS0(Entry) */ | ||
664 | |||
665 | mfspr r7,SPRN_MAS1 /* Ensure IPROT is set */ | ||
666 | oris r7,r7,MAS1_IPROT@h | ||
667 | mtspr SPRN_MAS1,r7 | ||
668 | tlbwe | ||
669 | |||
670 | /* 2. Invalidate all entries except the entry we're executing in | ||
671 | * | ||
672 | * r3 = MAS0 w/TLBSEL & ESEL for the entry we are running in | ||
673 | * r4 = SPRN_TLBnCFG | ||
674 | * r5 = ESEL of entry we are running in | ||
675 | */ | ||
676 | andi. r4,r4,TLBnCFG_N_ENTRY /* Extract # entries */ | ||
677 | li r6,0 /* Set Entry counter to 0 */ | ||
678 | 1: mr r7,r3 /* Set MAS0(TLBSEL) */ | ||
679 | rlwimi r7,r6,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r6) */ | ||
680 | mtspr SPRN_MAS0,r7 | ||
681 | tlbre | ||
682 | mfspr r7,SPRN_MAS1 | ||
683 | rlwinm r7,r7,0,2,31 /* Clear MAS1 Valid and IPROT */ | ||
684 | cmpw r5,r6 | ||
685 | beq skpinv /* Don't update the current execution TLB */ | ||
686 | mtspr SPRN_MAS1,r7 | ||
687 | tlbwe | ||
688 | isync | ||
689 | skpinv: addi r6,r6,1 /* Increment */ | ||
690 | cmpw r6,r4 /* Are we done? */ | ||
691 | bne 1b /* If not, repeat */ | ||
692 | |||
693 | /* Invalidate all TLBs */ | ||
694 | PPC_TLBILX_ALL(0,0) | ||
695 | sync | ||
696 | isync | ||
697 | |||
698 | /* 3. Setup a temp mapping and jump to it | ||
699 | * | ||
700 | * r3 = MAS0 w/TLBSEL & ESEL for the entry we are running in | ||
701 | * r5 = ESEL of entry we are running in | ||
702 | */ | ||
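/* The temporary entry is created with TS flipped (the MAS1_TS xor
 * below), i.e. in the other translation space, and we rfi into it with
 * MSR[IS] toggled so that the entry we started in can be torn down in
 * step 5.
 */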
703 | andi. r7,r5,0x1 /* Pick a non-zero entry other than ours */ | ||
704 | addi r7,r7,0x1 | ||
705 | mr r4,r3 /* Start from MAS0 of the entry we run from */ | ||
706 | mtspr SPRN_MAS0,r4 | ||
707 | tlbre | ||
708 | |||
709 | rlwimi r4,r7,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r7) */ | ||
710 | mtspr SPRN_MAS0,r4 | ||
711 | |||
712 | mfspr r7,SPRN_MAS1 | ||
713 | xori r6,r7,MAS1_TS /* Setup TMP mapping in the other Address space */ | ||
714 | mtspr SPRN_MAS1,r6 | ||
715 | |||
716 | tlbwe | ||
717 | |||
718 | mfmsr r6 | ||
719 | xori r6,r6,MSR_IS | ||
720 | mtspr SPRN_SRR1,r6 | ||
721 | bl 1f /* Find our address */ | ||
722 | 1: mflr r6 | ||
723 | addi r6,r6,(2f - 1b) | ||
724 | mtspr SPRN_SRR0,r6 | ||
725 | rfi | ||
726 | 2: | ||
727 | |||
728 | /* 4. Clear out PIDs & Search info | ||
729 | * | ||
730 | * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in | ||
731 | * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping | ||
732 | * r5 = MAS3 | ||
733 | */ | ||
734 | li r6,0 | ||
735 | mtspr SPRN_MAS6,r6 | ||
736 | mtspr SPRN_PID,r6 | ||
737 | |||
738 | /* 5. Invalidate mapping we started in | ||
739 | * | ||
740 | * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in | ||
741 | * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping | ||
742 | * r5 = MAS3 | ||
743 | */ | ||
744 | mtspr SPRN_MAS0,r3 | ||
745 | tlbre | ||
746 | mfspr r6,SPRN_MAS1 | ||
747 | rlwinm r6,r6,0,2,0 /* clear IPROT */ | ||
748 | mtspr SPRN_MAS1,r6 | ||
749 | tlbwe | ||
750 | |||
751 | /* Invalidate TLB1 */ | ||
752 | PPC_TLBILX_ALL(0,0) | ||
753 | sync | ||
754 | isync | ||
755 | |||
756 | /* The mapping only needs to be cache-coherent on SMP */ | ||
757 | #ifdef CONFIG_SMP | ||
758 | #define M_IF_SMP MAS2_M | ||
759 | #else | ||
760 | #define M_IF_SMP 0 | ||
761 | #endif | ||
762 | |||
763 | /* 6. Setup KERNELBASE mapping in TLB[0] | ||
764 | * | ||
765 | * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in | ||
766 | * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping | ||
767 | * r5 = MAS3 | ||
768 | */ | ||
769 | rlwinm r3,r3,0,16,3 /* clear ESEL */ | ||
770 | mtspr SPRN_MAS0,r3 | ||
771 | lis r6,(MAS1_VALID|MAS1_IPROT)@h | ||
772 | ori r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l | ||
773 | mtspr SPRN_MAS1,r6 | ||
774 | |||
775 | LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET | M_IF_SMP) | ||
776 | mtspr SPRN_MAS2,r6 | ||
777 | |||
778 | rlwinm r5,r5,0,0,25 | ||
779 | ori r5,r5,MAS3_SR | MAS3_SW | MAS3_SX | ||
780 | mtspr SPRN_MAS3,r5 | ||
781 | li r5,-1 | ||
782 | rlwinm r5,r5,0,0,25 | ||
783 | |||
784 | tlbwe | ||
785 | |||
786 | /* 7. Jump to KERNELBASE mapping | ||
787 | * | ||
788 | * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping | ||
789 | */ | ||
790 | /* Now we branch to the new virtual address mapped by this entry */ | ||
791 | LOAD_REG_IMMEDIATE(r6,2f) | ||
792 | lis r7,MSR_KERNEL@h | ||
793 | ori r7,r7,MSR_KERNEL@l | ||
794 | mtspr SPRN_SRR0,r6 | ||
795 | mtspr SPRN_SRR1,r7 | ||
796 | rfi /* start execution out of TLB1[0] entry */ | ||
797 | 2: | ||
798 | |||
799 | /* 8. Clear out the temp mapping | ||
800 | * | ||
801 | * r4 = MAS0 w/TLBSEL & ESEL for the entry we are running in | ||
802 | */ | ||
803 | mtspr SPRN_MAS0,r4 | ||
804 | tlbre | ||
805 | mfspr r5,SPRN_MAS1 | ||
806 | rlwinm r5,r5,0,2,0 /* clear IPROT */ | ||
807 | mtspr SPRN_MAS1,r5 | ||
808 | tlbwe | ||
809 | |||
810 | /* Invalidate TLB1 */ | ||
811 | PPC_TLBILX_ALL(0,0) | ||
812 | sync | ||
813 | isync | ||
814 | |||
815 | /* We translate LR and return */ | ||
816 | tovirt(r8,r8) | ||
817 | mtlr r8 | ||
818 | blr | ||
819 | |||
820 | have_hes: | ||
821 | /* Setup MAS 0,1,2,3 and 7 for tlbwe of a 1G entry that maps the | ||
822 | * kernel linear mapping. We also set MAS8 once and for all here, though | ||
823 | * that will have to be made dependent on whether we are running under | ||
824 | * a hypervisor, I suppose. | ||
825 | */ | ||
826 | ori r3,r3,MAS0_HES | MAS0_WQ_ALLWAYS | ||
827 | mtspr SPRN_MAS0,r3 | ||
828 | lis r3,(MAS1_VALID | MAS1_IPROT)@h | ||
829 | ori r3,r3,BOOK3E_PAGESZ_1GB << MAS1_TSIZE_SHIFT | ||
830 | mtspr SPRN_MAS1,r3 | ||
831 | LOAD_REG_IMMEDIATE(r3, PAGE_OFFSET | MAS2_M) | ||
832 | mtspr SPRN_MAS2,r3 | ||
833 | li r3,MAS3_SR | MAS3_SW | MAS3_SX | ||
834 | mtspr SPRN_MAS7_MAS3,r3 | ||
835 | li r3,0 | ||
836 | mtspr SPRN_MAS8,r3 | ||
837 | |||
838 | /* Write the TLB entry */ | ||
839 | tlbwe | ||
840 | |||
841 | /* Now we branch to the new virtual address mapped by this entry */ | ||
842 | LOAD_REG_IMMEDIATE(r3,1f) | ||
843 | mtctr r3 | ||
844 | bctr | ||
845 | |||
846 | 1: /* We are now running at PAGE_OFFSET; clean the TLB of everything | ||
847 | * else (XXX we should scan for bolted crap from the firmware too) | ||
848 | */ | ||
849 | PPC_TLBILX(0,0,0) | ||
850 | sync | ||
851 | isync | ||
852 | |||
853 | /* We translate LR and return */ | ||
854 | mflr r3 | ||
855 | tovirt(r3,r3) | ||
856 | mtlr r3 | ||
857 | blr | ||
858 | |||
859 | /* | ||
860 | * Main entry (boot CPU, thread 0) | ||
861 | * | ||
862 | * We enter here from head_64.S, possibly after the prom_init trampoline | ||
863 | * with r3 and r4 already saved to r31 and r30 respectively, and in 64-bit | ||
864 | * mode. Anything else is as it was left by the bootloader. | ||
865 | * | ||
866 | * Initial requirements of this port: | ||
867 | * | ||
868 | * - Kernel loaded at 0 physical | ||
869 | * - A good lump of memory mapped 0:0 by UTLB entry 0 | ||
870 | * - MSR:IS & MSR:DS set to 0 | ||
871 | * | ||
872 | * Note that some of the above requirements will be relaxed in the future | ||
873 | * as the kernel becomes smarter at dealing with different initial conditions, | ||
874 | * but for now you have to be careful. | ||
875 | */ | ||
876 | _GLOBAL(start_initialization_book3e) | ||
877 | mflr r28 | ||
878 | |||
879 | /* First, we need to set up some initial TLBs to map the kernel | ||
880 | * text, data and bss at PAGE_OFFSET. We don't have a real mode | ||
881 | * and always use AS 0, so we just set it up to match our link | ||
882 | * address and never use 0-based addresses. | ||
883 | */ | ||
884 | bl .initial_tlb_book3e | ||
885 | |||
886 | /* Init global core bits */ | ||
887 | bl .init_core_book3e | ||
888 | |||
889 | /* Init per-thread bits */ | ||
890 | bl .init_thread_book3e | ||
891 | |||
892 | /* Return to common init code */ | ||
893 | tovirt(r28,r28) | ||
894 | mtlr r28 | ||
895 | blr | ||
896 | |||
897 | |||
898 | /* | ||
899 | * Secondary core/processor entry | ||
900 | * | ||
901 | * This is entered for thread 0 of a secondary core, all other threads | ||
902 | * are expected to be stopped. It's similar to start_initialization_book3e | ||
903 | * except that it's generally entered from the holding loop in head_64.S | ||
904 | * after CPUs have been gathered by Open Firmware. | ||
905 | * | ||
906 | * We assume we are in 32-bit mode running with whatever TLB entry was | ||
907 | * set for us by the firmware or POR engine. | ||
908 | */ | ||
909 | _GLOBAL(book3e_secondary_core_init_tlb_set) | ||
910 | li r4,1 | ||
911 | b .generic_secondary_smp_init | ||
912 | |||
913 | _GLOBAL(book3e_secondary_core_init) | ||
914 | mflr r28 | ||
915 | |||
916 | /* Do we need to setup initial TLB entry ? */ | ||
917 | cmplwi r4,0 | ||
918 | bne 2f | ||
919 | |||
920 | /* Setup TLB for this core */ | ||
921 | bl .initial_tlb_book3e | ||
922 | |||
923 | /* We can return from the above running at a different | ||
924 | * address, so recalculate r2 (TOC) | ||
925 | */ | ||
926 | bl .relative_toc | ||
927 | |||
928 | /* Init global core bits */ | ||
929 | 2: bl .init_core_book3e | ||
930 | |||
931 | /* Init per-thread bits */ | ||
932 | 3: bl .init_thread_book3e | ||
933 | |||
934 | /* Return to common init code at proper virtual address. | ||
935 | * | ||
936 | * Due to various previous assumptions, we know we entered this | ||
937 | * function at either the final PAGE_OFFSET mapping or using a | ||
938 | * 1:1 mapping at 0, so we don't bother doing a complicated check | ||
939 | * here; we just ensure the return address has the right top bits. | ||
940 | * | ||
941 | * Note that if we ever want to be smarter about where we can be | ||
942 | * started from, we have to keep in mind that by the time we reach | ||
943 | * the code below we may already be running at a different location | ||
944 | * than the one we were called from, since initial_tlb_book3e can | ||
945 | * have moved us already. | ||
946 | */ | ||
947 | cmpdi cr0,r28,0 | ||
948 | blt 1f | ||
949 | lis r3,PAGE_OFFSET@highest | ||
950 | sldi r3,r3,32 | ||
951 | or r28,r28,r3 | ||
952 | 1: mtlr r28 | ||
953 | blr | ||
954 | |||
955 | _GLOBAL(book3e_secondary_thread_init) | ||
956 | mflr r28 | ||
957 | b 3b | ||
958 | |||
959 | _STATIC(init_core_book3e) | ||
960 | /* Establish the interrupt vector base */ | ||
961 | LOAD_REG_IMMEDIATE(r3, interrupt_base_book3e) | ||
962 | mtspr SPRN_IVPR,r3 | ||
963 | sync | ||
964 | blr | ||
965 | |||
966 | _STATIC(init_thread_book3e) | ||
967 | lis r3,(SPRN_EPCR_ICM | SPRN_EPCR_GICM)@h | ||
968 | mtspr SPRN_EPCR,r3 | ||
969 | |||
970 | /* Make sure interrupts are off */ | ||
971 | wrteei 0 | ||
972 | |||
973 | /* disable all timers and clear out status */ | ||
974 | li r3,0 | ||
975 | mtspr SPRN_TCR,r3 | ||
976 | mfspr r3,SPRN_TSR | ||
977 | mtspr SPRN_TSR,r3 | ||
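	/* TSR is write-one-to-clear, so writing back what we just read
	 * wipes whatever status bits were pending.
	 */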
978 | |||
979 | blr | ||
980 | |||
981 | _GLOBAL(__setup_base_ivors) | ||
982 | SET_IVOR(0, 0x020) /* Critical Input */ | ||
983 | SET_IVOR(1, 0x000) /* Machine Check */ | ||
984 | SET_IVOR(2, 0x060) /* Data Storage */ | ||
985 | SET_IVOR(3, 0x080) /* Instruction Storage */ | ||
986 | SET_IVOR(4, 0x0a0) /* External Input */ | ||
987 | SET_IVOR(5, 0x0c0) /* Alignment */ | ||
988 | SET_IVOR(6, 0x0e0) /* Program */ | ||
989 | SET_IVOR(7, 0x100) /* FP Unavailable */ | ||
990 | SET_IVOR(8, 0x120) /* System Call */ | ||
991 | SET_IVOR(9, 0x140) /* Auxiliary Processor Unavailable */ | ||
992 | SET_IVOR(10, 0x160) /* Decrementer */ | ||
993 | SET_IVOR(11, 0x180) /* Fixed Interval Timer */ | ||
994 | SET_IVOR(12, 0x1a0) /* Watchdog Timer */ | ||
995 | SET_IVOR(13, 0x1c0) /* Data TLB Error */ | ||
996 | SET_IVOR(14, 0x1e0) /* Instruction TLB Error */ | ||
997 | SET_IVOR(15, 0x040) /* Debug */ | ||
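/* The offsets above are the EXCEPTION_STUB slots laid out at
 * interrupt_base_book3e, e.g. IVOR2 (Data Storage) points at the 0x060
 * stub, which branches to exc_data_storage_book3e.
 */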
998 | |||
999 | sync | ||
1000 | |||
1001 | blr | ||