Diffstat (limited to 'arch/powerpc/kernel/head_32.S')
-rw-r--r-- | arch/powerpc/kernel/head_32.S | 1399 |
1 file changed, 1399 insertions, 0 deletions
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
new file mode 100644
index 000000000000..d9dbbd426744
--- /dev/null
+++ b/arch/powerpc/kernel/head_32.S
@@ -0,0 +1,1399 @@
1 | /* | ||
2 | * PowerPC version | ||
3 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
4 | * | ||
5 | * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP | ||
6 | * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> | ||
7 | * Adapted for Power Macintosh by Paul Mackerras. | ||
8 | * Low-level exception handlers and MMU support | ||
9 | * rewritten by Paul Mackerras. | ||
10 | * Copyright (C) 1996 Paul Mackerras. | ||
11 | * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net). | ||
12 | * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk). | ||
13 | * | ||
14 | * This file contains the low-level support and setup for the | ||
15 | * PowerPC platform, including trap and interrupt dispatch. | ||
16 | * (The PPC 8xx embedded CPUs use head_8xx.S instead.) | ||
17 | * | ||
18 | * This program is free software; you can redistribute it and/or | ||
19 | * modify it under the terms of the GNU General Public License | ||
20 | * as published by the Free Software Foundation; either version | ||
21 | * 2 of the License, or (at your option) any later version. | ||
22 | * | ||
23 | */ | ||
24 | |||
25 | #include <linux/config.h> | ||
26 | #include <asm/processor.h> | ||
27 | #include <asm/page.h> | ||
28 | #include <asm/mmu.h> | ||
29 | #include <asm/pgtable.h> | ||
30 | #include <asm/cputable.h> | ||
31 | #include <asm/cache.h> | ||
32 | #include <asm/thread_info.h> | ||
33 | #include <asm/ppc_asm.h> | ||
34 | #include <asm/asm-offsets.h> | ||
35 | |||
36 | #ifdef CONFIG_APUS | ||
37 | #include <asm/amigappc.h> | ||
38 | #endif | ||
39 | |||
40 | /* The 601 only has IBATs; cr0.eq is set on a 601 when this macro is used */ | ||
41 | #define LOAD_BAT(n, reg, RA, RB) \ | ||
42 | /* see the comment for clear_bats() -- Cort */ \ | ||
43 | li RA,0; \ | ||
44 | mtspr SPRN_IBAT##n##U,RA; \ | ||
45 | mtspr SPRN_DBAT##n##U,RA; \ | ||
46 | lwz RA,(n*16)+0(reg); \ | ||
47 | lwz RB,(n*16)+4(reg); \ | ||
48 | mtspr SPRN_IBAT##n##U,RA; \ | ||
49 | mtspr SPRN_IBAT##n##L,RB; \ | ||
50 | beq 1f; \ | ||
51 | lwz RA,(n*16)+8(reg); \ | ||
52 | lwz RB,(n*16)+12(reg); \ | ||
53 | mtspr SPRN_DBAT##n##U,RA; \ | ||
54 | mtspr SPRN_DBAT##n##L,RB; \ | ||
55 | 1: | ||
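/*
 * Roughly what LOAD_BAT does: each BAT is a pair of SPRs. The upper half
 * (IBATnU/DBATnU) holds the effective page index, the block length and the
 * Vs/Vp valid bits; the lower half (IBATnL/DBATnL) holds the physical block
 * number plus the WIMG and PP protection bits. The upper halves are cleared
 * first so a BAT is never valid while only half of it has been rewritten.
 * The caller sets cr0.eq on a 601, so the DBAT loads are skipped there
 * (a 601 has only the unified IBATs).
 */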
56 | |||
57 | .text | ||
58 | .stabs "arch/ppc/kernel/",N_SO,0,0,0f | ||
59 | .stabs "head.S",N_SO,0,0,0f | ||
60 | 0: | ||
61 | .globl _stext | ||
62 | _stext: | ||
63 | |||
64 | /* | ||
65 | * _start is defined this way because the XCOFF loader in the OpenFirmware | ||
66 | * on the powermac expects the entry point to be a procedure descriptor. | ||
67 | */ | ||
68 | .text | ||
69 | .globl _start | ||
70 | _start: | ||
71 | /* | ||
72 | * These are here for legacy reasons: the kernel used to | ||
73 | * need to look like a COFF function entry for the pmac, | ||
74 | * but we're always started by some kind of bootloader now. | ||
75 | * -- Cort | ||
76 | */ | ||
77 | nop /* used by __secondary_hold on prep (mtx) and chrp smp */ | ||
78 | nop /* used by __secondary_hold on prep (mtx) and chrp smp */ | ||
79 | nop | ||
80 | |||
81 | /* PMAC | ||
82 | * Enter here with the kernel text, data and bss loaded starting at | ||
83 | * 0, running with virtual == physical mapping. | ||
84 | * r5 points to the prom entry point (the client interface handler | ||
85 | * address). Address translation is turned on, with the prom | ||
86 | * managing the hash table. Interrupts are disabled. The stack | ||
87 | * pointer (r1) points to just below the end of the half-meg region | ||
88 | * from 0x380000 - 0x400000, which is mapped in already. | ||
89 | * | ||
90 | * If we are booted from MacOS via BootX, we enter with the kernel | ||
91 | * image loaded somewhere, and the following values in registers: | ||
92 | * r3: 'BooX' (0x426f6f58) | ||
93 | * r4: virtual address of boot_infos_t | ||
94 | * r5: 0 | ||
95 | * | ||
96 | * APUS | ||
97 | * r3: 'APUS' | ||
98 | * r4: physical address of memory base | ||
99 | * Linux/m68k style BootInfo structure at &_end. | ||
100 | * | ||
101 | * PREP | ||
102 | * This is jumped to on prep systems right after the kernel is relocated | ||
103 | * to its proper place in memory by the boot loader. The expected layout | ||
104 | * of the regs is: | ||
105 | * r3: ptr to residual data | ||
106 | * r4: initrd_start or if no initrd then 0 | ||
107 | * r5: initrd_end - unused if r4 is 0 | ||
108 | * r6: Start of command line string | ||
109 | * r7: End of command line string | ||
110 | * | ||
111 | * This just gets a minimal mmu environment setup so we can call | ||
112 | * start_here() to do the real work. | ||
113 | * -- Cort | ||
114 | */ | ||
115 | |||
116 | .globl __start | ||
117 | __start: | ||
118 | /* | ||
119 | * We have to do any OF calls before we map ourselves to KERNELBASE, | ||
120 | * because OF may have I/O devices mapped into that area | ||
121 | * (particularly on CHRP). | ||
122 | */ | ||
123 | cmpwi 0,r5,0 | ||
124 | beq 1f | ||
125 | bl prom_init | ||
126 | trap | ||
127 | |||
128 | 1: mr r31,r3 /* save parameters */ | ||
129 | mr r30,r4 | ||
130 | li r24,0 /* cpu # */ | ||
131 | |||
132 | /* | ||
133 | * early_init() does the early machine identification and does | ||
134 | * the necessary low-level setup and clears the BSS | ||
135 | * -- Cort <cort@fsmlabs.com> | ||
136 | */ | ||
137 | bl early_init | ||
138 | |||
139 | #ifdef CONFIG_APUS | ||
140 | /* On APUS the __va/__pa constants need to be set to the correct | ||
141 | * values before continuing. | ||
142 | */ | ||
143 | mr r4,r30 | ||
144 | bl fix_mem_constants | ||
145 | #endif /* CONFIG_APUS */ | ||
146 | |||
147 | /* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains | ||
148 | * the physical address we are running at, returned by early_init() | ||
149 | */ | ||
150 | bl mmu_off | ||
151 | __after_mmu_off: | ||
152 | bl clear_bats | ||
153 | bl flush_tlbs | ||
154 | |||
155 | bl initial_bats | ||
156 | #if !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) | ||
157 | bl setup_disp_bat | ||
158 | #endif | ||
159 | |||
160 | /* | ||
161 | * Call setup_cpu for CPU 0 and initialize 6xx Idle | ||
162 | */ | ||
163 | bl reloc_offset | ||
164 | li r24,0 /* cpu# */ | ||
165 | bl call_setup_cpu /* Call setup_cpu for this CPU */ | ||
166 | #ifdef CONFIG_6xx | ||
167 | bl reloc_offset | ||
168 | bl init_idle_6xx | ||
169 | #endif /* CONFIG_6xx */ | ||
170 | |||
171 | |||
172 | #ifndef CONFIG_APUS | ||
173 | /* | ||
174 | * We need to run with _start at physical address 0. | ||
175 | * On CHRP, we are loaded at 0x10000 since OF on CHRP uses | ||
176 | * the exception vectors at 0 (and therefore this copy | ||
177 | * overwrites OF's exception vectors with our own). | ||
178 | * The MMU is off at this point. | ||
179 | */ | ||
180 | bl reloc_offset | ||
181 | mr r26,r3 | ||
182 | addis r4,r3,KERNELBASE@h /* current address of _start */ | ||
183 | cmpwi 0,r4,0 /* are we already running at 0? */ | ||
184 | bne relocate_kernel | ||
185 | #endif /* CONFIG_APUS */ | ||
186 | /* | ||
187 | * we now have the 1st 16M of ram mapped with the bats. | ||
188 | * prep needs the mmu to be turned on here, but pmac already has it on. | ||
189 | * this shouldn't bother the pmac since it just gets turned on again | ||
190 | * as we jump to our code at KERNELBASE. -- Cort | ||
191 | * Actually no, pmac doesn't have it on any more. BootX enters with MMU | ||
192 | * off, and in other cases, we now turn it off before changing BATs above. | ||
193 | */ | ||
194 | turn_on_mmu: | ||
195 | mfmsr r0 | ||
196 | ori r0,r0,MSR_DR|MSR_IR | ||
197 | mtspr SPRN_SRR1,r0 | ||
198 | lis r0,start_here@h | ||
199 | ori r0,r0,start_here@l | ||
200 | mtspr SPRN_SRR0,r0 | ||
201 | SYNC | ||
202 | RFI /* enables MMU */ | ||
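/*
 * This is the usual way to change translation state on 6xx: load SRR0 with
 * the target address and SRR1 with the wanted MSR, then rfi, which sets the
 * MSR from SRR1 and jumps to SRR0 in one go. Here it turns the MMU on and
 * lands in start_here at its virtual (KERNELBASE-based) address.
 */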
203 | |||
204 | /* | ||
205 | * We need __secondary_hold as a place to hold the other cpus on | ||
206 | * an SMP machine, even when we are running a UP kernel. | ||
207 | */ | ||
208 | . = 0xc0 /* for prep bootloader */ | ||
209 | li r3,1 /* MTX only has 1 cpu */ | ||
210 | .globl __secondary_hold | ||
211 | __secondary_hold: | ||
212 | /* tell the master we're here */ | ||
213 | stw r3,4(0) | ||
214 | #ifdef CONFIG_SMP | ||
215 | 100: lwz r4,0(0) | ||
216 | /* wait until we're told to start */ | ||
217 | cmpw 0,r4,r3 | ||
218 | bne 100b | ||
219 | /* our cpu # was at addr 0 - go */ | ||
220 | mr r24,r3 /* cpu # */ | ||
221 | b __secondary_start | ||
222 | #else | ||
223 | b . | ||
224 | #endif /* CONFIG_SMP */ | ||
225 | |||
226 | /* | ||
227 | * Exception entry code. This code runs with address translation | ||
228 | * turned off, i.e. using physical addresses. | ||
229 | * We assume sprg3 has the physical address of the current | ||
230 | * task's thread_struct. | ||
231 | */ | ||
232 | #define EXCEPTION_PROLOG \ | ||
233 | mtspr SPRN_SPRG0,r10; \ | ||
234 | mtspr SPRN_SPRG1,r11; \ | ||
235 | mfcr r10; \ | ||
236 | EXCEPTION_PROLOG_1; \ | ||
237 | EXCEPTION_PROLOG_2 | ||
238 | |||
239 | #define EXCEPTION_PROLOG_1 \ | ||
240 | mfspr r11,SPRN_SRR1; /* check whether user or kernel */ \ | ||
241 | andi. r11,r11,MSR_PR; \ | ||
242 | tophys(r11,r1); /* use tophys(r1) if kernel */ \ | ||
243 | beq 1f; \ | ||
244 | mfspr r11,SPRN_SPRG3; \ | ||
245 | lwz r11,THREAD_INFO-THREAD(r11); \ | ||
246 | addi r11,r11,THREAD_SIZE; \ | ||
247 | tophys(r11,r11); \ | ||
248 | 1: subi r11,r11,INT_FRAME_SIZE /* alloc exc. frame */ | ||
249 | |||
250 | |||
251 | #define EXCEPTION_PROLOG_2 \ | ||
252 | CLR_TOP32(r11); \ | ||
253 | stw r10,_CCR(r11); /* save registers */ \ | ||
254 | stw r12,GPR12(r11); \ | ||
255 | stw r9,GPR9(r11); \ | ||
256 | mfspr r10,SPRN_SPRG0; \ | ||
257 | stw r10,GPR10(r11); \ | ||
258 | mfspr r12,SPRN_SPRG1; \ | ||
259 | stw r12,GPR11(r11); \ | ||
260 | mflr r10; \ | ||
261 | stw r10,_LINK(r11); \ | ||
262 | mfspr r12,SPRN_SRR0; \ | ||
263 | mfspr r9,SPRN_SRR1; \ | ||
264 | stw r1,GPR1(r11); \ | ||
265 | stw r1,0(r11); \ | ||
266 | tovirt(r1,r11); /* set new kernel sp */ \ | ||
267 | li r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \ | ||
268 | MTMSRD(r10); /* (except for mach check in rtas) */ \ | ||
269 | stw r0,GPR0(r11); \ | ||
270 | SAVE_4GPRS(3, r11); \ | ||
271 | SAVE_2GPRS(7, r11) | ||
272 | |||
273 | /* | ||
274 | * Note: code which follows this uses cr0.eq (set if from kernel), | ||
275 | * r11, r12 (SRR0), and r9 (SRR1). | ||
276 | * | ||
277 | * Note2: once we have set r1 we are in a position to take exceptions | ||
278 | * again, and we could thus set MSR:RI at that point. | ||
279 | */ | ||
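/*
 * A sketch of what the prolog leaves behind: r11 points at the new exception
 * frame using a physical address (translation is still off at this point),
 * while r1 holds the same frame as a virtual kernel stack pointer for use
 * once the handler runs translated. The frame already contains r0, r3-r12,
 * CR, LR and the old r1; the interrupted PC and MSR stay in r12/r9 for the
 * transfer code to save.
 */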
280 | |||
281 | /* | ||
282 | * Exception vectors. | ||
283 | */ | ||
284 | #define EXCEPTION(n, label, hdlr, xfer) \ | ||
285 | . = n; \ | ||
286 | label: \ | ||
287 | EXCEPTION_PROLOG; \ | ||
288 | addi r3,r1,STACK_FRAME_OVERHEAD; \ | ||
289 | xfer(n, hdlr) | ||
290 | |||
291 | #define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret) \ | ||
292 | li r10,trap; \ | ||
293 | stw r10,TRAP(r11); \ | ||
294 | li r10,MSR_KERNEL; \ | ||
295 | copyee(r10, r9); \ | ||
296 | bl tfer; \ | ||
297 | i##n: \ | ||
298 | .long hdlr; \ | ||
299 | .long ret | ||
300 | |||
301 | #define COPY_EE(d, s) rlwimi d,s,0,16,16 | ||
302 | #define NOCOPY(d, s) | ||
303 | |||
304 | #define EXC_XFER_STD(n, hdlr) \ | ||
305 | EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full, \ | ||
306 | ret_from_except_full) | ||
307 | |||
308 | #define EXC_XFER_LITE(n, hdlr) \ | ||
309 | EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \ | ||
310 | ret_from_except) | ||
311 | |||
312 | #define EXC_XFER_EE(n, hdlr) \ | ||
313 | EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \ | ||
314 | ret_from_except_full) | ||
315 | |||
316 | #define EXC_XFER_EE_LITE(n, hdlr) \ | ||
317 | EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \ | ||
318 | ret_from_except) | ||
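/*
 * For illustration, EXC_XFER_EE(0x600, alignment_exception) expands via
 * EXC_XFER_TEMPLATE to roughly:
 *
 *	li	r10,0x600		(trap number)
 *	stw	r10,TRAP(r11)
 *	li	r10,MSR_KERNEL
 *	rlwimi	r10,r9,0,16,16		(COPY_EE: keep MSR_EE from SRR1)
 *	bl	transfer_to_handler_full
 * i0x600:
 *	.long	alignment_exception
 *	.long	ret_from_except_full
 *
 * The _LITE variants pass n+1 as the trap number and use the lighter
 * transfer_to_handler / ret_from_except path; NOCOPY leaves MSR_EE as it
 * is in MSR_KERNEL instead of copying it from the interrupted context.
 */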
319 | |||
320 | /* System reset */ | ||
321 | /* core99 pmac starts the secondary here by changing the vector, and | ||
322 | putting it back to what it was (unknown_exception) when done. */ | ||
323 | #if defined(CONFIG_GEMINI) && defined(CONFIG_SMP) | ||
324 | . = 0x100 | ||
325 | b __secondary_start_gemini | ||
326 | #else | ||
327 | EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD) | ||
328 | #endif | ||
329 | |||
330 | /* Machine check */ | ||
331 | /* | ||
332 | * On CHRP, this is complicated by the fact that we could get a | ||
333 | * machine check inside RTAS, and we have no guarantee that certain | ||
334 | * critical registers will have the values we expect. The set of | ||
335 | * registers that might have bad values includes all the GPRs | ||
336 | * and all the BATs. We indicate that we are in RTAS by putting | ||
337 | * a non-zero value, the address of the exception frame to use, | ||
338 | * in SPRG2. The machine check handler checks SPRG2 and uses its | ||
339 | * value if it is non-zero. If we ever needed to free up SPRG2, | ||
340 | * we could use a field in the thread_info or thread_struct instead. | ||
341 | * (Other exception handlers assume that r1 is a valid kernel stack | ||
342 | * pointer when we take an exception from supervisor mode.) | ||
343 | * -- paulus. | ||
344 | */ | ||
345 | . = 0x200 | ||
346 | mtspr SPRN_SPRG0,r10 | ||
347 | mtspr SPRN_SPRG1,r11 | ||
348 | mfcr r10 | ||
349 | #ifdef CONFIG_PPC_CHRP | ||
350 | mfspr r11,SPRN_SPRG2 | ||
351 | cmpwi 0,r11,0 | ||
352 | bne 7f | ||
353 | #endif /* CONFIG_PPC_CHRP */ | ||
354 | EXCEPTION_PROLOG_1 | ||
355 | 7: EXCEPTION_PROLOG_2 | ||
356 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
357 | #ifdef CONFIG_PPC_CHRP | ||
358 | mfspr r4,SPRN_SPRG2 | ||
359 | cmpwi cr1,r4,0 | ||
360 | bne cr1,1f | ||
361 | #endif | ||
362 | EXC_XFER_STD(0x200, machine_check_exception) | ||
363 | #ifdef CONFIG_PPC_CHRP | ||
364 | 1: b machine_check_in_rtas | ||
365 | #endif | ||
366 | |||
367 | /* Data access exception. */ | ||
368 | . = 0x300 | ||
369 | DataAccess: | ||
370 | EXCEPTION_PROLOG | ||
371 | mfspr r10,SPRN_DSISR | ||
372 | andis. r0,r10,0xa470 /* weird error? */ | ||
373 | bne 1f /* if not, try to put a PTE */ | ||
374 | mfspr r4,SPRN_DAR /* into the hash table */ | ||
375 | rlwinm r3,r10,32-15,21,21 /* DSISR_STORE -> _PAGE_RW */ | ||
376 | bl hash_page | ||
377 | 1: stw r10,_DSISR(r11) | ||
378 | mr r5,r10 | ||
379 | mfspr r4,SPRN_DAR | ||
380 | EXC_XFER_EE_LITE(0x300, handle_page_fault) | ||
381 | |||
382 | |||
383 | /* Instruction access exception. */ | ||
384 | . = 0x400 | ||
385 | InstructionAccess: | ||
386 | EXCEPTION_PROLOG | ||
387 | andis. r0,r9,0x4000 /* no pte found? */ | ||
388 | beq 1f /* if so, try to put a PTE */ | ||
389 | li r3,0 /* into the hash table */ | ||
390 | mr r4,r12 /* SRR0 is fault address */ | ||
391 | bl hash_page | ||
392 | 1: mr r4,r12 | ||
393 | mr r5,r9 | ||
394 | EXC_XFER_EE_LITE(0x400, handle_page_fault) | ||
395 | |||
396 | /* External interrupt */ | ||
397 | EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE) | ||
398 | |||
399 | /* Alignment exception */ | ||
400 | . = 0x600 | ||
401 | Alignment: | ||
402 | EXCEPTION_PROLOG | ||
403 | mfspr r4,SPRN_DAR | ||
404 | stw r4,_DAR(r11) | ||
405 | mfspr r5,SPRN_DSISR | ||
406 | stw r5,_DSISR(r11) | ||
407 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
408 | EXC_XFER_EE(0x600, alignment_exception) | ||
409 | |||
410 | /* Program check exception */ | ||
411 | EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD) | ||
412 | |||
413 | /* Floating-point unavailable */ | ||
414 | . = 0x800 | ||
415 | FPUnavailable: | ||
416 | EXCEPTION_PROLOG | ||
417 | bne load_up_fpu /* if from user, just load it up */ | ||
418 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
419 | EXC_XFER_EE_LITE(0x800, kernel_fp_unavailable_exception) | ||
420 | |||
421 | /* Decrementer */ | ||
422 | EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE) | ||
423 | |||
424 | EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_EE) | ||
425 | EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_EE) | ||
426 | |||
427 | /* System call */ | ||
428 | . = 0xc00 | ||
429 | SystemCall: | ||
430 | EXCEPTION_PROLOG | ||
431 | EXC_XFER_EE_LITE(0xc00, DoSyscall) | ||
432 | |||
433 | /* Single step - not used on 601 */ | ||
434 | EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD) | ||
435 | EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_EE) | ||
436 | |||
437 | /* | ||
438 | * The Altivec unavailable trap is at 0x0f20. Foo. | ||
439 | * We effectively remap it to 0x3000. | ||
440 | * We include an altivec unavailable exception vector even if | ||
441 | * not configured for Altivec, so that you can't panic a | ||
442 | * non-altivec kernel running on a machine with altivec just | ||
443 | * by executing an altivec instruction. | ||
444 | */ | ||
445 | . = 0xf00 | ||
446 | b Trap_0f | ||
447 | |||
448 | . = 0xf20 | ||
449 | b AltiVecUnavailable | ||
450 | |||
451 | Trap_0f: | ||
452 | EXCEPTION_PROLOG | ||
453 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
454 | EXC_XFER_EE(0xf00, unknown_exception) | ||
455 | |||
456 | /* | ||
457 | * Handle TLB miss for instruction on 603/603e. | ||
458 | * Note: we get an alternate set of r0 - r3 to use automatically. | ||
459 | */ | ||
460 | . = 0x1000 | ||
461 | InstructionTLBMiss: | ||
462 | /* | ||
463 | * r0: stored ctr | ||
464 | * r1: linux style pte ( later becomes ppc hardware pte ) | ||
465 | * r2: ptr to linux-style pte | ||
466 | * r3: scratch | ||
467 | */ | ||
468 | mfctr r0 | ||
469 | /* Get PTE (linux-style) and check access */ | ||
470 | mfspr r3,SPRN_IMISS | ||
471 | lis r1,KERNELBASE@h /* check if kernel address */ | ||
472 | cmplw 0,r3,r1 | ||
473 | mfspr r2,SPRN_SPRG3 | ||
474 | li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */ | ||
475 | lwz r2,PGDIR(r2) | ||
476 | blt+ 112f | ||
477 | lis r2,swapper_pg_dir@ha /* if kernel address, use */ | ||
478 | addi r2,r2,swapper_pg_dir@l /* kernel page table */ | ||
479 | mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */ | ||
480 | rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ | ||
481 | 112: tophys(r2,r2) | ||
482 | rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ | ||
483 | lwz r2,0(r2) /* get pmd entry */ | ||
484 | rlwinm. r2,r2,0,0,19 /* extract address of pte page */ | ||
485 | beq- InstructionAddressInvalid /* return if no mapping */ | ||
486 | rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */ | ||
487 | lwz r3,0(r2) /* get linux-style pte */ | ||
488 | andc. r1,r1,r3 /* check access & ~permission */ | ||
489 | bne- InstructionAddressInvalid /* return if access not permitted */ | ||
490 | ori r3,r3,_PAGE_ACCESSED /* set _PAGE_ACCESSED in pte */ | ||
491 | /* | ||
492 | * NOTE! We are assuming this is not an SMP system, otherwise | ||
493 | * we would need to update the pte atomically with lwarx/stwcx. | ||
494 | */ | ||
495 | stw r3,0(r2) /* update PTE (accessed bit) */ | ||
496 | /* Convert linux-style PTE to low word of PPC-style PTE */ | ||
497 | rlwinm r1,r3,32-10,31,31 /* _PAGE_RW -> PP lsb */ | ||
498 | rlwinm r2,r3,32-7,31,31 /* _PAGE_DIRTY -> PP lsb */ | ||
499 | and r1,r1,r2 /* writable if _RW and _DIRTY */ | ||
500 | rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */ | ||
501 | rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */ | ||
502 | ori r1,r1,0xe14 /* clear out reserved bits and M */ | ||
503 | andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */ | ||
504 | mtspr SPRN_RPA,r1 | ||
505 | mfspr r3,SPRN_IMISS | ||
506 | tlbli r3 | ||
507 | mfspr r3,SPRN_SRR1 /* Need to restore CR0 */ | ||
508 | mtcrf 0x80,r3 | ||
509 | rfi | ||
510 | InstructionAddressInvalid: | ||
511 | mfspr r3,SPRN_SRR1 | ||
512 | rlwinm r1,r3,9,6,6 /* Get load/store bit */ | ||
513 | |||
514 | addis r1,r1,0x2000 | ||
515 | mtspr SPRN_DSISR,r1 /* (shouldn't be needed) */ | ||
516 | mtctr r0 /* Restore CTR */ | ||
517 | andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */ | ||
518 | or r2,r2,r1 | ||
519 | mtspr SPRN_SRR1,r2 | ||
520 | mfspr r1,SPRN_IMISS /* Get failing address */ | ||
521 | rlwinm. r2,r2,0,31,31 /* Check for little endian access */ | ||
522 | rlwimi r2,r2,1,30,30 /* change 1 -> 3 */ | ||
523 | xor r1,r1,r2 | ||
524 | mtspr SPRN_DAR,r1 /* Set fault address */ | ||
525 | mfmsr r0 /* Restore "normal" registers */ | ||
526 | xoris r0,r0,MSR_TGPR>>16 | ||
527 | mtcrf 0x80,r3 /* Restore CR0 */ | ||
528 | mtmsr r0 | ||
529 | b InstructionAccess | ||
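/*
 * The walk above, in sketch form: the faulting effective address splits into
 * 10 bits of pgdir index, 10 bits of pte index and a 12-bit page offset.
 * SPRG3 gives the current thread, from which PGDIR is fetched (or
 * swapper_pg_dir for kernel addresses); the pmd entry yields the physical
 * address of the pte page, the pte is checked against the required access
 * bits (_PAGE_PRESENT and, for user accesses, _PAGE_USER), _PAGE_ACCESSED is
 * set, and the low word of a hardware PTE is built and written to RPA before
 * tlbli loads the TLB entry.
 */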
530 | |||
531 | /* | ||
532 | * Handle TLB miss for DATA Load operation on 603/603e | ||
533 | */ | ||
534 | . = 0x1100 | ||
535 | DataLoadTLBMiss: | ||
536 | /* | ||
537 | * r0: stored ctr | ||
538 | * r1: linux style pte ( later becomes ppc hardware pte ) | ||
539 | * r2: ptr to linux-style pte | ||
540 | * r3: scratch | ||
541 | */ | ||
542 | mfctr r0 | ||
543 | /* Get PTE (linux-style) and check access */ | ||
544 | mfspr r3,SPRN_DMISS | ||
545 | lis r1,KERNELBASE@h /* check if kernel address */ | ||
546 | cmplw 0,r3,r1 | ||
547 | mfspr r2,SPRN_SPRG3 | ||
548 | li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */ | ||
549 | lwz r2,PGDIR(r2) | ||
550 | blt+ 112f | ||
551 | lis r2,swapper_pg_dir@ha /* if kernel address, use */ | ||
552 | addi r2,r2,swapper_pg_dir@l /* kernel page table */ | ||
553 | mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */ | ||
554 | rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ | ||
555 | 112: tophys(r2,r2) | ||
556 | rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ | ||
557 | lwz r2,0(r2) /* get pmd entry */ | ||
558 | rlwinm. r2,r2,0,0,19 /* extract address of pte page */ | ||
559 | beq- DataAddressInvalid /* return if no mapping */ | ||
560 | rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */ | ||
561 | lwz r3,0(r2) /* get linux-style pte */ | ||
562 | andc. r1,r1,r3 /* check access & ~permission */ | ||
563 | bne- DataAddressInvalid /* return if access not permitted */ | ||
564 | ori r3,r3,_PAGE_ACCESSED /* set _PAGE_ACCESSED in pte */ | ||
565 | /* | ||
566 | * NOTE! We are assuming this is not an SMP system, otherwise | ||
567 | * we would need to update the pte atomically with lwarx/stwcx. | ||
568 | */ | ||
569 | stw r3,0(r2) /* update PTE (accessed bit) */ | ||
570 | /* Convert linux-style PTE to low word of PPC-style PTE */ | ||
571 | rlwinm r1,r3,32-10,31,31 /* _PAGE_RW -> PP lsb */ | ||
572 | rlwinm r2,r3,32-7,31,31 /* _PAGE_DIRTY -> PP lsb */ | ||
573 | and r1,r1,r2 /* writable if _RW and _DIRTY */ | ||
574 | rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */ | ||
575 | rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */ | ||
576 | ori r1,r1,0xe14 /* clear out reserved bits and M */ | ||
577 | andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */ | ||
578 | mtspr SPRN_RPA,r1 | ||
579 | mfspr r3,SPRN_DMISS | ||
580 | tlbld r3 | ||
581 | mfspr r3,SPRN_SRR1 /* Need to restore CR0 */ | ||
582 | mtcrf 0x80,r3 | ||
583 | rfi | ||
584 | DataAddressInvalid: | ||
585 | mfspr r3,SPRN_SRR1 | ||
586 | rlwinm r1,r3,9,6,6 /* Get load/store bit */ | ||
587 | addis r1,r1,0x2000 | ||
588 | mtspr SPRN_DSISR,r1 | ||
589 | mtctr r0 /* Restore CTR */ | ||
590 | andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */ | ||
591 | mtspr SPRN_SRR1,r2 | ||
592 | mfspr r1,SPRN_DMISS /* Get failing address */ | ||
593 | rlwinm. r2,r2,0,31,31 /* Check for little endian access */ | ||
594 | beq 20f /* Jump if big endian */ | ||
595 | xori r1,r1,3 | ||
596 | 20: mtspr SPRN_DAR,r1 /* Set fault address */ | ||
597 | mfmsr r0 /* Restore "normal" registers */ | ||
598 | xoris r0,r0,MSR_TGPR>>16 | ||
599 | mtcrf 0x80,r3 /* Restore CR0 */ | ||
600 | mtmsr r0 | ||
601 | b DataAccess | ||
602 | |||
603 | /* | ||
604 | * Handle TLB miss for DATA Store on 603/603e | ||
605 | */ | ||
606 | . = 0x1200 | ||
607 | DataStoreTLBMiss: | ||
608 | /* | ||
609 | * r0: stored ctr | ||
610 | * r1: linux style pte ( later becomes ppc hardware pte ) | ||
611 | * r2: ptr to linux-style pte | ||
612 | * r3: scratch | ||
613 | */ | ||
614 | mfctr r0 | ||
615 | /* Get PTE (linux-style) and check access */ | ||
616 | mfspr r3,SPRN_DMISS | ||
617 | lis r1,KERNELBASE@h /* check if kernel address */ | ||
618 | cmplw 0,r3,r1 | ||
619 | mfspr r2,SPRN_SPRG3 | ||
620 | li r1,_PAGE_RW|_PAGE_USER|_PAGE_PRESENT /* access flags */ | ||
621 | lwz r2,PGDIR(r2) | ||
622 | blt+ 112f | ||
623 | lis r2,swapper_pg_dir@ha /* if kernel address, use */ | ||
624 | addi r2,r2,swapper_pg_dir@l /* kernel page table */ | ||
625 | mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */ | ||
626 | rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ | ||
627 | 112: tophys(r2,r2) | ||
628 | rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ | ||
629 | lwz r2,0(r2) /* get pmd entry */ | ||
630 | rlwinm. r2,r2,0,0,19 /* extract address of pte page */ | ||
631 | beq- DataAddressInvalid /* return if no mapping */ | ||
632 | rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */ | ||
633 | lwz r3,0(r2) /* get linux-style pte */ | ||
634 | andc. r1,r1,r3 /* check access & ~permission */ | ||
635 | bne- DataAddressInvalid /* return if access not permitted */ | ||
636 | ori r3,r3,_PAGE_ACCESSED|_PAGE_DIRTY | ||
637 | /* | ||
638 | * NOTE! We are assuming this is not an SMP system, otherwise | ||
639 | * we would need to update the pte atomically with lwarx/stwcx. | ||
640 | */ | ||
641 | stw r3,0(r2) /* update PTE (accessed/dirty bits) */ | ||
642 | /* Convert linux-style PTE to low word of PPC-style PTE */ | ||
643 | rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */ | ||
644 | li r1,0xe15 /* clear out reserved bits and M */ | ||
645 | andc r1,r3,r1 /* PP = user? 2: 0 */ | ||
646 | mtspr SPRN_RPA,r1 | ||
647 | mfspr r3,SPRN_DMISS | ||
648 | tlbld r3 | ||
649 | mfspr r3,SPRN_SRR1 /* Need to restore CR0 */ | ||
650 | mtcrf 0x80,r3 | ||
651 | rfi | ||
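/*
 * The PP encoding these handlers produce, spelled out: kernel pages get
 * PP=0 (supervisor read/write, no user access); user pages get PP=2
 * (read/write) when the Linux pte has both _PAGE_RW and _PAGE_DIRTY,
 * otherwise PP=3 (read-only), so a later store faults and the dirty bit can
 * then be set. The store-miss path only ever produces PP=0 or PP=2, since
 * _PAGE_RW was already required and _PAGE_DIRTY is set here.
 */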
652 | |||
653 | #ifndef CONFIG_ALTIVEC | ||
654 | #define altivec_assist_exception unknown_exception | ||
655 | #endif | ||
656 | |||
657 | EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_EE) | ||
658 | EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_EE) | ||
659 | EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE) | ||
660 | EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_EE) | ||
661 | EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD) | ||
662 | EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE) | ||
663 | EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE) | ||
664 | EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE) | ||
665 | EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE) | ||
666 | EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE) | ||
667 | EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE) | ||
668 | EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE) | ||
669 | EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE) | ||
670 | EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_EE) | ||
671 | EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_EE) | ||
672 | EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_EE) | ||
673 | EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_EE) | ||
674 | EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_EE) | ||
675 | EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_EE) | ||
676 | EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_EE) | ||
677 | EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_EE) | ||
678 | EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_EE) | ||
679 | EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_EE) | ||
680 | EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_EE) | ||
681 | EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_EE) | ||
682 | EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_EE) | ||
683 | EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_EE) | ||
684 | EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_EE) | ||
685 | EXCEPTION(0x2f00, MOLTrampoline, unknown_exception, EXC_XFER_EE_LITE) | ||
686 | |||
687 | .globl mol_trampoline | ||
688 | .set mol_trampoline, i0x2f00 | ||
689 | |||
690 | . = 0x3000 | ||
691 | |||
692 | AltiVecUnavailable: | ||
693 | EXCEPTION_PROLOG | ||
694 | #ifdef CONFIG_ALTIVEC | ||
695 | bne load_up_altivec /* if from user, just load it up */ | ||
696 | #endif /* CONFIG_ALTIVEC */ | ||
697 | EXC_XFER_EE_LITE(0xf20, altivec_unavailable_exception) | ||
698 | |||
699 | #ifdef CONFIG_ALTIVEC | ||
700 | /* Note that the AltiVec support is closely modeled after the FP | ||
701 | * support. Changes to one are likely to be applicable to the | ||
702 | * other! */ | ||
703 | load_up_altivec: | ||
704 | /* | ||
705 | * Disable AltiVec for the task which had AltiVec previously, | ||
706 | * and save its AltiVec registers in its thread_struct. | ||
707 | * Enables AltiVec for use in the kernel on return. | ||
708 | * On SMP we know the AltiVec units are free, since we give it up every | ||
709 | * switch. -- Kumar | ||
710 | */ | ||
711 | mfmsr r5 | ||
712 | oris r5,r5,MSR_VEC@h | ||
713 | MTMSRD(r5) /* enable use of AltiVec now */ | ||
714 | isync | ||
715 | /* | ||
716 | * For SMP, we don't do lazy AltiVec switching because it just gets too | ||
717 | * horrendously complex, especially when a task switches from one CPU | ||
718 | * to another. Instead we call giveup_altivec in switch_to. | ||
719 | */ | ||
720 | #ifndef CONFIG_SMP | ||
721 | tophys(r6,0) | ||
722 | addis r3,r6,last_task_used_altivec@ha | ||
723 | lwz r4,last_task_used_altivec@l(r3) | ||
724 | cmpwi 0,r4,0 | ||
725 | beq 1f | ||
726 | add r4,r4,r6 | ||
727 | addi r4,r4,THREAD /* want THREAD of last_task_used_altivec */ | ||
728 | SAVE_32VRS(0,r10,r4) | ||
729 | mfvscr vr0 | ||
730 | li r10,THREAD_VSCR | ||
731 | stvx vr0,r10,r4 | ||
732 | lwz r5,PT_REGS(r4) | ||
733 | add r5,r5,r6 | ||
734 | lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
735 | lis r10,MSR_VEC@h | ||
736 | andc r4,r4,r10 /* disable altivec for previous task */ | ||
737 | stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
738 | 1: | ||
739 | #endif /* CONFIG_SMP */ | ||
740 | /* enable use of AltiVec after return */ | ||
741 | oris r9,r9,MSR_VEC@h | ||
742 | mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */ | ||
743 | li r4,1 | ||
744 | li r10,THREAD_VSCR | ||
745 | stw r4,THREAD_USED_VR(r5) | ||
746 | lvx vr0,r10,r5 | ||
747 | mtvscr vr0 | ||
748 | REST_32VRS(0,r10,r5) | ||
749 | #ifndef CONFIG_SMP | ||
750 | subi r4,r5,THREAD | ||
751 | sub r4,r4,r6 | ||
752 | stw r4,last_task_used_altivec@l(r3) | ||
753 | #endif /* CONFIG_SMP */ | ||
754 | /* restore registers and return */ | ||
755 | /* we haven't used ctr or xer or lr */ | ||
756 | b fast_exception_return | ||
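/*
 * A sketch of the lazy-switch bookkeeping used above (UP only): the vector
 * registers live in the thread_struct of last_task_used_altivec; when
 * another task takes the unavailable trap, the old owner's VRs and VSCR are
 * saved, MSR_VEC is cleared in its saved MSR, and the new task's registers
 * are loaded and recorded as the current owner. On SMP the unit is given up
 * at every context switch, so there is nothing to steal here.
 */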
757 | |||
758 | /* | ||
759 | * AltiVec unavailable trap from kernel - print a message, but let | ||
760 | * the task use AltiVec in the kernel until it returns to user mode. | ||
761 | */ | ||
762 | KernelAltiVec: | ||
763 | lwz r3,_MSR(r1) | ||
764 | oris r3,r3,MSR_VEC@h | ||
765 | stw r3,_MSR(r1) /* enable use of AltiVec after return */ | ||
766 | lis r3,87f@h | ||
767 | ori r3,r3,87f@l | ||
768 | mr r4,r2 /* current */ | ||
769 | lwz r5,_NIP(r1) | ||
770 | bl printk | ||
771 | b ret_from_except | ||
772 | 87: .string "AltiVec used in kernel (task=%p, pc=%x) \n" | ||
773 | .align 4,0 | ||
774 | |||
775 | /* | ||
776 | * giveup_altivec(tsk) | ||
777 | * Disable AltiVec for the task given as the argument, | ||
778 | * and save the AltiVec registers in its thread_struct. | ||
779 | * Enables AltiVec for use in the kernel on return. | ||
780 | */ | ||
781 | |||
782 | .globl giveup_altivec | ||
783 | giveup_altivec: | ||
784 | mfmsr r5 | ||
785 | oris r5,r5,MSR_VEC@h | ||
786 | SYNC | ||
787 | MTMSRD(r5) /* enable use of AltiVec now */ | ||
788 | isync | ||
789 | cmpwi 0,r3,0 | ||
790 | beqlr- /* if no previous owner, done */ | ||
791 | addi r3,r3,THREAD /* want THREAD of task */ | ||
792 | lwz r5,PT_REGS(r3) | ||
793 | cmpwi 0,r5,0 | ||
794 | SAVE_32VRS(0, r4, r3) | ||
795 | mfvscr vr0 | ||
796 | li r4,THREAD_VSCR | ||
797 | stvx vr0,r4,r3 | ||
798 | beq 1f | ||
799 | lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
800 | lis r3,MSR_VEC@h | ||
801 | andc r4,r4,r3 /* disable AltiVec for previous task */ | ||
802 | stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
803 | 1: | ||
804 | #ifndef CONFIG_SMP | ||
805 | li r5,0 | ||
806 | lis r4,last_task_used_altivec@ha | ||
807 | stw r5,last_task_used_altivec@l(r4) | ||
808 | #endif /* CONFIG_SMP */ | ||
809 | blr | ||
810 | #endif /* CONFIG_ALTIVEC */ | ||
811 | |||
812 | /* | ||
813 | * This code is jumped to from the startup code to copy | ||
814 | * the kernel image to physical address 0. | ||
815 | */ | ||
816 | relocate_kernel: | ||
817 | addis r9,r26,klimit@ha /* fetch klimit */ | ||
818 | lwz r25,klimit@l(r9) | ||
819 | addis r25,r25,-KERNELBASE@h | ||
820 | li r3,0 /* Destination base address */ | ||
821 | li r6,0 /* Destination offset */ | ||
822 | li r5,0x4000 /* # bytes of memory to copy */ | ||
823 | bl copy_and_flush /* copy the first 0x4000 bytes */ | ||
824 | addi r0,r3,4f@l /* jump to the address of 4f */ | ||
825 | mtctr r0 /* in copy and do the rest. */ | ||
826 | bctr /* jump to the copy */ | ||
827 | 4: mr r5,r25 | ||
828 | bl copy_and_flush /* copy the rest */ | ||
829 | b turn_on_mmu | ||
830 | |||
831 | /* | ||
832 | * Copy routine used to copy the kernel to start at physical address 0 | ||
833 | * and flush and invalidate the caches as needed. | ||
834 | * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset | ||
835 | * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5. | ||
836 | */ | ||
837 | copy_and_flush: | ||
838 | addi r5,r5,-4 | ||
839 | addi r6,r6,-4 | ||
840 | 4: li r0,L1_CACHE_LINE_SIZE/4 | ||
841 | mtctr r0 | ||
842 | 3: addi r6,r6,4 /* copy a cache line */ | ||
843 | lwzx r0,r6,r4 | ||
844 | stwx r0,r6,r3 | ||
845 | bdnz 3b | ||
846 | dcbst r6,r3 /* write it to memory */ | ||
847 | sync | ||
848 | icbi r6,r3 /* flush the icache line */ | ||
849 | cmplw 0,r6,r5 | ||
850 | blt 4b | ||
851 | sync /* additional sync needed on g4 */ | ||
852 | isync | ||
853 | addi r5,r5,4 | ||
854 | addi r6,r6,4 | ||
855 | blr | ||
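/*
 * copy_and_flush in outline: r6 is a pre-incremented offset, so the loop
 * copies one L1 cache line at a time from r4+r6 to r3+r6, then dcbst pushes
 * the newly written line out to memory and icbi discards any stale icache
 * copy of it, so the relocated kernel can be executed safely.
 */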
856 | |||
857 | #ifdef CONFIG_APUS | ||
858 | /* | ||
859 | * On APUS the physical base address of the kernel is not known at compile | ||
860 | * time, which means the __pa/__va constants used are incorrect. The | ||
861 | * __init section records the virtual addresses of the instructions that use | ||
862 | * these constants, so all that has to be done is to fix those up before | ||
863 | * continuing the kernel boot. | ||
864 | * | ||
865 | * r4 = The physical address of the kernel base. | ||
866 | */ | ||
867 | fix_mem_constants: | ||
868 | mr r10,r4 | ||
869 | addis r10,r10,-KERNELBASE@h /* virt_to_phys constant */ | ||
870 | neg r11,r10 /* phys_to_virt constant */ | ||
871 | |||
872 | lis r12,__vtop_table_begin@h | ||
873 | ori r12,r12,__vtop_table_begin@l | ||
874 | add r12,r12,r10 /* table begin phys address */ | ||
875 | lis r13,__vtop_table_end@h | ||
876 | ori r13,r13,__vtop_table_end@l | ||
877 | add r13,r13,r10 /* table end phys address */ | ||
878 | subi r12,r12,4 | ||
879 | subi r13,r13,4 | ||
880 | 1: lwzu r14,4(r12) /* virt address of instruction */ | ||
881 | add r14,r14,r10 /* phys address of instruction */ | ||
882 | lwz r15,0(r14) /* instruction, now insert top */ | ||
883 | rlwimi r15,r10,16,16,31 /* half of vp const in low half */ | ||
884 | stw r15,0(r14) /* of instruction and restore. */ | ||
885 | dcbst r0,r14 /* write it to memory */ | ||
886 | sync | ||
887 | icbi r0,r14 /* flush the icache line */ | ||
888 | cmpw r12,r13 | ||
889 | bne 1b | ||
890 | sync /* additional sync needed on g4 */ | ||
891 | isync | ||
892 | |||
893 | /* | ||
894 | * Map the memory where the exception handlers will | ||
895 | * be copied to when hash constants have been patched. | ||
896 | */ | ||
897 | #ifdef CONFIG_APUS_FAST_EXCEPT | ||
898 | lis r8,0xfff0 | ||
899 | #else | ||
900 | lis r8,0 | ||
901 | #endif | ||
902 | ori r8,r8,0x2 /* 128KB, supervisor */ | ||
903 | mtspr SPRN_DBAT3U,r8 | ||
904 | mtspr SPRN_DBAT3L,r8 | ||
905 | |||
906 | lis r12,__ptov_table_begin@h | ||
907 | ori r12,r12,__ptov_table_begin@l | ||
908 | add r12,r12,r10 /* table begin phys address */ | ||
909 | lis r13,__ptov_table_end@h | ||
910 | ori r13,r13,__ptov_table_end@l | ||
911 | add r13,r13,r10 /* table end phys address */ | ||
912 | subi r12,r12,4 | ||
913 | subi r13,r13,4 | ||
914 | 1: lwzu r14,4(r12) /* virt address of instruction */ | ||
915 | add r14,r14,r10 /* phys address of instruction */ | ||
916 | lwz r15,0(r14) /* instruction, now insert top */ | ||
917 | rlwimi r15,r11,16,16,31 /* half of pv const in low half*/ | ||
918 | stw r15,0(r14) /* of instruction and restore. */ | ||
919 | dcbst r0,r14 /* write it to memory */ | ||
920 | sync | ||
921 | icbi r0,r14 /* flush the icache line */ | ||
922 | cmpw r12,r13 | ||
923 | bne 1b | ||
924 | |||
925 | sync /* additional sync needed on g4 */ | ||
926 | isync /* No speculative loading until now */ | ||
927 | blr | ||
928 | |||
929 | /*********************************************************************** | ||
930 | * Please note that on APUS the exception handlers are located at the | ||
931 | * physical address 0xfff00000. For this reason, the exception handlers | ||
932 | * cannot use relative branches to access the code below. | ||
933 | ***********************************************************************/ | ||
934 | #endif /* CONFIG_APUS */ | ||
935 | |||
936 | #ifdef CONFIG_SMP | ||
937 | #ifdef CONFIG_GEMINI | ||
938 | .globl __secondary_start_gemini | ||
939 | __secondary_start_gemini: | ||
940 | mfspr r4,SPRN_HID0 | ||
941 | ori r4,r4,HID0_ICFI | ||
942 | li r3,0 | ||
943 | ori r3,r3,HID0_ICE | ||
944 | andc r4,r4,r3 | ||
945 | mtspr SPRN_HID0,r4 | ||
946 | sync | ||
947 | b __secondary_start | ||
948 | #endif /* CONFIG_GEMINI */ | ||
949 | |||
950 | .globl __secondary_start_pmac_0 | ||
951 | __secondary_start_pmac_0: | ||
952 | /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */ | ||
953 | li r24,0 | ||
954 | b 1f | ||
955 | li r24,1 | ||
956 | b 1f | ||
957 | li r24,2 | ||
958 | b 1f | ||
959 | li r24,3 | ||
960 | 1: | ||
961 | /* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0 | ||
962 | set to map the 0xf0000000 - 0xffffffff region */ | ||
963 | mfmsr r0 | ||
964 | rlwinm r0,r0,0,28,26 /* clear DR (0x10) */ | ||
965 | SYNC | ||
966 | mtmsr r0 | ||
967 | isync | ||
968 | |||
969 | .globl __secondary_start | ||
970 | __secondary_start: | ||
971 | /* Copy some CPU settings from CPU 0 */ | ||
972 | bl __restore_cpu_setup | ||
973 | |||
974 | lis r3,-KERNELBASE@h | ||
975 | mr r4,r24 | ||
976 | bl call_setup_cpu /* Call setup_cpu for this CPU */ | ||
977 | #ifdef CONFIG_6xx | ||
978 | lis r3,-KERNELBASE@h | ||
979 | bl init_idle_6xx | ||
980 | #endif /* CONFIG_6xx */ | ||
981 | |||
982 | /* get current_thread_info and current */ | ||
983 | lis r1,secondary_ti@ha | ||
984 | tophys(r1,r1) | ||
985 | lwz r1,secondary_ti@l(r1) | ||
986 | tophys(r2,r1) | ||
987 | lwz r2,TI_TASK(r2) | ||
988 | |||
989 | /* stack */ | ||
990 | addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD | ||
991 | li r0,0 | ||
992 | tophys(r3,r1) | ||
993 | stw r0,0(r3) | ||
994 | |||
995 | /* load up the MMU */ | ||
996 | bl load_up_mmu | ||
997 | |||
998 | /* ptr to phys current thread */ | ||
999 | tophys(r4,r2) | ||
1000 | addi r4,r4,THREAD /* phys address of our thread_struct */ | ||
1001 | CLR_TOP32(r4) | ||
1002 | mtspr SPRN_SPRG3,r4 | ||
1003 | li r3,0 | ||
1004 | mtspr SPRN_SPRG2,r3 /* 0 => not in RTAS */ | ||
1005 | |||
1006 | /* enable MMU and jump to start_secondary */ | ||
1007 | li r4,MSR_KERNEL | ||
1008 | FIX_SRR1(r4,r5) | ||
1009 | lis r3,start_secondary@h | ||
1010 | ori r3,r3,start_secondary@l | ||
1011 | mtspr SPRN_SRR0,r3 | ||
1012 | mtspr SPRN_SRR1,r4 | ||
1013 | SYNC | ||
1014 | RFI | ||
1015 | #endif /* CONFIG_SMP */ | ||
1016 | |||
1017 | /* | ||
1018 | * These generic dummy functions are kept for CPUs not | ||
1019 | * covered by CONFIG_6xx | ||
1020 | */ | ||
1021 | #if !defined(CONFIG_6xx) | ||
1022 | _GLOBAL(__save_cpu_setup) | ||
1023 | blr | ||
1024 | _GLOBAL(__restore_cpu_setup) | ||
1025 | blr | ||
1026 | #endif /* !defined(CONFIG_6xx) */ | ||
1027 | |||
1028 | |||
1029 | /* | ||
1030 | * Load stuff into the MMU. Intended to be called with | ||
1031 | * IR=0 and DR=0. | ||
1032 | */ | ||
1033 | load_up_mmu: | ||
1034 | sync /* Force all PTE updates to finish */ | ||
1035 | isync | ||
1036 | tlbia /* Clear all TLB entries */ | ||
1037 | sync /* wait for tlbia/tlbie to finish */ | ||
1038 | TLBSYNC /* ... on all CPUs */ | ||
1039 | /* Load the SDR1 register (hash table base & size) */ | ||
1040 | lis r6,_SDR1@ha | ||
1041 | tophys(r6,r6) | ||
1042 | lwz r6,_SDR1@l(r6) | ||
1043 | mtspr SPRN_SDR1,r6 | ||
1044 | li r0,16 /* load up segment register values */ | ||
1045 | mtctr r0 /* for context 0 */ | ||
1046 | lis r3,0x2000 /* Ku = 1, VSID = 0 */ | ||
1047 | li r4,0 | ||
1048 | 3: mtsrin r3,r4 | ||
1049 | addi r3,r3,0x111 /* increment VSID */ | ||
1050 | addis r4,r4,0x1000 /* address of next segment */ | ||
1051 | bdnz 3b | ||
1052 | |||
1053 | /* Load the BAT registers with the values set up by MMU_init. | ||
1054 | MMU_init takes care of whether we're on a 601 or not. */ | ||
1055 | mfpvr r3 | ||
1056 | srwi r3,r3,16 | ||
1057 | cmpwi r3,1 | ||
1058 | lis r3,BATS@ha | ||
1059 | addi r3,r3,BATS@l | ||
1060 | tophys(r3,r3) | ||
1061 | LOAD_BAT(0,r3,r4,r5) | ||
1062 | LOAD_BAT(1,r3,r4,r5) | ||
1063 | LOAD_BAT(2,r3,r4,r5) | ||
1064 | LOAD_BAT(3,r3,r4,r5) | ||
1065 | |||
1066 | blr | ||
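/*
 * The segment register values set up above, as a worked example for kernel
 * context 0: the initial value 0x20000000 has the user protection key set
 * (Ku = 1, as the comment says) and VSID = 0, and each successive register
 * adds 0x111, so SR0..SR15 end up with VSIDs 0x000, 0x111, 0x222, ... 0xfff.
 * SDR1 points the hardware at the hash table set up earlier by the MMU
 * init code.
 */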
1067 | |||
1068 | /* | ||
1069 | * This is where the main kernel code starts. | ||
1070 | */ | ||
1071 | start_here: | ||
1072 | /* ptr to current */ | ||
1073 | lis r2,init_task@h | ||
1074 | ori r2,r2,init_task@l | ||
1075 | /* Set up for using our exception vectors */ | ||
1076 | /* ptr to phys current thread */ | ||
1077 | tophys(r4,r2) | ||
1078 | addi r4,r4,THREAD /* init task's THREAD */ | ||
1079 | CLR_TOP32(r4) | ||
1080 | mtspr SPRN_SPRG3,r4 | ||
1081 | li r3,0 | ||
1082 | mtspr SPRN_SPRG2,r3 /* 0 => not in RTAS */ | ||
1083 | |||
1084 | /* stack */ | ||
1085 | lis r1,init_thread_union@ha | ||
1086 | addi r1,r1,init_thread_union@l | ||
1087 | li r0,0 | ||
1088 | stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) | ||
1089 | /* | ||
1090 | * Do early platform-specific initialization, | ||
1091 | * and set up the MMU. | ||
1092 | */ | ||
1093 | mr r3,r31 | ||
1094 | mr r4,r30 | ||
1095 | bl machine_init | ||
1096 | bl MMU_init | ||
1097 | |||
1098 | #ifdef CONFIG_APUS | ||
1099 | /* Copy exception code to exception vector base on APUS. */ | ||
1100 | lis r4,KERNELBASE@h | ||
1101 | #ifdef CONFIG_APUS_FAST_EXCEPT | ||
1102 | lis r3,0xfff0 /* Copy to 0xfff00000 */ | ||
1103 | #else | ||
1104 | lis r3,0 /* Copy to 0x00000000 */ | ||
1105 | #endif | ||
1106 | li r5,0x4000 /* # bytes of memory to copy */ | ||
1107 | li r6,0 | ||
1108 | bl copy_and_flush /* copy the first 0x4000 bytes */ | ||
1109 | #endif /* CONFIG_APUS */ | ||
1110 | |||
1111 | /* | ||
1112 | * Go back to running unmapped so we can load up new values | ||
1113 | * for SDR1 (hash table pointer) and the segment registers | ||
1114 | * and change to using our exception vectors. | ||
1115 | */ | ||
1116 | lis r4,2f@h | ||
1117 | ori r4,r4,2f@l | ||
1118 | tophys(r4,r4) | ||
1119 | li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR) | ||
1120 | FIX_SRR1(r3,r5) | ||
1121 | mtspr SPRN_SRR0,r4 | ||
1122 | mtspr SPRN_SRR1,r3 | ||
1123 | SYNC | ||
1124 | RFI | ||
1125 | /* Load up the kernel context */ | ||
1126 | 2: bl load_up_mmu | ||
1127 | |||
1128 | #ifdef CONFIG_BDI_SWITCH | ||
1129 | /* Add helper information for the Abatron bdiGDB debugger. | ||
1130 | * We do this here because we know the mmu is disabled, and | ||
1131 | * will be enabled for real in just a few instructions. | ||
1132 | */ | ||
1133 | lis r5, abatron_pteptrs@h | ||
1134 | ori r5, r5, abatron_pteptrs@l | ||
1135 | stw r5, 0xf0(r0) /* This must match your Abatron config */ | ||
1136 | lis r6, swapper_pg_dir@h | ||
1137 | ori r6, r6, swapper_pg_dir@l | ||
1138 | tophys(r5, r5) | ||
1139 | stw r6, 0(r5) | ||
1140 | #endif /* CONFIG_BDI_SWITCH */ | ||
1141 | |||
1142 | /* Now turn on the MMU for real! */ | ||
1143 | li r4,MSR_KERNEL | ||
1144 | FIX_SRR1(r4,r5) | ||
1145 | lis r3,start_kernel@h | ||
1146 | ori r3,r3,start_kernel@l | ||
1147 | mtspr SPRN_SRR0,r3 | ||
1148 | mtspr SPRN_SRR1,r4 | ||
1149 | SYNC | ||
1150 | RFI | ||
1151 | |||
1152 | /* | ||
1153 | * Set up the segment registers for a new context. | ||
1154 | */ | ||
1155 | _GLOBAL(set_context) | ||
1156 | mulli r3,r3,897 /* multiply context by skew factor */ | ||
1157 | rlwinm r3,r3,4,8,27 /* VSID = (context & 0xfffff) << 4 */ | ||
1158 | addis r3,r3,0x6000 /* Set Ks, Ku bits */ | ||
1159 | li r0,NUM_USER_SEGMENTS | ||
1160 | mtctr r0 | ||
1161 | |||
1162 | #ifdef CONFIG_BDI_SWITCH | ||
1163 | /* Context switch the PTE pointer for the Abatron BDI2000. | ||
1164 | * The PGDIR is passed as second argument. | ||
1165 | */ | ||
1166 | lis r5, KERNELBASE@h | ||
1167 | lwz r5, 0xf0(r5) | ||
1168 | stw r4, 0x4(r5) | ||
1169 | #endif | ||
1170 | li r4,0 | ||
1171 | isync | ||
1172 | 3: | ||
1173 | mtsrin r3,r4 | ||
1174 | addi r3,r3,0x111 /* next VSID */ | ||
1175 | rlwinm r3,r3,0,8,3 /* clear out any overflow from VSID field */ | ||
1176 | addis r4,r4,0x1000 /* address of next segment */ | ||
1177 | bdnz 3b | ||
1178 | sync | ||
1179 | isync | ||
1180 | blr | ||
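/*
 * Worked example of the skew above, for context 1: 1 * 897 = 0x381, shifted
 * left 4 and masked gives a VSID base of 0x3810; with the Ks/Ku bits this
 * makes SR0 = 0x60003810, SR1 = 0x60003921, and so on, each user segment
 * getting base + n*0x111. The multiply by 897 spreads consecutive contexts
 * across the hash table so their PTEs are less likely to collide in the
 * same primary groups.
 */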
1181 | |||
1182 | /* | ||
1183 | * An undocumented "feature" of 604e requires that the v bit | ||
1184 | * be cleared before changing BAT values. | ||
1185 | * | ||
1186 | * Also, newer IBM firmware does not clear BAT3 and BAT4, so | ||
1187 | * this makes sure it's done. | ||
1188 | * -- Cort | ||
1189 | */ | ||
1190 | clear_bats: | ||
1191 | li r10,0 | ||
1192 | mfspr r9,SPRN_PVR | ||
1193 | rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */ | ||
1194 | cmpwi r9, 1 | ||
1195 | beq 1f | ||
1196 | |||
1197 | mtspr SPRN_DBAT0U,r10 | ||
1198 | mtspr SPRN_DBAT0L,r10 | ||
1199 | mtspr SPRN_DBAT1U,r10 | ||
1200 | mtspr SPRN_DBAT1L,r10 | ||
1201 | mtspr SPRN_DBAT2U,r10 | ||
1202 | mtspr SPRN_DBAT2L,r10 | ||
1203 | mtspr SPRN_DBAT3U,r10 | ||
1204 | mtspr SPRN_DBAT3L,r10 | ||
1205 | 1: | ||
1206 | mtspr SPRN_IBAT0U,r10 | ||
1207 | mtspr SPRN_IBAT0L,r10 | ||
1208 | mtspr SPRN_IBAT1U,r10 | ||
1209 | mtspr SPRN_IBAT1L,r10 | ||
1210 | mtspr SPRN_IBAT2U,r10 | ||
1211 | mtspr SPRN_IBAT2L,r10 | ||
1212 | mtspr SPRN_IBAT3U,r10 | ||
1213 | mtspr SPRN_IBAT3L,r10 | ||
1214 | BEGIN_FTR_SECTION | ||
1215 | /* Here's a tweak: at this point, CPU setup has | ||
1216 | * not been called yet, so HIGH_BAT_EN may not be | ||
1217 | * set in HID0 for the 745x processors. However, it | ||
1218 | * seems that doesn't affect our ability to actually | ||
1219 | * write to these SPRs. | ||
1220 | */ | ||
1221 | mtspr SPRN_DBAT4U,r10 | ||
1222 | mtspr SPRN_DBAT4L,r10 | ||
1223 | mtspr SPRN_DBAT5U,r10 | ||
1224 | mtspr SPRN_DBAT5L,r10 | ||
1225 | mtspr SPRN_DBAT6U,r10 | ||
1226 | mtspr SPRN_DBAT6L,r10 | ||
1227 | mtspr SPRN_DBAT7U,r10 | ||
1228 | mtspr SPRN_DBAT7L,r10 | ||
1229 | mtspr SPRN_IBAT4U,r10 | ||
1230 | mtspr SPRN_IBAT4L,r10 | ||
1231 | mtspr SPRN_IBAT5U,r10 | ||
1232 | mtspr SPRN_IBAT5L,r10 | ||
1233 | mtspr SPRN_IBAT6U,r10 | ||
1234 | mtspr SPRN_IBAT6L,r10 | ||
1235 | mtspr SPRN_IBAT7U,r10 | ||
1236 | mtspr SPRN_IBAT7L,r10 | ||
1237 | END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS) | ||
1238 | blr | ||
1239 | |||
1240 | flush_tlbs: | ||
1241 | lis r10, 0x40 | ||
1242 | 1: addic. r10, r10, -0x1000 | ||
1243 | tlbie r10 | ||
1244 | blt 1b | ||
1245 | sync | ||
1246 | blr | ||
1247 | |||
1248 | mmu_off: | ||
1249 | addi r4, r3, __after_mmu_off - _start | ||
1250 | mfmsr r3 | ||
1251 | andi. r0,r3,MSR_DR|MSR_IR /* MMU enabled? */ | ||
1252 | beqlr | ||
1253 | andc r3,r3,r0 | ||
1254 | mtspr SPRN_SRR0,r4 | ||
1255 | mtspr SPRN_SRR1,r3 | ||
1256 | sync | ||
1257 | RFI | ||
1258 | |||
1259 | /* | ||
1260 | * Use the first BAT registers to map the first chunk of RAM (two 8MB | ||
1261 | * BATs on a 601, one 256MB BAT otherwise, 8MB on APUS) to KERNELBASE. | ||
1262 | * From this point on we can't safely call OF any more. | ||
1263 | */ | ||
1264 | initial_bats: | ||
1265 | lis r11,KERNELBASE@h | ||
1266 | mfspr r9,SPRN_PVR | ||
1267 | rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */ | ||
1268 | cmpwi 0,r9,1 | ||
1269 | bne 4f | ||
1270 | ori r11,r11,4 /* set up BAT registers for 601 */ | ||
1271 | li r8,0x7f /* valid, block length = 8MB */ | ||
1272 | oris r9,r11,0x800000@h /* set up BAT reg for 2nd 8M */ | ||
1273 | oris r10,r8,0x800000@h /* set up BAT reg for 2nd 8M */ | ||
1274 | mtspr SPRN_IBAT0U,r11 /* N.B. 601 has valid bit in */ | ||
1275 | mtspr SPRN_IBAT0L,r8 /* lower BAT register */ | ||
1276 | mtspr SPRN_IBAT1U,r9 | ||
1277 | mtspr SPRN_IBAT1L,r10 | ||
1278 | isync | ||
1279 | blr | ||
1280 | |||
1281 | 4: tophys(r8,r11) | ||
1282 | #ifdef CONFIG_SMP | ||
1283 | ori r8,r8,0x12 /* R/W access, M=1 */ | ||
1284 | #else | ||
1285 | ori r8,r8,2 /* R/W access */ | ||
1286 | #endif /* CONFIG_SMP */ | ||
1287 | #ifdef CONFIG_APUS | ||
1288 | ori r11,r11,BL_8M<<2|0x2 /* set up 8MB BAT registers for 604 */ | ||
1289 | #else | ||
1290 | ori r11,r11,BL_256M<<2|0x2 /* set up BAT registers for 604 */ | ||
1291 | #endif /* CONFIG_APUS */ | ||
1292 | |||
1293 | mtspr SPRN_DBAT0L,r8 /* N.B. 6xx (not 601) have valid */ | ||
1294 | mtspr SPRN_DBAT0U,r11 /* bit in upper BAT register */ | ||
1295 | mtspr SPRN_IBAT0L,r8 | ||
1296 | mtspr SPRN_IBAT0U,r11 | ||
1297 | isync | ||
1298 | blr | ||
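/*
 * Decoding the values used above for non-601 processors: the upper BAT word
 * is KERNELBASE | (BL << 2) | 2, i.e. a 256MB (8MB on APUS) block, valid in
 * supervisor mode only; the lower word is the physical base | 0x12 on SMP
 * (PP=10 read/write, M=1 coherent) or | 0x02 on UP (read/write, no coherency
 * bit). The same pair is written to both IBAT0 and DBAT0 so the mapping
 * covers code and data.
 */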
1299 | |||
1300 | #if !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) | ||
1301 | setup_disp_bat: | ||
1302 | /* | ||
1303 | * Set up the display BAT prepared for us in prom.c | ||
1304 | */ | ||
1305 | mflr r8 | ||
1306 | bl reloc_offset | ||
1307 | mtlr r8 | ||
1308 | addis r8,r3,disp_BAT@ha | ||
1309 | addi r8,r8,disp_BAT@l | ||
1310 | lwz r11,0(r8) | ||
1311 | lwz r8,4(r8) | ||
1312 | mfspr r9,SPRN_PVR | ||
1313 | rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */ | ||
1314 | cmpwi 0,r9,1 | ||
1315 | beq 1f | ||
1316 | mtspr SPRN_DBAT3L,r8 | ||
1317 | mtspr SPRN_DBAT3U,r11 | ||
1318 | blr | ||
1319 | 1: mtspr SPRN_IBAT3L,r8 | ||
1320 | mtspr SPRN_IBAT3U,r11 | ||
1321 | blr | ||
1322 | |||
1323 | #endif /* !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) */ | ||
1324 | |||
1325 | |||
1326 | #ifdef CONFIG_8260 | ||
1327 | /* Jump into the system reset for the rom. | ||
1328 | * We first disable the MMU, and then jump to the ROM reset address. | ||
1329 | * | ||
1330 | * r3 is the board info structure, r4 is the location for starting. | ||
1331 | * I use this for building a small kernel that can load other kernels, | ||
1332 | * rather than trying to write or rely on a rom monitor that can tftp load. | ||
1333 | */ | ||
1334 | .globl m8260_gorom | ||
1335 | m8260_gorom: | ||
1336 | mfmsr r0 | ||
1337 | rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */ | ||
1338 | sync | ||
1339 | mtmsr r0 | ||
1340 | sync | ||
1341 | mfspr r11, SPRN_HID0 | ||
1342 | lis r10, 0 | ||
1343 | ori r10,r10,HID0_ICE|HID0_DCE | ||
1344 | andc r11, r11, r10 | ||
1345 | mtspr SPRN_HID0, r11 | ||
1346 | isync | ||
1347 | li r5, MSR_ME|MSR_RI | ||
1348 | lis r6,2f@h | ||
1349 | addis r6,r6,-KERNELBASE@h | ||
1350 | ori r6,r6,2f@l | ||
1351 | mtspr SPRN_SRR0,r6 | ||
1352 | mtspr SPRN_SRR1,r5 | ||
1353 | isync | ||
1354 | sync | ||
1355 | rfi | ||
1356 | 2: | ||
1357 | mtlr r4 | ||
1358 | blr | ||
1359 | #endif | ||
1360 | |||
1361 | |||
1362 | /* | ||
1363 | * We put a few things here that have to be page-aligned. | ||
1364 | * This stuff goes at the beginning of the data segment, | ||
1365 | * which is page-aligned. | ||
1366 | */ | ||
1367 | .data | ||
1368 | .globl sdata | ||
1369 | sdata: | ||
1370 | .globl empty_zero_page | ||
1371 | empty_zero_page: | ||
1372 | .space 4096 | ||
1373 | |||
1374 | .globl swapper_pg_dir | ||
1375 | swapper_pg_dir: | ||
1376 | .space 4096 | ||
1377 | |||
1378 | /* | ||
1379 | * This space gets a copy of optional info passed to us by the bootstrap. | ||
1380 | * It is used to pass parameters into the kernel like root=/dev/sda1, etc. | ||
1381 | */ | ||
1382 | .globl cmd_line | ||
1383 | cmd_line: | ||
1384 | .space 512 | ||
1385 | |||
1386 | .globl intercept_table | ||
1387 | intercept_table: | ||
1388 | .long 0, 0, i0x200, i0x300, i0x400, 0, i0x600, i0x700 | ||
1389 | .long i0x800, 0, 0, 0, 0, i0xd00, 0, 0 | ||
1390 | .long 0, 0, 0, i0x1300, 0, 0, 0, 0 | ||
1391 | .long 0, 0, 0, 0, 0, 0, 0, 0 | ||
1392 | .long 0, 0, 0, 0, 0, 0, 0, 0 | ||
1393 | .long 0, 0, 0, 0, 0, 0, 0, 0 | ||
1394 | |||
1395 | /* Room for two PTE pointers, usually the kernel and current user pointers | ||
1396 | * to their respective root page table. | ||
1397 | */ | ||
1398 | abatron_pteptrs: | ||
1399 | .space 8 | ||