path: root/arch/powerpc/kernel/head_40x.S
authorJosh Boyer <jwboyer@linux.vnet.ibm.com>2007-08-20 08:27:07 -0400
committerJosh Boyer <jwboyer@linux.vnet.ibm.com>2007-08-20 08:27:07 -0400
commit15f6527e8e63e793f8ab1ddce4ed3c487ebd0d42 (patch)
tree50095bd2a42f97fdd6559f27b51c6336a8067d86 /arch/powerpc/kernel/head_40x.S
parenta65517f857bf6657839957617af942a4b631fba5 (diff)
[POWERPC] Rename 4xx paths to 40x
4xx is a bit of a misnomer for certain things, as they really apply to PowerPC 40x only. Rename some of the files to clean this up.

Signed-off-by: Josh Boyer <jwboyer@linux.vnet.ibm.com>
Acked-by: David Gibson <david@gibson.dropbear.id.au>
Diffstat (limited to 'arch/powerpc/kernel/head_40x.S')
-rw-r--r--  arch/powerpc/kernel/head_40x.S  1021
1 file changed, 1021 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
new file mode 100644
index 000000000000..adc7f8097cd4
--- /dev/null
+++ b/arch/powerpc/kernel/head_40x.S
@@ -0,0 +1,1021 @@
1/*
2 * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
3 * Initial PowerPC version.
4 * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
5 * Rewritten for PReP
6 * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
7 * Low-level exception handlers, MMU support, and rewrite.
8 * Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
9 * PowerPC 8xx modifications.
10 * Copyright (c) 1998-1999 TiVo, Inc.
11 * PowerPC 403GCX modifications.
12 * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
13 * PowerPC 403GCX/405GP modifications.
14 * Copyright 2000 MontaVista Software Inc.
15 * PPC405 modifications
16 * PowerPC 403GCX/405GP modifications.
17 * Author: MontaVista Software, Inc.
18 * frank_rowand@mvista.com or source@mvista.com
19 * debbie_chu@mvista.com
20 *
21 *
22 * Module name: head_40x.S
23 *
24 * Description:
25 * Kernel execution entry point code.
26 *
27 * This program is free software; you can redistribute it and/or
28 * modify it under the terms of the GNU General Public License
29 * as published by the Free Software Foundation; either version
30 * 2 of the License, or (at your option) any later version.
31 *
32 */
33
34#include <asm/processor.h>
35#include <asm/page.h>
36#include <asm/mmu.h>
37#include <asm/pgtable.h>
38#include <asm/ibm4xx.h>
39#include <asm/cputable.h>
40#include <asm/thread_info.h>
41#include <asm/ppc_asm.h>
42#include <asm/asm-offsets.h>
43
44/* As with the other PowerPC ports, it is expected that when code
45 * execution begins here, the following registers contain valid, yet
46 * optional, information:
47 *
48 * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
49 * r4 - Starting address of the init RAM disk
50 * r5 - Ending address of the init RAM disk
51 * r6 - Start of kernel command line string (e.g. "mem=96m")
52 * r7 - End of kernel command line string
53 *
54 * This is all going to change RSN when we add bi_recs....... -- Dan
55 */
56 .text
57_GLOBAL(_stext)
58_GLOBAL(_start)
59
60 /* Save parameters we are passed.
61 */
62 mr r31,r3
63 mr r30,r4
64 mr r29,r5
65 mr r28,r6
66 mr r27,r7
67
68 /* We have to turn on the MMU right away so we get cache modes
69 * set correctly.
70 */
71 bl initial_mmu
72
73/* We now have the lower 16 Meg mapped into TLB entries, and the caches
74 * ready to work.
75 */
76turn_on_mmu:
77 lis r0,MSR_KERNEL@h
78 ori r0,r0,MSR_KERNEL@l
79 mtspr SPRN_SRR1,r0
80 lis r0,start_here@h
81 ori r0,r0,start_here@l
82 mtspr SPRN_SRR0,r0
83 SYNC
84 rfi /* enables MMU */
85 b . /* prevent prefetch past rfi */
86
87/*
88 * This area is used for temporarily saving registers during the
89 * critical exception prolog.
90 */
91 . = 0xc0
92crit_save:
93_GLOBAL(crit_r10)
94 .space 4
95_GLOBAL(crit_r11)
96 .space 4
97
98/*
99 * Exception vector entry code. This code runs with address translation
100 * turned off (i.e. using physical addresses). We assume SPRG3 has the
101 * physical address of the current task thread_struct.
102 * Note that we have to have decremented r1 before we write to any fields
103 * of the exception frame, since a critical interrupt could occur at any
104 * time, and it will write to the area immediately below the current r1.
105 */
106#define NORMAL_EXCEPTION_PROLOG \
107 mtspr SPRN_SPRG0,r10; /* save two registers to work with */\
108 mtspr SPRN_SPRG1,r11; \
109 mtspr SPRN_SPRG2,r1; \
110 mfcr r10; /* save CR in r10 for now */\
111 mfspr r11,SPRN_SRR1; /* check whether user or kernel */\
112 andi. r11,r11,MSR_PR; \
113 beq 1f; \
114 mfspr r1,SPRN_SPRG3; /* if from user, start at top of */\
115 lwz r1,THREAD_INFO-THREAD(r1); /* this thread's kernel stack */\
116 addi r1,r1,THREAD_SIZE; \
1171: subi r1,r1,INT_FRAME_SIZE; /* Allocate an exception frame */\
118 tophys(r11,r1); \
119 stw r10,_CCR(r11); /* save various registers */\
120 stw r12,GPR12(r11); \
121 stw r9,GPR9(r11); \
122 mfspr r10,SPRN_SPRG0; \
123 stw r10,GPR10(r11); \
124 mfspr r12,SPRN_SPRG1; \
125 stw r12,GPR11(r11); \
126 mflr r10; \
127 stw r10,_LINK(r11); \
128 mfspr r10,SPRN_SPRG2; \
129 mfspr r12,SPRN_SRR0; \
130 stw r10,GPR1(r11); \
131 mfspr r9,SPRN_SRR1; \
132 stw r10,0(r11); \
133 rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
134 stw r0,GPR0(r11); \
135 SAVE_4GPRS(3, r11); \
136 SAVE_2GPRS(7, r11)
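
/*
 * A loose C-style sketch of what the prolog above does (illustrative
 * pseudocode only; the field names are not real kernel structures):
 *
 *	if (srr1 & MSR_PR)			// trapped from user mode:
 *		r1 = current_thread_info() + THREAD_SIZE;   // kernel stack top
 *	r1 -= INT_FRAME_SIZE;			// carve out an exception frame
 *	frame = tophys(r1);			// translation is still off here
 *	frame->ccr  = cr;			// save everything a C handler
 *	frame->link = lr;			// may clobber: CR, LR, r0-r12,
 *	frame->gpr[...] = r0, r1, r3..r12;	// and the old stack pointer
 *	r12 = srr0;				// return address for the handler
 *	r9  = srr1 & ~MSR_WE;			// saved MSR, wait-state cleared
 */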
137
138/*
139 * Exception prolog for critical exceptions. This is a little different
140 * from the normal exception prolog above since a critical exception
141 * can potentially occur at any point during normal exception processing.
142 * Thus we cannot use the same SPRG registers as the normal prolog above.
143 * Instead we use a couple of words of memory at low physical addresses.
144 * This is OK since we don't support SMP on these processors.
145 */
146#define CRITICAL_EXCEPTION_PROLOG \
147 stw r10,crit_r10@l(0); /* save two registers to work with */\
148 stw r11,crit_r11@l(0); \
149 mfcr r10; /* save CR in r10 for now */\
150 mfspr r11,SPRN_SRR3; /* check whether user or kernel */\
151 andi. r11,r11,MSR_PR; \
152 lis r11,critical_stack_top@h; \
153 ori r11,r11,critical_stack_top@l; \
154 beq 1f; \
155 /* COMING FROM USER MODE */ \
156 mfspr r11,SPRN_SPRG3; /* if from user, start at top of */\
157 lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
158 addi r11,r11,THREAD_SIZE; \
1591: subi r11,r11,INT_FRAME_SIZE; /* Allocate an exception frame */\
160 tophys(r11,r11); \
161 stw r10,_CCR(r11); /* save various registers */\
162 stw r12,GPR12(r11); \
163 stw r9,GPR9(r11); \
164 mflr r10; \
165 stw r10,_LINK(r11); \
166 mfspr r12,SPRN_DEAR; /* save DEAR and ESR in the frame */\
167 stw r12,_DEAR(r11); /* since they may have had stuff */\
168 mfspr r9,SPRN_ESR; /* in them at the point where the */\
169 stw r9,_ESR(r11); /* exception was taken */\
170 mfspr r12,SPRN_SRR2; \
171 stw r1,GPR1(r11); \
172 mfspr r9,SPRN_SRR3; \
173 stw r1,0(r11); \
174 tovirt(r1,r11); \
175 rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
176 stw r0,GPR0(r11); \
177 SAVE_4GPRS(3, r11); \
178 SAVE_2GPRS(7, r11)
179
180 /*
181 * State at this point:
182 * r9 saved in stack frame, now saved SRR3 & ~MSR_WE
183 * r10 saved in crit_r10 and in stack frame, trashed
184 * r11 saved in crit_r11 and in stack frame,
185 * now phys stack/exception frame pointer
186 * r12 saved in stack frame, now saved SRR2
187 * CR saved in stack frame, CR0.EQ = !SRR3.PR
188 * LR, DEAR, ESR in stack frame
189 * r1 saved in stack frame, now virt stack/excframe pointer
190 * r0, r3-r8 saved in stack frame
191 */
192
193/*
194 * Exception vectors.
195 */
196#define START_EXCEPTION(n, label) \
197 . = n; \
198label:
199
200#define EXCEPTION(n, label, hdlr, xfer) \
201 START_EXCEPTION(n, label); \
202 NORMAL_EXCEPTION_PROLOG; \
203 addi r3,r1,STACK_FRAME_OVERHEAD; \
204 xfer(n, hdlr)
205
206#define CRITICAL_EXCEPTION(n, label, hdlr) \
207 START_EXCEPTION(n, label); \
208 CRITICAL_EXCEPTION_PROLOG; \
209 addi r3,r1,STACK_FRAME_OVERHEAD; \
210 EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
211 NOCOPY, crit_transfer_to_handler, \
212 ret_from_crit_exc)
213
214#define EXC_XFER_TEMPLATE(hdlr, trap, msr, copyee, tfer, ret) \
215 li r10,trap; \
216 stw r10,_TRAP(r11); \
217 lis r10,msr@h; \
218 ori r10,r10,msr@l; \
219 copyee(r10, r9); \
220 bl tfer; \
221 .long hdlr; \
222 .long ret
223
224#define COPY_EE(d, s) rlwimi d,s,0,16,16
225#define NOCOPY(d, s)
226
227#define EXC_XFER_STD(n, hdlr) \
228 EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, NOCOPY, transfer_to_handler_full, \
229 ret_from_except_full)
230
231#define EXC_XFER_LITE(n, hdlr) \
232 EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, NOCOPY, transfer_to_handler, \
233 ret_from_except)
234
235#define EXC_XFER_EE(n, hdlr) \
236 EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, COPY_EE, transfer_to_handler_full, \
237 ret_from_except_full)
238
239#define EXC_XFER_EE_LITE(n, hdlr) \
240 EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, COPY_EE, transfer_to_handler, \
241 ret_from_except)
242
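/* As an illustration of how these fit together: the external interrupt
 * vector below, EXCEPTION(0x0500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE),
 * expands to approximately the following.  The trap value is 0x501 because
 * the "lite" transfer uses n+1, and NOCOPY means MSR_EE is not copied from
 * the saved MSR into the handler's MSR:
 *
 *	. = 0x0500
 * HardwareInterrupt:
 *	NORMAL_EXCEPTION_PROLOG
 *	addi	r3,r1,STACK_FRAME_OVERHEAD
 *	li	r10,0x501
 *	stw	r10,_TRAP(r11)
 *	lis	r10,MSR_KERNEL@h
 *	ori	r10,r10,MSR_KERNEL@l
 *	bl	transfer_to_handler
 *	.long	do_IRQ
 *	.long	ret_from_except
 */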
243
244/*
245 * 0x0100 - Critical Interrupt Exception
246 */
247 CRITICAL_EXCEPTION(0x0100, CriticalInterrupt, unknown_exception)
248
249/*
250 * 0x0200 - Machine Check Exception
251 */
252 CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
253
254/*
255 * 0x0300 - Data Storage Exception
256 * This happens for just a few reasons. U0 set (but we don't do that),
257 * or zone protection fault (user violation, write to protected page).
258 * If this is just an update of modified status, we do that quickly
259 * and exit. Otherwise, we call heavyweight functions to do the work.
260 */
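/*
 * A loose C-style sketch of the fast path below (pseudocode; the helper
 * names are invented for illustration):
 *
 *	if (mfspr(SPRN_ESR) & ESR_DIZ)		// zone fault: always a real fault
 *		goto bail_to_DataAccess;
 *	addr = mfspr(SPRN_DEAR);
 *	pgd  = (addr >= TASK_SIZE) ? swapper_pg_dir : current->thread.pgdir;
 *	pte  = walk_two_level(pgd, addr);	// bail if there is no PTE page
 *	if (!(pte & _PAGE_RW))
 *		goto bail_to_DataAccess;	// let handle_page_fault() decide
 *	pte |= _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_HWWRITE;
 *	store_pte_and_rewrite_tlb(pte);		// tlbsx + tlbwe, then rfi
 */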
261 START_EXCEPTION(0x0300, DataStorage)
262 mtspr SPRN_SPRG0, r10 /* Save some working registers */
263 mtspr SPRN_SPRG1, r11
264#ifdef CONFIG_403GCX
265 stw r12, 0(r0)
266 stw r9, 4(r0)
267 mfcr r11
268 mfspr r12, SPRN_PID
269 stw r11, 8(r0)
270 stw r12, 12(r0)
271#else
272 mtspr SPRN_SPRG4, r12
273 mtspr SPRN_SPRG5, r9
274 mfcr r11
275 mfspr r12, SPRN_PID
276 mtspr SPRN_SPRG7, r11
277 mtspr SPRN_SPRG6, r12
278#endif
279
280 /* First, check if it was a zone fault (which means a user
281 * tried to access a kernel or read-protected page - always
282 * a SEGV). All other faults here must be stores, so no
283 * need to check ESR_DST as well. */
284 mfspr r10, SPRN_ESR
285 andis. r10, r10, ESR_DIZ@h
286 bne 2f
287
288 mfspr r10, SPRN_DEAR /* Get faulting address */
289
290 /* If we are faulting a kernel address, we have to use the
291 * kernel page tables.
292 */
293 lis r11, TASK_SIZE@h
294 cmplw r10, r11
295 blt+ 3f
296 lis r11, swapper_pg_dir@h
297 ori r11, r11, swapper_pg_dir@l
298 li r9, 0
299 mtspr SPRN_PID, r9 /* TLB will have 0 TID */
300 b 4f
301
302 /* Get the PGD for the current thread.
303 */
3043:
305 mfspr r11,SPRN_SPRG3
306 lwz r11,PGDIR(r11)
3074:
308 tophys(r11, r11)
309 rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */
310 lwz r11, 0(r11) /* Get L1 entry */
311 rlwinm. r12, r11, 0, 0, 19 /* Extract L2 (pte) base address */
312 beq 2f /* Bail if no table */
313
314 rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */
315 lwz r11, 0(r12) /* Get Linux PTE */
316
317 andi. r9, r11, _PAGE_RW /* Is it writeable? */
318 beq 2f /* Bail if not */
319
320 /* Update 'changed'.
321 */
322 ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
323 stw r11, 0(r12) /* Update Linux page table */
324
325 /* Most of the Linux PTE is ready to load into the TLB LO.
326 * We set ZSEL, where only the LS-bit determines user access.
327 * We set execute, because we don't have the granularity to
328 * properly set this at the page level (Linux problem).
329 * If shared is set, we cause a zero PID->TID load.
330 * Many of these bits are software only. Bits we don't set
331 * here we (properly should) assume have the appropriate value.
332 */
333 li r12, 0x0ce2
334 andc r11, r11, r12 /* Make sure 20, 21 are zero */
335
336 /* find the TLB index that caused the fault. It has to be here.
337 */
338 tlbsx r9, 0, r10
339
340 tlbwe r11, r9, TLB_DATA /* Load TLB LO */
341
342 /* Done...restore registers and get out of here.
343 */
344#ifdef CONFIG_403GCX
345 lwz r12, 12(r0)
346 lwz r11, 8(r0)
347 mtspr SPRN_PID, r12
348 mtcr r11
349 lwz r9, 4(r0)
350 lwz r12, 0(r0)
351#else
352 mfspr r12, SPRN_SPRG6
353 mfspr r11, SPRN_SPRG7
354 mtspr SPRN_PID, r12
355 mtcr r11
356 mfspr r9, SPRN_SPRG5
357 mfspr r12, SPRN_SPRG4
358#endif
359 mfspr r11, SPRN_SPRG1
360 mfspr r10, SPRN_SPRG0
361 PPC405_ERR77_SYNC
362 rfi /* Should sync shadow TLBs */
363 b . /* prevent prefetch past rfi */
364
3652:
366 /* The bailout. Restore registers to pre-exception conditions
367 * and call the heavyweights to help us out.
368 */
369#ifdef CONFIG_403GCX
370 lwz r12, 12(r0)
371 lwz r11, 8(r0)
372 mtspr SPRN_PID, r12
373 mtcr r11
374 lwz r9, 4(r0)
375 lwz r12, 0(r0)
376#else
377 mfspr r12, SPRN_SPRG6
378 mfspr r11, SPRN_SPRG7
379 mtspr SPRN_PID, r12
380 mtcr r11
381 mfspr r9, SPRN_SPRG5
382 mfspr r12, SPRN_SPRG4
383#endif
384 mfspr r11, SPRN_SPRG1
385 mfspr r10, SPRN_SPRG0
386 b DataAccess
387
388/*
389 * 0x0400 - Instruction Storage Exception
390 * This is caused by a fetch from non-execute or guarded pages.
391 */
392 START_EXCEPTION(0x0400, InstructionAccess)
393 NORMAL_EXCEPTION_PROLOG
394 mr r4,r12 /* Pass SRR0 as arg2 */
395 li r5,0 /* Pass zero as arg3 */
396 EXC_XFER_EE_LITE(0x400, handle_page_fault)
397
398/* 0x0500 - External Interrupt Exception */
399 EXCEPTION(0x0500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
400
401/* 0x0600 - Alignment Exception */
402 START_EXCEPTION(0x0600, Alignment)
403 NORMAL_EXCEPTION_PROLOG
404 mfspr r4,SPRN_DEAR /* Grab the DEAR and save it */
405 stw r4,_DEAR(r11)
406 addi r3,r1,STACK_FRAME_OVERHEAD
407 EXC_XFER_EE(0x600, alignment_exception)
408
409/* 0x0700 - Program Exception */
410 START_EXCEPTION(0x0700, ProgramCheck)
411 NORMAL_EXCEPTION_PROLOG
412 mfspr r4,SPRN_ESR /* Grab the ESR and save it */
413 stw r4,_ESR(r11)
414 addi r3,r1,STACK_FRAME_OVERHEAD
415 EXC_XFER_STD(0x700, program_check_exception)
416
417 EXCEPTION(0x0800, Trap_08, unknown_exception, EXC_XFER_EE)
418 EXCEPTION(0x0900, Trap_09, unknown_exception, EXC_XFER_EE)
419 EXCEPTION(0x0A00, Trap_0A, unknown_exception, EXC_XFER_EE)
420 EXCEPTION(0x0B00, Trap_0B, unknown_exception, EXC_XFER_EE)
421
422/* 0x0C00 - System Call Exception */
423 START_EXCEPTION(0x0C00, SystemCall)
424 NORMAL_EXCEPTION_PROLOG
425 EXC_XFER_EE_LITE(0xc00, DoSyscall)
426
427 EXCEPTION(0x0D00, Trap_0D, unknown_exception, EXC_XFER_EE)
428 EXCEPTION(0x0E00, Trap_0E, unknown_exception, EXC_XFER_EE)
429 EXCEPTION(0x0F00, Trap_0F, unknown_exception, EXC_XFER_EE)
430
431/* 0x1000 - Programmable Interval Timer (PIT) Exception */
432 START_EXCEPTION(0x1000, Decrementer)
433 NORMAL_EXCEPTION_PROLOG
434 lis r0,TSR_PIS@h
435 mtspr SPRN_TSR,r0 /* Clear the PIT exception */
436 addi r3,r1,STACK_FRAME_OVERHEAD
437 EXC_XFER_LITE(0x1000, timer_interrupt)
438
439#if 0
440/* NOTE:
441 * FIT and WDT handlers are not implemented yet.
442 */
443
444/* 0x1010 - Fixed Interval Timer (FIT) Exception
445*/
446 STND_EXCEPTION(0x1010, FITException, unknown_exception)
447
448/* 0x1020 - Watchdog Timer (WDT) Exception
449*/
450#ifdef CONFIG_BOOKE_WDT
451 CRITICAL_EXCEPTION(0x1020, WDTException, WatchdogException)
452#else
453 CRITICAL_EXCEPTION(0x1020, WDTException, unknown_exception)
454#endif
455#endif
456
457/* 0x1100 - Data TLB Miss Exception
458 * As the name implies, translation is not in the MMU, so search the
459 * page tables and fix it. The only purpose of this function is to
460 * load TLB entries from the page table if they exist.
461 */
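/*
 * Roughly, in C-style pseudocode (index math simplified, helper names
 * invented for illustration):
 *
 *	addr = mfspr(SPRN_DEAR);
 *	pgd  = (addr >= TASK_SIZE) ? swapper_pg_dir : current->thread.pgdir;
 *	pmd  = pgd[addr >> 22];			// kernel addrs also force PID = 0
 *	if (pmd & _PMD_PRESENT) {		// points to a 4k PTE page
 *		pte = pte_base(pmd)[(addr >> 12) & 0x3ff];
 *		if (!(pte & _PAGE_PRESENT))
 *			goto bail_to_DataAccess;
 *		pte |= _PAGE_ACCESSED;		// write back, build the tag,
 *		goto finish_tlb_load;		// and tlbwe the entry
 *	}
 *	if (pmd_is_large(pmd))			// large-page pmd: use it directly
 *		goto finish_tlb_load;
 *	goto bail_to_DataAccess;
 */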
462 START_EXCEPTION(0x1100, DTLBMiss)
463 mtspr SPRN_SPRG0, r10 /* Save some working registers */
464 mtspr SPRN_SPRG1, r11
465#ifdef CONFIG_403GCX
466 stw r12, 0(r0)
467 stw r9, 4(r0)
468 mfcr r11
469 mfspr r12, SPRN_PID
470 stw r11, 8(r0)
471 stw r12, 12(r0)
472#else
473 mtspr SPRN_SPRG4, r12
474 mtspr SPRN_SPRG5, r9
475 mfcr r11
476 mfspr r12, SPRN_PID
477 mtspr SPRN_SPRG7, r11
478 mtspr SPRN_SPRG6, r12
479#endif
480 mfspr r10, SPRN_DEAR /* Get faulting address */
481
482 /* If we are faulting a kernel address, we have to use the
483 * kernel page tables.
484 */
485 lis r11, TASK_SIZE@h
486 cmplw r10, r11
487 blt+ 3f
488 lis r11, swapper_pg_dir@h
489 ori r11, r11, swapper_pg_dir@l
490 li r9, 0
491 mtspr SPRN_PID, r9 /* TLB will have 0 TID */
492 b 4f
493
494 /* Get the PGD for the current thread.
495 */
4963:
497 mfspr r11,SPRN_SPRG3
498 lwz r11,PGDIR(r11)
4994:
500 tophys(r11, r11)
501 rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */
502 lwz r12, 0(r11) /* Get L1 entry */
503 andi. r9, r12, _PMD_PRESENT /* Check if it points to a PTE page */
504 beq 2f /* Bail if no table */
505
506 rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */
507 lwz r11, 0(r12) /* Get Linux PTE */
508 andi. r9, r11, _PAGE_PRESENT
509 beq 5f
510
511 ori r11, r11, _PAGE_ACCESSED
512 stw r11, 0(r12)
513
514 /* Create TLB tag. This is the faulting address plus a static
515 * set of bits. These are size, valid, E, U0.
516 */
517 li r12, 0x00c0
518 rlwimi r10, r12, 0, 20, 31
519
520 b finish_tlb_load
521
5222: /* Check for possible large-page pmd entry */
523 rlwinm. r9, r12, 2, 22, 24
524 beq 5f
525
526 /* Create TLB tag. This is the faulting address, plus a static
527 * set of bits (valid, E, U0) plus the size from the PMD.
528 */
529 ori r9, r9, 0x40
530 rlwimi r10, r9, 0, 20, 31
531 mr r11, r12
532
533 b finish_tlb_load
534
5355:
536 /* The bailout. Restore registers to pre-exception conditions
537 * and call the heavyweights to help us out.
538 */
539#ifdef CONFIG_403GCX
540 lwz r12, 12(r0)
541 lwz r11, 8(r0)
542 mtspr SPRN_PID, r12
543 mtcr r11
544 lwz r9, 4(r0)
545 lwz r12, 0(r0)
546#else
547 mfspr r12, SPRN_SPRG6
548 mfspr r11, SPRN_SPRG7
549 mtspr SPRN_PID, r12
550 mtcr r11
551 mfspr r9, SPRN_SPRG5
552 mfspr r12, SPRN_SPRG4
553#endif
554 mfspr r11, SPRN_SPRG1
555 mfspr r10, SPRN_SPRG0
556 b DataAccess
557
558/* 0x1200 - Instruction TLB Miss Exception
559 * Nearly the same as above, except we get our information from different
560 * registers and bailout to a different point.
561 */
562 START_EXCEPTION(0x1200, ITLBMiss)
563 mtspr SPRN_SPRG0, r10 /* Save some working registers */
564 mtspr SPRN_SPRG1, r11
565#ifdef CONFIG_403GCX
566 stw r12, 0(r0)
567 stw r9, 4(r0)
568 mfcr r11
569 mfspr r12, SPRN_PID
570 stw r11, 8(r0)
571 stw r12, 12(r0)
572#else
573 mtspr SPRN_SPRG4, r12
574 mtspr SPRN_SPRG5, r9
575 mfcr r11
576 mfspr r12, SPRN_PID
577 mtspr SPRN_SPRG7, r11
578 mtspr SPRN_SPRG6, r12
579#endif
580 mfspr r10, SPRN_SRR0 /* Get faulting address */
581
582 /* If we are faulting a kernel address, we have to use the
583 * kernel page tables.
584 */
585 lis r11, TASK_SIZE@h
586 cmplw r10, r11
587 blt+ 3f
588 lis r11, swapper_pg_dir@h
589 ori r11, r11, swapper_pg_dir@l
590 li r9, 0
591 mtspr SPRN_PID, r9 /* TLB will have 0 TID */
592 b 4f
593
594 /* Get the PGD for the current thread.
595 */
5963:
597 mfspr r11,SPRN_SPRG3
598 lwz r11,PGDIR(r11)
5994:
600 tophys(r11, r11)
601 rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */
602 lwz r12, 0(r11) /* Get L1 entry */
603 andi. r9, r12, _PMD_PRESENT /* Check if it points to a PTE page */
604 beq 2f /* Bail if no table */
605
606 rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */
607 lwz r11, 0(r12) /* Get Linux PTE */
608 andi. r9, r11, _PAGE_PRESENT
609 beq 5f
610
611 ori r11, r11, _PAGE_ACCESSED
612 stw r11, 0(r12)
613
614 /* Create TLB tag. This is the faulting address plus a static
615 * set of bits. These are size, valid, E, U0.
616 */
617 li r12, 0x00c0
618 rlwimi r10, r12, 0, 20, 31
619
620 b finish_tlb_load
621
6222: /* Check for possible large-page pmd entry */
623 rlwinm. r9, r12, 2, 22, 24
624 beq 5f
625
626 /* Create TLB tag. This is the faulting address, plus a static
627 * set of bits (valid, E, U0) plus the size from the PMD.
628 */
629 ori r9, r9, 0x40
630 rlwimi r10, r9, 0, 20, 31
631 mr r11, r12
632
633 b finish_tlb_load
634
6355:
636 /* The bailout. Restore registers to pre-exception conditions
637 * and call the heavyweights to help us out.
638 */
639#ifdef CONFIG_403GCX
640 lwz r12, 12(r0)
641 lwz r11, 8(r0)
642 mtspr SPRN_PID, r12
643 mtcr r11
644 lwz r9, 4(r0)
645 lwz r12, 0(r0)
646#else
647 mfspr r12, SPRN_SPRG6
648 mfspr r11, SPRN_SPRG7
649 mtspr SPRN_PID, r12
650 mtcr r11
651 mfspr r9, SPRN_SPRG5
652 mfspr r12, SPRN_SPRG4
653#endif
654 mfspr r11, SPRN_SPRG1
655 mfspr r10, SPRN_SPRG0
656 b InstructionAccess
657
658 EXCEPTION(0x1300, Trap_13, unknown_exception, EXC_XFER_EE)
659 EXCEPTION(0x1400, Trap_14, unknown_exception, EXC_XFER_EE)
660 EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
661 EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
662#ifdef CONFIG_IBM405_ERR51
663 /* 405GP errata 51 */
664 START_EXCEPTION(0x1700, Trap_17)
665 b DTLBMiss
666#else
667 EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_EE)
668#endif
669 EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
670 EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
671 EXCEPTION(0x1A00, Trap_1A, unknown_exception, EXC_XFER_EE)
672 EXCEPTION(0x1B00, Trap_1B, unknown_exception, EXC_XFER_EE)
673 EXCEPTION(0x1C00, Trap_1C, unknown_exception, EXC_XFER_EE)
674 EXCEPTION(0x1D00, Trap_1D, unknown_exception, EXC_XFER_EE)
675 EXCEPTION(0x1E00, Trap_1E, unknown_exception, EXC_XFER_EE)
676 EXCEPTION(0x1F00, Trap_1F, unknown_exception, EXC_XFER_EE)
677
678/* Check for a single step debug exception while in an exception
679 * handler before state has been saved. This is to catch the case
680 * where an instruction that we are trying to single step causes
681 * an exception (eg ITLB/DTLB miss) and thus the first instruction of
682 * the exception handler generates a single step debug exception.
683 *
684 * If we get a debug trap on the first instruction of an exception handler,
685 * we reset the MSR_DE in the _exception handler's_ MSR (the debug trap is
686 * a critical exception, so we are using SPRN_CSRR1 to manipulate the MSR).
687 * The exception handler was handling a non-critical interrupt, so it will
688 * save (and later restore) the MSR via SPRN_SRR1, which will still have
689 * the MSR_DE bit set.
690 */
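/*
 * The test below, in outline (C-style pseudocode, for illustration):
 *
 *	if (!(mfspr(SPRN_DBSR) & DBSR_IC))		// not a single step
 *		goto normal_critical_handling;
 *	if ((saved_srr3 & (MSR_IR | MSR_PR)) &&		// translation/user on,
 *	    mfspr(SPRN_SRR2) > 0x2100)			// and outside the vectors
 *		goto normal_critical_handling;		// -> a genuine debug trap
 *	saved_srr3 &= ~MSR_DE;			// act as if DE had been off in the
 *	mtspr(SPRN_DBSR, DBSR_IC);		// prolog: clear the event and
 *	rfci();					// return to the interrupted prolog
 */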
691 /* 0x2000 - Debug Exception */
692 START_EXCEPTION(0x2000, DebugTrap)
693 CRITICAL_EXCEPTION_PROLOG
694
695 /*
696 * If this is a single step or branch-taken exception in an
697 * exception entry sequence, it was probably meant to apply to
698 * the code where the exception occurred (since exception entry
699 * doesn't turn off DE automatically). We simulate the effect
700 * of turning off DE on entry to an exception handler by turning
701 * off DE in the SRR3 value and clearing the debug status.
702 */
703 mfspr r10,SPRN_DBSR /* check single-step/branch taken */
704 andis. r10,r10,DBSR_IC@h
705 beq+ 2f
706
707 andi. r10,r9,MSR_IR|MSR_PR /* check supervisor + MMU off */
708 beq 1f /* branch and fix it up */
709
710 mfspr r10,SPRN_SRR2 /* Faulting instruction address */
711 cmplwi r10,0x2100
712 bgt+ 2f /* address above exception vectors */
713
714 /* here it looks like we got an inappropriate debug exception. */
7151: rlwinm r9,r9,0,~MSR_DE /* clear DE in the SRR3 value */
716 lis r10,DBSR_IC@h /* clear the IC event */
717 mtspr SPRN_DBSR,r10
718 /* restore state and get out */
719 lwz r10,_CCR(r11)
720 lwz r0,GPR0(r11)
721 lwz r1,GPR1(r11)
722 mtcrf 0x80,r10
723 mtspr SPRN_SRR2,r12
724 mtspr SPRN_SRR3,r9
725 lwz r9,GPR9(r11)
726 lwz r12,GPR12(r11)
727 lwz r10,crit_r10@l(0)
728 lwz r11,crit_r11@l(0)
729 PPC405_ERR77_SYNC
730 rfci
731 b .
732
733 /* continue normal handling for a critical exception... */
7342: mfspr r4,SPRN_DBSR
735 addi r3,r1,STACK_FRAME_OVERHEAD
736 EXC_XFER_TEMPLATE(DebugException, 0x2002, \
737 (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
738 NOCOPY, crit_transfer_to_handler, ret_from_crit_exc)
739
740/*
741 * The other Data TLB exceptions bail out to this point
742 * if they can't resolve the lightweight TLB fault.
743 */
744DataAccess:
745 NORMAL_EXCEPTION_PROLOG
746 mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */
747 stw r5,_ESR(r11)
748 mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
749 EXC_XFER_EE_LITE(0x300, handle_page_fault)
750
751/* Other PowerPC processors, namely those derived from the 6xx-series
752 * have vectors from 0x2100 through 0x2F00 defined, but marked as reserved.
753 * However, for the 4xx-series processors these are neither defined nor
754 * reserved.
755 */
756
757 /* Damn, I came up one instruction too many to fit into the
758 * exception space :-). Both the instruction and data TLB
759 * miss get to this point to load the TLB.
760 * r10 - TLB_TAG value
761 * r11 - Linux PTE
762 * r12, r9 - available to use
763 * PID - loaded with proper value when we get here
764 * Upon exit, we reload everything and RFI.
765 * Actually, it will fit now, but oh well.....a common place
766 * to load the TLB.
767 */
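/*
 * In outline (C-style pseudocode, names illustrative):
 *
 *	slot = tlb_4xx_index = (tlb_4xx_index + 1) & (PPC4XX_TLB_SIZE - 1);
 *	tlb_lo = pte & ~0x0ce2;			// strip Linux-only software bits
 *	tlbwe(tlb_lo, slot, TLB_DATA);
 *	tlbwe(tag,    slot, TLB_TAG);		// tag was built by the miss handler
 *	restore_scratch_regs();
 *	rfi();					// also resyncs the shadow TLBs
 */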
768tlb_4xx_index:
769 .long 0
770finish_tlb_load:
771 /* load the next available TLB index.
772 */
773 lwz r9, tlb_4xx_index@l(0)
774 addi r9, r9, 1
775 andi. r9, r9, (PPC4XX_TLB_SIZE-1)
776 stw r9, tlb_4xx_index@l(0)
777
7786:
779 /*
780 * Clear out the software-only bits in the PTE to generate the
781 * TLB_DATA value. These are the bottom 2 bits of the RPN, the
782 * top 3 bits of the zone field, and M.
783 */
784 li r12, 0x0ce2
785 andc r11, r11, r12
786
787 tlbwe r11, r9, TLB_DATA /* Load TLB LO */
788 tlbwe r10, r9, TLB_TAG /* Load TLB HI */
789
790 /* Done...restore registers and get out of here.
791 */
792#ifdef CONFIG_403GCX
793 lwz r12, 12(r0)
794 lwz r11, 8(r0)
795 mtspr SPRN_PID, r12
796 mtcr r11
797 lwz r9, 4(r0)
798 lwz r12, 0(r0)
799#else
800 mfspr r12, SPRN_SPRG6
801 mfspr r11, SPRN_SPRG7
802 mtspr SPRN_PID, r12
803 mtcr r11
804 mfspr r9, SPRN_SPRG5
805 mfspr r12, SPRN_SPRG4
806#endif
807 mfspr r11, SPRN_SPRG1
808 mfspr r10, SPRN_SPRG0
809 PPC405_ERR77_SYNC
810 rfi /* Should sync shadow TLBs */
811 b . /* prevent prefetch past rfi */
812
813/* extern void giveup_fpu(struct task_struct *prev)
814 *
815 * The PowerPC 4xx family of processors do not have an FPU, so this just
816 * returns.
817 */
818_GLOBAL(giveup_fpu)
819 blr
820
821/* This is where the main kernel code starts.
822 */
823start_here:
824
825 /* ptr to current */
826 lis r2,init_task@h
827 ori r2,r2,init_task@l
828
829 /* ptr to phys current thread */
830 tophys(r4,r2)
831 addi r4,r4,THREAD /* init task's THREAD */
832 mtspr SPRN_SPRG3,r4
833
834 /* stack */
835 lis r1,init_thread_union@ha
836 addi r1,r1,init_thread_union@l
837 li r0,0
838 stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
839
840 bl early_init /* We have to do this with MMU on */
841
842/*
843 * Decide what sort of machine this is and initialize the MMU.
844 */
845 mr r3,r31
846 mr r4,r30
847 mr r5,r29
848 mr r6,r28
849 mr r7,r27
850 bl machine_init
851 bl MMU_init
852
853/* Go back to running unmapped so we can load up new values
854 * and change to using our exception vectors.
855 * On the 4xx, all we have to do is invalidate the TLB to clear
856 * the old 16M byte TLB mappings.
857 */
858 lis r4,2f@h
859 ori r4,r4,2f@l
860 tophys(r4,r4)
861 lis r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@h
862 ori r3,r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@l
863 mtspr SPRN_SRR0,r4
864 mtspr SPRN_SRR1,r3
865 rfi
866 b . /* prevent prefetch past rfi */
867
868/* Load up the kernel context */
8692:
870 sync /* Flush to memory before changing TLB */
871 tlbia
872 isync /* Flush shadow TLBs */
873
874 /* set up the PTE pointers for the Abatron bdiGDB.
875 */
876 lis r6, swapper_pg_dir@h
877 ori r6, r6, swapper_pg_dir@l
878 lis r5, abatron_pteptrs@h
879 ori r5, r5, abatron_pteptrs@l
880 stw r5, 0xf0(r0) /* Must match your Abatron config file */
881 tophys(r5,r5)
882 stw r6, 0(r5)
883
884/* Now turn on the MMU for real! */
885 lis r4,MSR_KERNEL@h
886 ori r4,r4,MSR_KERNEL@l
887 lis r3,start_kernel@h
888 ori r3,r3,start_kernel@l
889 mtspr SPRN_SRR0,r3
890 mtspr SPRN_SRR1,r4
891 rfi /* enable MMU and jump to start_kernel */
892 b . /* prevent prefetch past rfi */
893
894/* Set up the initial MMU state so we can do the first level of
895 * kernel initialization. This maps the first 16 MBytes of memory 1:1
896 * virtual to physical and more importantly sets the cache mode.
897 */
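/*
 * In outline (C-style pseudocode, names illustrative; the i-cache invalidate
 * and the optional debug-UART mapping are omitted):
 *
 *	tlbia();				// start with an empty TLB
 *	mtspr(SPRN_PID, 0);			// kernel runs with TID 0
 *	data = (kernel_phys & ~0x3ff) | TLB_WR | TLB_EX;
 *	tag  = (KERNELBASE  & ~0x3ff) | TLB_VALID | TLB_PAGESZ(PAGESZ_16M);
 *	tlbwe(data, 63, TLB_DATA);		// pin 16MB covering the kernel
 *	tlbwe(tag,  63, TLB_TAG);
 *	mtspr(SPRN_EVPR, tophys(KERNELBASE));	// physical base of the vectors
 */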
898initial_mmu:
899 tlbia /* Invalidate all TLB entries */
900 isync
901
902 /* We should still be executing code at physical address 0x0000xxxx
903 * at this point. However, start_here is at virtual address
904 * 0xC000xxxx. So, set up a TLB mapping to cover this once
905 * translation is enabled.
906 */
907
908 lis r3,KERNELBASE@h /* Load the kernel virtual address */
909 ori r3,r3,KERNELBASE@l
910 tophys(r4,r3) /* Load the kernel physical address */
911
912 iccci r0,r3 /* Invalidate the i-cache before use */
913
914 /* Load the kernel PID.
915 */
916 li r0,0
917 mtspr SPRN_PID,r0
918 sync
919
920 /* Configure and load two entries into TLB slots 62 and 63.
921 * In case we are pinning TLBs, these are reserved by the
922 * other TLB functions. If not reserving, then it doesn't
923 * matter where they are loaded.
924 */
925 clrrwi r4,r4,10 /* Mask off the real page number */
926 ori r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */
927
928 clrrwi r3,r3,10 /* Mask off the effective page number */
929 ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_16M))
930
931 li r0,63 /* TLB slot 63 */
932
933 tlbwe r4,r0,TLB_DATA /* Load the data portion of the entry */
934 tlbwe r3,r0,TLB_TAG /* Load the tag portion of the entry */
935
936#if defined(CONFIG_SERIAL_TEXT_DEBUG) && defined(SERIAL_DEBUG_IO_BASE)
937
938 /* Load a TLB entry for the UART, so that ppc4xx_progress() can use
939 * the UARTs nice and early. We use a 4k real==virtual mapping. */
940
941 lis r3,SERIAL_DEBUG_IO_BASE@h
942 ori r3,r3,SERIAL_DEBUG_IO_BASE@l
943 mr r4,r3
944 clrrwi r4,r4,12
945 ori r4,r4,(TLB_WR|TLB_I|TLB_M|TLB_G)
946
947 clrrwi r3,r3,12
948 ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K))
949
950 li r0,0 /* TLB slot 0 */
951 tlbwe r4,r0,TLB_DATA
952 tlbwe r3,r0,TLB_TAG
953#endif /* CONFIG_SERIAL_TEXT_DEBUG && SERIAL_DEBUG_IO_BASE */
954
955 isync
956
957 /* Establish the exception vector base
958 */
959 lis r4,KERNELBASE@h /* EVPR only uses the high 16-bits */
960 tophys(r0,r4) /* Use the physical address */
961 mtspr SPRN_EVPR,r0
962
963 blr
964
965_GLOBAL(abort)
966 mfspr r13,SPRN_DBCR0
967 oris r13,r13,DBCR0_RST_SYSTEM@h
968 mtspr SPRN_DBCR0,r13
969
970_GLOBAL(set_context)
971
972#ifdef CONFIG_BDI_SWITCH
973 /* Context switch the PTE pointer for the Abatron BDI2000.
974 * The PGDIR is the second parameter.
975 */
976 lis r5, KERNELBASE@h
977 lwz r5, 0xf0(r5)
978 stw r4, 0x4(r5)
979#endif
980 sync
981 mtspr SPRN_PID,r3
982 isync /* Need an isync to flush shadow */
983 /* TLBs after changing PID */
984 blr
985
986/* We put a few things here that have to be page-aligned. This stuff
987 * goes at the beginning of the data segment, which is page-aligned.
988 */
989 .data
990 .align 12
991 .globl sdata
992sdata:
993 .globl empty_zero_page
994empty_zero_page:
995 .space 4096
996 .globl swapper_pg_dir
997swapper_pg_dir:
998 .space 4096
999
1000
1001/* Stack for handling critical exceptions from kernel mode */
1002 .section .bss
1003 .align 12
1004exception_stack_bottom:
1005 .space 4096
1006critical_stack_top:
1007 .globl exception_stack_top
1008exception_stack_top:
1009
1010/* This space gets a copy of optional info passed to us by the bootstrap
1011 * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
1012 */
1013 .globl cmd_line
1014cmd_line:
1015 .space 512
1016
1017/* Room for two PTE pointers, usually the kernel and current user pointers
1018 * to their respective root page table.
1019 */
1020abatron_pteptrs:
1021 .space 8