author     Benjamin Herrenschmidt <benh@kernel.crashing.org>   2009-07-23 19:15:59 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>   2009-08-19 20:25:11 -0400
commit     2d27cfd3286966c04d4192a9db5a6c7ea60eebf1 (patch)
tree       a9e3feb764da5a2be1a6ef9b3a0bf694e874a424 /arch/powerpc/kernel
parent     32a74949b7337726e76d69f51c48715431126c6c (diff)
powerpc: Remaining 64-bit Book3E support
This contains all the bits that didn't fit in previous patches :-) This
includes the actual exception handlers assembly, the changes to the
kernel entry, other misc bits and wiring it all up in Kconfig.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/Makefile           10
-rw-r--r--  arch/powerpc/kernel/cputable.c         27
-rw-r--r--  arch/powerpc/kernel/entry_64.S         60
-rw-r--r--  arch/powerpc/kernel/exceptions-64e.S  784
-rw-r--r--  arch/powerpc/kernel/head_64.S          68
-rw-r--r--  arch/powerpc/kernel/setup_64.c         19
6 files changed, 951 insertions, 17 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index b73396b93905..035946f9d5fb 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -33,10 +33,10 @@ obj-y := cputable.o ptrace.o syscalls.o \ | |||
33 | obj-y += vdso32/ | 33 | obj-y += vdso32/ |
34 | obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \ | 34 | obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \ |
35 | signal_64.o ptrace32.o \ | 35 | signal_64.o ptrace32.o \ |
36 | paca.o cpu_setup_ppc970.o \ | 36 | paca.o nvram_64.o firmware.o |
37 | cpu_setup_pa6t.o \ | 37 | obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o |
38 | firmware.o nvram_64.o | ||
39 | obj64-$(CONFIG_RELOCATABLE) += reloc_64.o | 38 | obj64-$(CONFIG_RELOCATABLE) += reloc_64.o |
39 | obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o | ||
40 | obj-$(CONFIG_PPC64) += vdso64/ | 40 | obj-$(CONFIG_PPC64) += vdso64/ |
41 | obj-$(CONFIG_ALTIVEC) += vecemu.o | 41 | obj-$(CONFIG_ALTIVEC) += vecemu.o |
42 | obj-$(CONFIG_PPC_970_NAP) += idle_power4.o | 42 | obj-$(CONFIG_PPC_970_NAP) += idle_power4.o |
@@ -63,8 +63,8 @@ obj-$(CONFIG_MODULES) += module.o module_$(CONFIG_WORD_SIZE).o | |||
63 | obj-$(CONFIG_44x) += cpu_setup_44x.o | 63 | obj-$(CONFIG_44x) += cpu_setup_44x.o |
64 | obj-$(CONFIG_FSL_BOOKE) += cpu_setup_fsl_booke.o dbell.o | 64 | obj-$(CONFIG_FSL_BOOKE) += cpu_setup_fsl_booke.o dbell.o |
65 | 65 | ||
66 | extra-$(CONFIG_PPC_STD_MMU) := head_32.o | 66 | extra-y := head_$(CONFIG_WORD_SIZE).o |
67 | extra-$(CONFIG_PPC64) := head_64.o | 67 | extra-$(CONFIG_PPC_BOOK3E_32) := head_new_booke.o |
68 | extra-$(CONFIG_40x) := head_40x.o | 68 | extra-$(CONFIG_40x) := head_40x.o |
69 | extra-$(CONFIG_44x) := head_44x.o | 69 | extra-$(CONFIG_44x) := head_44x.o |
70 | extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o | 70 | extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o |
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 4a24a2fc4574..f34ea37079b5 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -93,7 +93,7 @@ extern void __restore_cpu_power7(void); | |||
93 | PPC_FEATURE_BOOKE) | 93 | PPC_FEATURE_BOOKE) |
94 | 94 | ||
95 | static struct cpu_spec __initdata cpu_specs[] = { | 95 | static struct cpu_spec __initdata cpu_specs[] = { |
96 | #ifdef CONFIG_PPC64 | 96 | #ifdef CONFIG_PPC_BOOK3S_64 |
97 | { /* Power3 */ | 97 | { /* Power3 */ |
98 | .pvr_mask = 0xffff0000, | 98 | .pvr_mask = 0xffff0000, |
99 | .pvr_value = 0x00400000, | 99 | .pvr_value = 0x00400000, |
@@ -508,7 +508,30 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
508 | .machine_check = machine_check_generic, | 508 | .machine_check = machine_check_generic, |
509 | .platform = "power4", | 509 | .platform = "power4", |
510 | } | 510 | } |
511 | #endif /* CONFIG_PPC64 */ | 511 | #endif /* CONFIG_PPC_BOOK3S_64 */ |
512 | #ifdef CONFIG_PPC_BOOK3E_64 | ||
513 | { /* This is a default entry to get going, to be replaced by | ||
514 | * a real one at some stage | ||
515 | */ | ||
516 | #define CPU_FTRS_BASE_BOOK3E (CPU_FTR_USE_TB | \ | ||
517 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_SMT | \ | ||
518 | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE) | ||
519 | .pvr_mask = 0x00000000, | ||
520 | .pvr_value = 0x00000000, | ||
521 | .cpu_name = "Book3E", | ||
522 | .cpu_features = CPU_FTRS_BASE_BOOK3E, | ||
523 | .cpu_user_features = COMMON_USER_PPC64, | ||
524 | .mmu_features = MMU_FTR_TYPE_3E | MMU_FTR_USE_TLBILX | | ||
525 | MMU_FTR_USE_TLBIVAX_BCAST | | ||
526 | MMU_FTR_LOCK_BCAST_INVAL, | ||
527 | .icache_bsize = 64, | ||
528 | .dcache_bsize = 64, | ||
529 | .num_pmcs = 0, | ||
530 | .machine_check = machine_check_generic, | ||
531 | .platform = "power6", | ||
532 | }, | ||
533 | #endif | ||
534 | |||
512 | #ifdef CONFIG_PPC32 | 535 | #ifdef CONFIG_PPC32 |
513 | #if CLASSIC_PPC | 536 | #if CLASSIC_PPC |
514 | { /* 601 */ | 537 | { /* 601 */ |
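
For reference, identify_cpu() walks cpu_specs[] and picks the first entry whose masked PVR matches, which is why the new Book3E placeholder above uses a pvr_mask of 0: it matches any processor version until real entries are added. A simplified sketch of that matching, not the kernel's exact code:

static struct cpu_spec *identify_cpu_sketch(unsigned int pvr)
{
	struct cpu_spec *s = cpu_specs;
	int i;

	/* first entry whose masked PVR matches wins; mask 0 matches anything */
	for (i = 0; i < ARRAY_SIZE(cpu_specs); i++, s++)
		if ((pvr & s->pvr_mask) == s->pvr_value)
			return s;
	return NULL;
}
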
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 1cb0f3d1714b..66bcda34a6bb 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -120,9 +120,15 @@ BEGIN_FW_FTR_SECTION | |||
120 | 2: | 120 | 2: |
121 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) | 121 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) |
122 | #endif /* CONFIG_PPC_ISERIES */ | 122 | #endif /* CONFIG_PPC_ISERIES */ |
123 | |||
124 | /* Hard enable interrupts */ | ||
125 | #ifdef CONFIG_PPC_BOOK3E | ||
126 | wrteei 1 | ||
127 | #else | ||
123 | mfmsr r11 | 128 | mfmsr r11 |
124 | ori r11,r11,MSR_EE | 129 | ori r11,r11,MSR_EE |
125 | mtmsrd r11,1 | 130 | mtmsrd r11,1 |
131 | #endif /* CONFIG_PPC_BOOK3E */ | ||
126 | 132 | ||
127 | #ifdef SHOW_SYSCALLS | 133 | #ifdef SHOW_SYSCALLS |
128 | bl .do_show_syscall | 134 | bl .do_show_syscall |
@@ -168,15 +174,25 @@ syscall_exit: | |||
168 | #endif | 174 | #endif |
169 | clrrdi r12,r1,THREAD_SHIFT | 175 | clrrdi r12,r1,THREAD_SHIFT |
170 | 176 | ||
171 | /* disable interrupts so current_thread_info()->flags can't change, | ||
172 | and so that we don't get interrupted after loading SRR0/1. */ | ||
173 | ld r8,_MSR(r1) | 177 | ld r8,_MSR(r1) |
178 | #ifdef CONFIG_PPC_BOOK3S | ||
179 | /* No MSR:RI on BookE */ | ||
174 | andi. r10,r8,MSR_RI | 180 | andi. r10,r8,MSR_RI |
175 | beq- unrecov_restore | 181 | beq- unrecov_restore |
182 | #endif | ||
183 | |||
184 | /* Disable interrupts so current_thread_info()->flags can't change, | ||
185 | * and so that we don't get interrupted after loading SRR0/1. | ||
186 | */ | ||
187 | #ifdef CONFIG_PPC_BOOK3E | ||
188 | wrteei 0 | ||
189 | #else | ||
176 | mfmsr r10 | 190 | mfmsr r10 |
177 | rldicl r10,r10,48,1 | 191 | rldicl r10,r10,48,1 |
178 | rotldi r10,r10,16 | 192 | rotldi r10,r10,16 |
179 | mtmsrd r10,1 | 193 | mtmsrd r10,1 |
194 | #endif /* CONFIG_PPC_BOOK3E */ | ||
195 | |||
180 | ld r9,TI_FLAGS(r12) | 196 | ld r9,TI_FLAGS(r12) |
181 | li r11,-_LAST_ERRNO | 197 | li r11,-_LAST_ERRNO |
182 | andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK) | 198 | andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK) |
@@ -194,9 +210,13 @@ syscall_error_cont: | |||
194 | * userspace and we take an exception after restoring r13, | 210 | * userspace and we take an exception after restoring r13, |
195 | * we end up corrupting the userspace r13 value. | 211 | * we end up corrupting the userspace r13 value. |
196 | */ | 212 | */ |
213 | #ifdef CONFIG_PPC_BOOK3S | ||
214 | /* No MSR:RI on BookE */ | ||
197 | li r12,MSR_RI | 215 | li r12,MSR_RI |
198 | andc r11,r10,r12 | 216 | andc r11,r10,r12 |
199 | mtmsrd r11,1 /* clear MSR.RI */ | 217 | mtmsrd r11,1 /* clear MSR.RI */ |
218 | #endif /* CONFIG_PPC_BOOK3S */ | ||
219 | |||
200 | beq- 1f | 220 | beq- 1f |
201 | ACCOUNT_CPU_USER_EXIT(r11, r12) | 221 | ACCOUNT_CPU_USER_EXIT(r11, r12) |
202 | ld r13,GPR13(r1) /* only restore r13 if returning to usermode */ | 222 | ld r13,GPR13(r1) /* only restore r13 if returning to usermode */ |
@@ -206,7 +226,7 @@ syscall_error_cont: | |||
206 | mtcr r5 | 226 | mtcr r5 |
207 | mtspr SPRN_SRR0,r7 | 227 | mtspr SPRN_SRR0,r7 |
208 | mtspr SPRN_SRR1,r8 | 228 | mtspr SPRN_SRR1,r8 |
209 | rfid | 229 | RFI |
210 | b . /* prevent speculative execution */ | 230 | b . /* prevent speculative execution */ |
211 | 231 | ||
212 | syscall_error: | 232 | syscall_error: |
@@ -276,9 +296,13 @@ syscall_exit_work: | |||
276 | beq .ret_from_except_lite | 296 | beq .ret_from_except_lite |
277 | 297 | ||
278 | /* Re-enable interrupts */ | 298 | /* Re-enable interrupts */ |
299 | #ifdef CONFIG_PPC_BOOK3E | ||
300 | wrteei 1 | ||
301 | #else | ||
279 | mfmsr r10 | 302 | mfmsr r10 |
280 | ori r10,r10,MSR_EE | 303 | ori r10,r10,MSR_EE |
281 | mtmsrd r10,1 | 304 | mtmsrd r10,1 |
305 | #endif /* CONFIG_PPC_BOOK3E */ | ||
282 | 306 | ||
283 | bl .save_nvgprs | 307 | bl .save_nvgprs |
284 | addi r3,r1,STACK_FRAME_OVERHEAD | 308 | addi r3,r1,STACK_FRAME_OVERHEAD |
@@ -380,7 +404,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | |||
380 | and. r0,r0,r22 | 404 | and. r0,r0,r22 |
381 | beq+ 1f | 405 | beq+ 1f |
382 | andc r22,r22,r0 | 406 | andc r22,r22,r0 |
383 | mtmsrd r22 | 407 | MTMSRD(r22) |
384 | isync | 408 | isync |
385 | 1: std r20,_NIP(r1) | 409 | 1: std r20,_NIP(r1) |
386 | mfcr r23 | 410 | mfcr r23 |
@@ -399,6 +423,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | |||
399 | std r6,PACACURRENT(r13) /* Set new 'current' */ | 423 | std r6,PACACURRENT(r13) /* Set new 'current' */ |
400 | 424 | ||
401 | ld r8,KSP(r4) /* new stack pointer */ | 425 | ld r8,KSP(r4) /* new stack pointer */ |
426 | #ifdef CONFIG_PPC_BOOK3S | ||
402 | BEGIN_FTR_SECTION | 427 | BEGIN_FTR_SECTION |
403 | BEGIN_FTR_SECTION_NESTED(95) | 428 | BEGIN_FTR_SECTION_NESTED(95) |
404 | clrrdi r6,r8,28 /* get its ESID */ | 429 | clrrdi r6,r8,28 /* get its ESID */ |
@@ -445,8 +470,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT) | |||
445 | slbie r6 /* Workaround POWER5 < DD2.1 issue */ | 470 | slbie r6 /* Workaround POWER5 < DD2.1 issue */ |
446 | slbmte r7,r0 | 471 | slbmte r7,r0 |
447 | isync | 472 | isync |
448 | |||
449 | 2: | 473 | 2: |
474 | #endif /* !CONFIG_PPC_BOOK3S */ | ||
475 | |||
450 | clrrdi r7,r8,THREAD_SHIFT /* base of new stack */ | 476 | clrrdi r7,r8,THREAD_SHIFT /* base of new stack */ |
451 | /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE | 477 | /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE |
452 | because we don't need to leave the 288-byte ABI gap at the | 478 | because we don't need to leave the 288-byte ABI gap at the |
@@ -490,10 +516,14 @@ _GLOBAL(ret_from_except_lite) | |||
490 | * can't change between when we test it and when we return | 516 | * can't change between when we test it and when we return |
491 | * from the interrupt. | 517 | * from the interrupt. |
492 | */ | 518 | */ |
519 | #ifdef CONFIG_PPC_BOOK3E | ||
520 | wrteei 0 | ||
521 | #else | ||
493 | mfmsr r10 /* Get current interrupt state */ | 522 | mfmsr r10 /* Get current interrupt state */ |
494 | rldicl r9,r10,48,1 /* clear MSR_EE */ | 523 | rldicl r9,r10,48,1 /* clear MSR_EE */ |
495 | rotldi r9,r9,16 | 524 | rotldi r9,r9,16 |
496 | mtmsrd r9,1 /* Update machine state */ | 525 | mtmsrd r9,1 /* Update machine state */ |
526 | #endif /* CONFIG_PPC_BOOK3E */ | ||
497 | 527 | ||
498 | #ifdef CONFIG_PREEMPT | 528 | #ifdef CONFIG_PREEMPT |
499 | clrrdi r9,r1,THREAD_SHIFT /* current_thread_info() */ | 529 | clrrdi r9,r1,THREAD_SHIFT /* current_thread_info() */ |
@@ -540,6 +570,9 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES) | |||
540 | rldicl r4,r3,49,63 /* r0 = (r3 >> 15) & 1 */ | 570 | rldicl r4,r3,49,63 /* r0 = (r3 >> 15) & 1 */ |
541 | stb r4,PACAHARDIRQEN(r13) | 571 | stb r4,PACAHARDIRQEN(r13) |
542 | 572 | ||
573 | #ifdef CONFIG_PPC_BOOK3E | ||
574 | b .exception_return_book3e | ||
575 | #else | ||
543 | ld r4,_CTR(r1) | 576 | ld r4,_CTR(r1) |
544 | ld r0,_LINK(r1) | 577 | ld r0,_LINK(r1) |
545 | mtctr r4 | 578 | mtctr r4 |
@@ -588,6 +621,8 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES) | |||
588 | rfid | 621 | rfid |
589 | b . /* prevent speculative execution */ | 622 | b . /* prevent speculative execution */ |
590 | 623 | ||
624 | #endif /* CONFIG_PPC_BOOK3E */ | ||
625 | |||
591 | iseries_check_pending_irqs: | 626 | iseries_check_pending_irqs: |
592 | #ifdef CONFIG_PPC_ISERIES | 627 | #ifdef CONFIG_PPC_ISERIES |
593 | ld r5,SOFTE(r1) | 628 | ld r5,SOFTE(r1) |
@@ -638,6 +673,11 @@ do_work: | |||
638 | li r0,1 | 673 | li r0,1 |
639 | stb r0,PACASOFTIRQEN(r13) | 674 | stb r0,PACASOFTIRQEN(r13) |
640 | stb r0,PACAHARDIRQEN(r13) | 675 | stb r0,PACAHARDIRQEN(r13) |
676 | #ifdef CONFIG_PPC_BOOK3E | ||
677 | wrteei 1 | ||
678 | bl .preempt_schedule | ||
679 | wrteei 0 | ||
680 | #else | ||
641 | ori r10,r10,MSR_EE | 681 | ori r10,r10,MSR_EE |
642 | mtmsrd r10,1 /* reenable interrupts */ | 682 | mtmsrd r10,1 /* reenable interrupts */ |
643 | bl .preempt_schedule | 683 | bl .preempt_schedule |
@@ -646,6 +686,7 @@ do_work: | |||
646 | rldicl r10,r10,48,1 /* disable interrupts again */ | 686 | rldicl r10,r10,48,1 /* disable interrupts again */ |
647 | rotldi r10,r10,16 | 687 | rotldi r10,r10,16 |
648 | mtmsrd r10,1 | 688 | mtmsrd r10,1 |
689 | #endif /* CONFIG_PPC_BOOK3E */ | ||
649 | ld r4,TI_FLAGS(r9) | 690 | ld r4,TI_FLAGS(r9) |
650 | andi. r0,r4,_TIF_NEED_RESCHED | 691 | andi. r0,r4,_TIF_NEED_RESCHED |
651 | bne 1b | 692 | bne 1b |
@@ -654,8 +695,12 @@ do_work: | |||
654 | user_work: | 695 | user_work: |
655 | #endif | 696 | #endif |
656 | /* Enable interrupts */ | 697 | /* Enable interrupts */ |
698 | #ifdef CONFIG_PPC_BOOK3E | ||
699 | wrteei 1 | ||
700 | #else | ||
657 | ori r10,r10,MSR_EE | 701 | ori r10,r10,MSR_EE |
658 | mtmsrd r10,1 | 702 | mtmsrd r10,1 |
703 | #endif /* CONFIG_PPC_BOOK3E */ | ||
659 | 704 | ||
660 | andi. r0,r4,_TIF_NEED_RESCHED | 705 | andi. r0,r4,_TIF_NEED_RESCHED |
661 | beq 1f | 706 | beq 1f |
@@ -837,6 +882,10 @@ _GLOBAL(enter_prom) | |||
837 | 882 | ||
838 | /* Switch MSR to 32 bits mode | 883 | /* Switch MSR to 32 bits mode |
839 | */ | 884 | */ |
885 | #ifdef CONFIG_PPC_BOOK3E | ||
886 | rlwinm r11,r11,0,1,31 | ||
887 | mtmsr r11 | ||
888 | #else /* CONFIG_PPC_BOOK3E */ | ||
840 | mfmsr r11 | 889 | mfmsr r11 |
841 | li r12,1 | 890 | li r12,1 |
842 | rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG) | 891 | rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG) |
@@ -845,6 +894,7 @@ _GLOBAL(enter_prom) | |||
845 | rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG) | 894 | rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG) |
846 | andc r11,r11,r12 | 895 | andc r11,r11,r12 |
847 | mtmsrd r11 | 896 | mtmsrd r11 |
897 | #endif /* CONFIG_PPC_BOOK3E */ | ||
848 | isync | 898 | isync |
849 | 899 | ||
850 | /* Enter PROM here... */ | 900 | /* Enter PROM here... */ |
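
The recurring change in entry_64.S above is that Book3E cannot use the mfmsr/ori/mtmsrd read-modify-write to flip MSR[EE]; it has the wrteei instruction, which writes only the EE bit. A C-with-inline-assembly sketch of the two variants, assuming the same CONFIG_PPC_BOOK3E split (illustrative only, not the kernel's actual hw_irq.h helpers):

static inline void hard_irq_enable_sketch(void)
{
#ifdef CONFIG_PPC_BOOK3E
	/* Book3E: write MSR[EE] directly, nothing else is touched */
	asm volatile("wrteei 1" : : : "memory");
#else
	/* Book3S: read MSR, set EE, write it back (L=1 updates only EE/RI) */
	unsigned long msr;

	asm volatile("mfmsr %0" : "=r" (msr));
	msr |= MSR_EE;
	asm volatile("mtmsrd %0,1" : : "r" (msr) : "memory");
#endif
}
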
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
new file mode 100644
index 000000000000..695d4847d228
--- /dev/null
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -0,0 +1,784 @@ | |||
1 | /* | ||
2 | * Boot code and exception vectors for Book3E processors | ||
3 | * | ||
4 | * Copyright (C) 2007 Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/threads.h> | ||
13 | #include <asm/reg.h> | ||
14 | #include <asm/page.h> | ||
15 | #include <asm/ppc_asm.h> | ||
16 | #include <asm/asm-offsets.h> | ||
17 | #include <asm/cputable.h> | ||
18 | #include <asm/setup.h> | ||
19 | #include <asm/thread_info.h> | ||
20 | #include <asm/reg.h> | ||
21 | #include <asm/exception-64e.h> | ||
22 | #include <asm/bug.h> | ||
23 | #include <asm/irqflags.h> | ||
24 | #include <asm/ptrace.h> | ||
25 | #include <asm/ppc-opcode.h> | ||
26 | #include <asm/mmu.h> | ||
27 | |||
28 | /* XXX This will ultimately add space for a special exception save | ||
29 | * structure used to save things like SRR0/SRR1, SPRGs, MAS, etc... | ||
30 | * when taking special interrupts. For now we don't support that, | ||
31 | * special interrupts from within a non-standard level will probably | ||
32 | * blow you up | ||
33 | */ | ||
34 | #define SPECIAL_EXC_FRAME_SIZE INT_FRAME_SIZE | ||
35 | |||
36 | /* Exception prolog code for all exceptions */ | ||
37 | #define EXCEPTION_PROLOG(n, type, addition) \ | ||
38 | mtspr SPRN_SPRG_##type##_SCRATCH,r13; /* get spare registers */ \ | ||
39 | mfspr r13,SPRN_SPRG_PACA; /* get PACA */ \ | ||
40 | std r10,PACA_EX##type+EX_R10(r13); \ | ||
41 | std r11,PACA_EX##type+EX_R11(r13); \ | ||
42 | mfcr r10; /* save CR */ \ | ||
43 | addition; /* additional code for that exc. */ \ | ||
44 | std r1,PACA_EX##type+EX_R1(r13); /* save old r1 in the PACA */ \ | ||
45 | stw r10,PACA_EX##type+EX_CR(r13); /* save old CR in the PACA */ \ | ||
46 | mfspr r11,SPRN_##type##_SRR1;/* what are we coming from */ \ | ||
47 | type##_SET_KSTACK; /* get special stack if necessary */\ | ||
48 | andi. r10,r11,MSR_PR; /* save stack pointer */ \ | ||
49 | beq 1f; /* branch around if supervisor */ \ | ||
50 | ld r1,PACAKSAVE(r13); /* get kernel stack coming from usr */\ | ||
51 | 1: cmpdi cr1,r1,0; /* check if SP makes sense */ \ | ||
52 | bge- cr1,exc_##n##_bad_stack;/* bad stack (TODO: out of line) */ \ | ||
53 | mfspr r10,SPRN_##type##_SRR0; /* read SRR0 before touching stack */ | ||
54 | |||
55 | /* Exception type-specific macros */ | ||
56 | #define GEN_SET_KSTACK \ | ||
57 | subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ | ||
58 | #define SPRN_GEN_SRR0 SPRN_SRR0 | ||
59 | #define SPRN_GEN_SRR1 SPRN_SRR1 | ||
60 | |||
61 | #define CRIT_SET_KSTACK \ | ||
62 | ld r1,PACA_CRIT_STACK(r13); \ | ||
63 | subi r1,r1,SPECIAL_EXC_FRAME_SIZE; | ||
64 | #define SPRN_CRIT_SRR0 SPRN_CSRR0 | ||
65 | #define SPRN_CRIT_SRR1 SPRN_CSRR1 | ||
66 | |||
67 | #define DBG_SET_KSTACK \ | ||
68 | ld r1,PACA_DBG_STACK(r13); \ | ||
69 | subi r1,r1,SPECIAL_EXC_FRAME_SIZE; | ||
70 | #define SPRN_DBG_SRR0 SPRN_DSRR0 | ||
71 | #define SPRN_DBG_SRR1 SPRN_DSRR1 | ||
72 | |||
73 | #define MC_SET_KSTACK \ | ||
74 | ld r1,PACA_MC_STACK(r13); \ | ||
75 | subi r1,r1,SPECIAL_EXC_FRAME_SIZE; | ||
76 | #define SPRN_MC_SRR0 SPRN_MCSRR0 | ||
77 | #define SPRN_MC_SRR1 SPRN_MCSRR1 | ||
78 | |||
79 | #define NORMAL_EXCEPTION_PROLOG(n, addition) \ | ||
80 | EXCEPTION_PROLOG(n, GEN, addition##_GEN) | ||
81 | |||
82 | #define CRIT_EXCEPTION_PROLOG(n, addition) \ | ||
83 | EXCEPTION_PROLOG(n, CRIT, addition##_CRIT) | ||
84 | |||
85 | #define DBG_EXCEPTION_PROLOG(n, addition) \ | ||
86 | EXCEPTION_PROLOG(n, DBG, addition##_DBG) | ||
87 | |||
88 | #define MC_EXCEPTION_PROLOG(n, addition) \ | ||
89 | EXCEPTION_PROLOG(n, MC, addition##_MC) | ||
90 | |||
91 | |||
92 | /* Variants of the "addition" argument for the prolog | ||
93 | */ | ||
94 | #define PROLOG_ADDITION_NONE_GEN | ||
95 | #define PROLOG_ADDITION_NONE_CRIT | ||
96 | #define PROLOG_ADDITION_NONE_DBG | ||
97 | #define PROLOG_ADDITION_NONE_MC | ||
98 | |||
99 | #define PROLOG_ADDITION_MASKABLE_GEN \ | ||
100 | lbz r11,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */ \ | ||
101 | cmpwi cr0,r11,0; /* yes -> go out of line */ \ | ||
102 | beq masked_interrupt_book3e; | ||
103 | |||
104 | #define PROLOG_ADDITION_2REGS_GEN \ | ||
105 | std r14,PACA_EXGEN+EX_R14(r13); \ | ||
106 | std r15,PACA_EXGEN+EX_R15(r13) | ||
107 | |||
108 | #define PROLOG_ADDITION_1REG_GEN \ | ||
109 | std r14,PACA_EXGEN+EX_R14(r13); | ||
110 | |||
111 | #define PROLOG_ADDITION_2REGS_CRIT \ | ||
112 | std r14,PACA_EXCRIT+EX_R14(r13); \ | ||
113 | std r15,PACA_EXCRIT+EX_R15(r13) | ||
114 | |||
115 | #define PROLOG_ADDITION_2REGS_DBG \ | ||
116 | std r14,PACA_EXDBG+EX_R14(r13); \ | ||
117 | std r15,PACA_EXDBG+EX_R15(r13) | ||
118 | |||
119 | #define PROLOG_ADDITION_2REGS_MC \ | ||
120 | std r14,PACA_EXMC+EX_R14(r13); \ | ||
121 | std r15,PACA_EXMC+EX_R15(r13) | ||
122 | |||
123 | /* Core exception code for all exceptions except TLB misses. | ||
124 | * XXX: Needs to make SPRN_SPRG_GEN depend on exception type | ||
125 | */ | ||
126 | #define EXCEPTION_COMMON(n, excf, ints) \ | ||
127 | std r0,GPR0(r1); /* save r0 in stackframe */ \ | ||
128 | std r2,GPR2(r1); /* save r2 in stackframe */ \ | ||
129 | SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \ | ||
130 | SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \ | ||
131 | std r9,GPR9(r1); /* save r9 in stackframe */ \ | ||
132 | std r10,_NIP(r1); /* save SRR0 to stackframe */ \ | ||
133 | std r11,_MSR(r1); /* save SRR1 to stackframe */ \ | ||
134 | ACCOUNT_CPU_USER_ENTRY(r10,r11);/* accounting (uses cr0+eq) */ \ | ||
135 | ld r3,excf+EX_R10(r13); /* get back r10 */ \ | ||
136 | ld r4,excf+EX_R11(r13); /* get back r11 */ \ | ||
137 | mfspr r5,SPRN_SPRG_GEN_SCRATCH;/* get back r13 */ \ | ||
138 | std r12,GPR12(r1); /* save r12 in stackframe */ \ | ||
139 | ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \ | ||
140 | mflr r6; /* save LR in stackframe */ \ | ||
141 | mfctr r7; /* save CTR in stackframe */ \ | ||
142 | mfspr r8,SPRN_XER; /* save XER in stackframe */ \ | ||
143 | ld r9,excf+EX_R1(r13); /* load orig r1 back from PACA */ \ | ||
144 | lwz r10,excf+EX_CR(r13); /* load orig CR back from PACA */ \ | ||
145 | lbz r11,PACASOFTIRQEN(r13); /* get current IRQ softe */ \ | ||
146 | ld r12,exception_marker@toc(r2); \ | ||
147 | li r0,0; \ | ||
148 | std r3,GPR10(r1); /* save r10 to stackframe */ \ | ||
149 | std r4,GPR11(r1); /* save r11 to stackframe */ \ | ||
150 | std r5,GPR13(r1); /* save it to stackframe */ \ | ||
151 | std r6,_LINK(r1); \ | ||
152 | std r7,_CTR(r1); \ | ||
153 | std r8,_XER(r1); \ | ||
154 | li r3,(n)+1; /* indicate partial regs in trap */ \ | ||
155 | std r9,0(r1); /* store stack frame back link */ \ | ||
156 | std r10,_CCR(r1); /* store orig CR in stackframe */ \ | ||
157 | std r9,GPR1(r1); /* store stack frame back link */ \ | ||
158 | std r11,SOFTE(r1); /* and save it to stackframe */ \ | ||
159 | std r12,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */ \ | ||
160 | std r3,_TRAP(r1); /* set trap number */ \ | ||
161 | std r0,RESULT(r1); /* clear regs->result */ \ | ||
162 | ints; | ||
163 | |||
164 | /* Variants for the "ints" argument */ | ||
165 | #define INTS_KEEP | ||
166 | #define INTS_DISABLE_SOFT \ | ||
167 | stb r0,PACASOFTIRQEN(r13); /* mark interrupts soft-disabled */ \ | ||
168 | TRACE_DISABLE_INTS; | ||
169 | #define INTS_DISABLE_HARD \ | ||
170 | stb r0,PACAHARDIRQEN(r13); /* and hard disabled */ | ||
171 | #define INTS_DISABLE_ALL \ | ||
172 | INTS_DISABLE_SOFT \ | ||
173 | INTS_DISABLE_HARD | ||
174 | |||
175 | /* This is called by exceptions that used INTS_KEEP (that is did not clear | ||
176 | * neither soft nor hard IRQ indicators in the PACA. This will restore MSR:EE | ||
177 | * to it's previous value | ||
178 | * | ||
179 | * XXX In the long run, we may want to open-code it in order to separate the | ||
180 | * load from the wrtee, thus limiting the latency caused by the dependency | ||
181 | * but at this point, I'll favor code clarity until we have a near to final | ||
182 | * implementation | ||
183 | */ | ||
184 | #define INTS_RESTORE_HARD \ | ||
185 | ld r11,_MSR(r1); \ | ||
186 | wrtee r11; | ||
187 | |||
188 | /* XXX FIXME: Restore r14/r15 when necessary */ | ||
189 | #define BAD_STACK_TRAMPOLINE(n) \ | ||
190 | exc_##n##_bad_stack: \ | ||
191 | li r1,(n); /* get exception number */ \ | ||
192 | sth r1,PACA_TRAP_SAVE(r13); /* store trap */ \ | ||
193 | b bad_stack_book3e; /* bad stack error */ | ||
194 | |||
195 | #define EXCEPTION_STUB(loc, label) \ | ||
196 | . = interrupt_base_book3e + loc; \ | ||
197 | nop; /* To make debug interrupts happy */ \ | ||
198 | b exc_##label##_book3e; | ||
199 | |||
200 | #define ACK_NONE(r) | ||
201 | #define ACK_DEC(r) \ | ||
202 | lis r,TSR_DIS@h; \ | ||
203 | mtspr SPRN_TSR,r | ||
204 | #define ACK_FIT(r) \ | ||
205 | lis r,TSR_FIS@h; \ | ||
206 | mtspr SPRN_TSR,r | ||
207 | |||
208 | #define MASKABLE_EXCEPTION(trapnum, label, hdlr, ack) \ | ||
209 | START_EXCEPTION(label); \ | ||
210 | NORMAL_EXCEPTION_PROLOG(trapnum, PROLOG_ADDITION_MASKABLE) \ | ||
211 | EXCEPTION_COMMON(trapnum, PACA_EXGEN, INTS_DISABLE_ALL) \ | ||
212 | ack(r8); \ | ||
213 | addi r3,r1,STACK_FRAME_OVERHEAD; \ | ||
214 | bl hdlr; \ | ||
215 | b .ret_from_except_lite; | ||
216 | |||
217 | /* This value is used to mark exception frames on the stack. */ | ||
218 | .section ".toc","aw" | ||
219 | exception_marker: | ||
220 | .tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER | ||
221 | |||
222 | |||
223 | /* | ||
224 | * And here we have the exception vectors ! | ||
225 | */ | ||
226 | |||
227 | .text | ||
228 | .balign 0x1000 | ||
229 | .globl interrupt_base_book3e | ||
230 | interrupt_base_book3e: /* fake trap */ | ||
231 | /* Note: If real debug exceptions are supported by the HW, the vector | ||
232 | * below will have to be patched up to point to an appropriate handler | ||
233 | */ | ||
234 | EXCEPTION_STUB(0x000, machine_check) /* 0x0200 */ | ||
235 | EXCEPTION_STUB(0x020, critical_input) /* 0x0580 */ | ||
236 | EXCEPTION_STUB(0x040, debug_crit) /* 0x0d00 */ | ||
237 | EXCEPTION_STUB(0x060, data_storage) /* 0x0300 */ | ||
238 | EXCEPTION_STUB(0x080, instruction_storage) /* 0x0400 */ | ||
239 | EXCEPTION_STUB(0x0a0, external_input) /* 0x0500 */ | ||
240 | EXCEPTION_STUB(0x0c0, alignment) /* 0x0600 */ | ||
241 | EXCEPTION_STUB(0x0e0, program) /* 0x0700 */ | ||
242 | EXCEPTION_STUB(0x100, fp_unavailable) /* 0x0800 */ | ||
243 | EXCEPTION_STUB(0x120, system_call) /* 0x0c00 */ | ||
244 | EXCEPTION_STUB(0x140, ap_unavailable) /* 0x0f20 */ | ||
245 | EXCEPTION_STUB(0x160, decrementer) /* 0x0900 */ | ||
246 | EXCEPTION_STUB(0x180, fixed_interval) /* 0x0980 */ | ||
247 | EXCEPTION_STUB(0x1a0, watchdog) /* 0x09f0 */ | ||
248 | EXCEPTION_STUB(0x1c0, data_tlb_miss) | ||
249 | EXCEPTION_STUB(0x1e0, instruction_tlb_miss) | ||
250 | |||
251 | #if 0 | ||
252 | EXCEPTION_STUB(0x280, processor_doorbell) | ||
253 | EXCEPTION_STUB(0x220, processor_doorbell_crit) | ||
254 | #endif | ||
255 | .globl interrupt_end_book3e | ||
256 | interrupt_end_book3e: | ||
257 | |||
258 | /* Critical Input Interrupt */ | ||
259 | START_EXCEPTION(critical_input); | ||
260 | CRIT_EXCEPTION_PROLOG(0x100, PROLOG_ADDITION_NONE) | ||
261 | // EXCEPTION_COMMON(0x100, PACA_EXCRIT, INTS_DISABLE_ALL) | ||
262 | // bl special_reg_save_crit | ||
263 | // addi r3,r1,STACK_FRAME_OVERHEAD | ||
264 | // bl .critical_exception | ||
265 | // b ret_from_crit_except | ||
266 | b . | ||
267 | |||
268 | /* Machine Check Interrupt */ | ||
269 | START_EXCEPTION(machine_check); | ||
270 | CRIT_EXCEPTION_PROLOG(0x200, PROLOG_ADDITION_NONE) | ||
271 | // EXCEPTION_COMMON(0x200, PACA_EXMC, INTS_DISABLE_ALL) | ||
272 | // bl special_reg_save_mc | ||
273 | // addi r3,r1,STACK_FRAME_OVERHEAD | ||
274 | // bl .machine_check_exception | ||
275 | // b ret_from_mc_except | ||
276 | b . | ||
277 | |||
278 | /* Data Storage Interrupt */ | ||
279 | START_EXCEPTION(data_storage) | ||
280 | NORMAL_EXCEPTION_PROLOG(0x300, PROLOG_ADDITION_2REGS) | ||
281 | mfspr r14,SPRN_DEAR | ||
282 | mfspr r15,SPRN_ESR | ||
283 | EXCEPTION_COMMON(0x300, PACA_EXGEN, INTS_KEEP) | ||
284 | b storage_fault_common | ||
285 | |||
286 | /* Instruction Storage Interrupt */ | ||
287 | START_EXCEPTION(instruction_storage); | ||
288 | NORMAL_EXCEPTION_PROLOG(0x400, PROLOG_ADDITION_2REGS) | ||
289 | li r15,0 | ||
290 | mr r14,r10 | ||
291 | EXCEPTION_COMMON(0x400, PACA_EXGEN, INTS_KEEP) | ||
292 | b storage_fault_common | ||
293 | |||
294 | /* External Input Interrupt */ | ||
295 | MASKABLE_EXCEPTION(0x500, external_input, .do_IRQ, ACK_NONE) | ||
296 | |||
297 | /* Alignment */ | ||
298 | START_EXCEPTION(alignment); | ||
299 | NORMAL_EXCEPTION_PROLOG(0x600, PROLOG_ADDITION_2REGS) | ||
300 | mfspr r14,SPRN_DEAR | ||
301 | mfspr r15,SPRN_ESR | ||
302 | EXCEPTION_COMMON(0x600, PACA_EXGEN, INTS_KEEP) | ||
303 | b alignment_more /* no room, go out of line */ | ||
304 | |||
305 | /* Program Interrupt */ | ||
306 | START_EXCEPTION(program); | ||
307 | NORMAL_EXCEPTION_PROLOG(0x700, PROLOG_ADDITION_1REG) | ||
308 | mfspr r14,SPRN_ESR | ||
309 | EXCEPTION_COMMON(0x700, PACA_EXGEN, INTS_DISABLE_SOFT) | ||
310 | std r14,_DSISR(r1) | ||
311 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
312 | ld r14,PACA_EXGEN+EX_R14(r13) | ||
313 | bl .save_nvgprs | ||
314 | INTS_RESTORE_HARD | ||
315 | bl .program_check_exception | ||
316 | b .ret_from_except | ||
317 | |||
318 | /* Floating Point Unavailable Interrupt */ | ||
319 | START_EXCEPTION(fp_unavailable); | ||
320 | NORMAL_EXCEPTION_PROLOG(0x800, PROLOG_ADDITION_NONE) | ||
321 | /* we can probably do a shorter exception entry for that one... */ | ||
322 | EXCEPTION_COMMON(0x800, PACA_EXGEN, INTS_KEEP) | ||
323 | bne 1f /* if from user, just load it up */ | ||
324 | bl .save_nvgprs | ||
325 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
326 | INTS_RESTORE_HARD | ||
327 | bl .kernel_fp_unavailable_exception | ||
328 | BUG_OPCODE | ||
329 | 1: ld r12,_MSR(r1) | ||
330 | bl .load_up_fpu | ||
331 | b fast_exception_return | ||
332 | |||
333 | /* Decrementer Interrupt */ | ||
334 | MASKABLE_EXCEPTION(0x900, decrementer, .timer_interrupt, ACK_DEC) | ||
335 | |||
336 | /* Fixed Interval Timer Interrupt */ | ||
337 | MASKABLE_EXCEPTION(0x980, fixed_interval, .unknown_exception, ACK_FIT) | ||
338 | |||
339 | /* Watchdog Timer Interrupt */ | ||
340 | START_EXCEPTION(watchdog); | ||
341 | CRIT_EXCEPTION_PROLOG(0x9f0, PROLOG_ADDITION_NONE) | ||
342 | // EXCEPTION_COMMON(0x9f0, PACA_EXCRIT, INTS_DISABLE_ALL) | ||
343 | // bl special_reg_save_crit | ||
344 | // addi r3,r1,STACK_FRAME_OVERHEAD | ||
345 | // bl .unknown_exception | ||
346 | // b ret_from_crit_except | ||
347 | b . | ||
348 | |||
349 | /* System Call Interrupt */ | ||
350 | START_EXCEPTION(system_call) | ||
351 | mr r9,r13 /* keep a copy of userland r13 */ | ||
352 | mfspr r11,SPRN_SRR0 /* get return address */ | ||
353 | mfspr r12,SPRN_SRR1 /* get previous MSR */ | ||
354 | mfspr r13,SPRN_SPRG_PACA /* get our PACA */ | ||
355 | b system_call_common | ||
356 | |||
357 | /* Auxillary Processor Unavailable Interrupt */ | ||
358 | START_EXCEPTION(ap_unavailable); | ||
359 | NORMAL_EXCEPTION_PROLOG(0xf20, PROLOG_ADDITION_NONE) | ||
360 | EXCEPTION_COMMON(0xf20, PACA_EXGEN, INTS_KEEP) | ||
361 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
362 | bl .save_nvgprs | ||
363 | INTS_RESTORE_HARD | ||
364 | bl .unknown_exception | ||
365 | b .ret_from_except | ||
366 | |||
367 | /* Debug exception as a critical interrupt*/ | ||
368 | START_EXCEPTION(debug_crit); | ||
369 | CRIT_EXCEPTION_PROLOG(0xd00, PROLOG_ADDITION_2REGS) | ||
370 | |||
371 | /* | ||
372 | * If there is a single step or branch-taken exception in an | ||
373 | * exception entry sequence, it was probably meant to apply to | ||
374 | * the code where the exception occurred (since exception entry | ||
375 | * doesn't turn off DE automatically). We simulate the effect | ||
376 | * of turning off DE on entry to an exception handler by turning | ||
377 | * off DE in the CSRR1 value and clearing the debug status. | ||
378 | */ | ||
379 | |||
380 | mfspr r14,SPRN_DBSR /* check single-step/branch taken */ | ||
381 | andis. r15,r14,DBSR_IC@h | ||
382 | beq+ 1f | ||
383 | |||
384 | LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e) | ||
385 | LOAD_REG_IMMEDIATE(r15,interrupt_end_book3e) | ||
386 | cmpld cr0,r10,r14 | ||
387 | cmpld cr1,r10,r15 | ||
388 | blt+ cr0,1f | ||
389 | bge+ cr1,1f | ||
390 | |||
391 | /* here it looks like we got an inappropriate debug exception. */ | ||
392 | lis r14,DBSR_IC@h /* clear the IC event */ | ||
393 | rlwinm r11,r11,0,~MSR_DE /* clear DE in the CSRR1 value */ | ||
394 | mtspr SPRN_DBSR,r14 | ||
395 | mtspr SPRN_CSRR1,r11 | ||
396 | lwz r10,PACA_EXCRIT+EX_CR(r13) /* restore registers */ | ||
397 | ld r1,PACA_EXCRIT+EX_R1(r13) | ||
398 | ld r14,PACA_EXCRIT+EX_R14(r13) | ||
399 | ld r15,PACA_EXCRIT+EX_R15(r13) | ||
400 | mtcr r10 | ||
401 | ld r10,PACA_EXCRIT+EX_R10(r13) /* restore registers */ | ||
402 | ld r11,PACA_EXCRIT+EX_R11(r13) | ||
403 | mfspr r13,SPRN_SPRG_CRIT_SCRATCH | ||
404 | rfci | ||
405 | |||
406 | /* Normal debug exception */ | ||
407 | /* XXX We only handle coming from userspace for now since we can't | ||
408 | * quite save properly an interrupted kernel state yet | ||
409 | */ | ||
410 | 1: andi. r14,r11,MSR_PR; /* check for userspace again */ | ||
411 | beq kernel_dbg_exc; /* if from kernel mode */ | ||
412 | |||
413 | /* Now we mash up things to make it look like we are coming on a | ||
414 | * normal exception | ||
415 | */ | ||
416 | mfspr r15,SPRN_SPRG_CRIT_SCRATCH | ||
417 | mtspr SPRN_SPRG_GEN_SCRATCH,r15 | ||
418 | mfspr r14,SPRN_DBSR | ||
419 | EXCEPTION_COMMON(0xd00, PACA_EXCRIT, INTS_DISABLE_ALL) | ||
420 | std r14,_DSISR(r1) | ||
421 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
422 | mr r4,r14 | ||
423 | ld r14,PACA_EXCRIT+EX_R14(r13) | ||
424 | ld r15,PACA_EXCRIT+EX_R15(r13) | ||
425 | bl .save_nvgprs | ||
426 | bl .DebugException | ||
427 | b .ret_from_except | ||
428 | |||
429 | kernel_dbg_exc: | ||
430 | b . /* NYI */ | ||
431 | |||
432 | |||
433 | /* | ||
434 | * An interrupt came in while soft-disabled; clear EE in SRR1, | ||
435 | * clear paca->hard_enabled and return. | ||
436 | */ | ||
437 | masked_interrupt_book3e: | ||
438 | mtcr r10 | ||
439 | stb r11,PACAHARDIRQEN(r13) | ||
440 | mfspr r10,SPRN_SRR1 | ||
441 | rldicl r11,r10,48,1 /* clear MSR_EE */ | ||
442 | rotldi r10,r11,16 | ||
443 | mtspr SPRN_SRR1,r10 | ||
444 | ld r10,PACA_EXGEN+EX_R10(r13); /* restore registers */ | ||
445 | ld r11,PACA_EXGEN+EX_R11(r13); | ||
446 | mfspr r13,SPRN_SPRG_GEN_SCRATCH; | ||
447 | rfi | ||
448 | b . | ||
449 | |||
450 | /* | ||
451 | * This is called from 0x300 and 0x400 handlers after the prologs with | ||
452 | * r14 and r15 containing the fault address and error code, with the | ||
453 | * original values stashed away in the PACA | ||
454 | */ | ||
455 | storage_fault_common: | ||
456 | std r14,_DAR(r1) | ||
457 | std r15,_DSISR(r1) | ||
458 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
459 | mr r4,r14 | ||
460 | mr r5,r15 | ||
461 | ld r14,PACA_EXGEN+EX_R14(r13) | ||
462 | ld r15,PACA_EXGEN+EX_R15(r13) | ||
463 | INTS_RESTORE_HARD | ||
464 | bl .do_page_fault | ||
465 | cmpdi r3,0 | ||
466 | bne- 1f | ||
467 | b .ret_from_except_lite | ||
468 | 1: bl .save_nvgprs | ||
469 | mr r5,r3 | ||
470 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
471 | ld r4,_DAR(r1) | ||
472 | bl .bad_page_fault | ||
473 | b .ret_from_except | ||
474 | |||
475 | /* | ||
476 | * Alignment exception doesn't fit entirely in the 0x100 bytes so it | ||
477 | * continues here. | ||
478 | */ | ||
479 | alignment_more: | ||
480 | std r14,_DAR(r1) | ||
481 | std r15,_DSISR(r1) | ||
482 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
483 | ld r14,PACA_EXGEN+EX_R14(r13) | ||
484 | ld r15,PACA_EXGEN+EX_R15(r13) | ||
485 | bl .save_nvgprs | ||
486 | INTS_RESTORE_HARD | ||
487 | bl .alignment_exception | ||
488 | b .ret_from_except | ||
489 | |||
490 | /* | ||
491 | * We branch here from entry_64.S for the last stage of the exception | ||
492 | * return code path. MSR:EE is expected to be off at that point | ||
493 | */ | ||
494 | _GLOBAL(exception_return_book3e) | ||
495 | b 1f | ||
496 | |||
497 | /* This is the return from load_up_fpu fast path which could do with | ||
498 | * less GPR restores in fact, but for now we have a single return path | ||
499 | */ | ||
500 | .globl fast_exception_return | ||
501 | fast_exception_return: | ||
502 | wrteei 0 | ||
503 | 1: mr r0,r13 | ||
504 | ld r10,_MSR(r1) | ||
505 | REST_4GPRS(2, r1) | ||
506 | andi. r6,r10,MSR_PR | ||
507 | REST_2GPRS(6, r1) | ||
508 | beq 1f | ||
509 | ACCOUNT_CPU_USER_EXIT(r10, r11) | ||
510 | ld r0,GPR13(r1) | ||
511 | |||
512 | 1: stdcx. r0,0,r1 /* to clear the reservation */ | ||
513 | |||
514 | ld r8,_CCR(r1) | ||
515 | ld r9,_LINK(r1) | ||
516 | ld r10,_CTR(r1) | ||
517 | ld r11,_XER(r1) | ||
518 | mtcr r8 | ||
519 | mtlr r9 | ||
520 | mtctr r10 | ||
521 | mtxer r11 | ||
522 | REST_2GPRS(8, r1) | ||
523 | ld r10,GPR10(r1) | ||
524 | ld r11,GPR11(r1) | ||
525 | ld r12,GPR12(r1) | ||
526 | mtspr SPRN_SPRG_GEN_SCRATCH,r0 | ||
527 | |||
528 | std r10,PACA_EXGEN+EX_R10(r13); | ||
529 | std r11,PACA_EXGEN+EX_R11(r13); | ||
530 | ld r10,_NIP(r1) | ||
531 | ld r11,_MSR(r1) | ||
532 | ld r0,GPR0(r1) | ||
533 | ld r1,GPR1(r1) | ||
534 | mtspr SPRN_SRR0,r10 | ||
535 | mtspr SPRN_SRR1,r11 | ||
536 | ld r10,PACA_EXGEN+EX_R10(r13) | ||
537 | ld r11,PACA_EXGEN+EX_R11(r13) | ||
538 | mfspr r13,SPRN_SPRG_GEN_SCRATCH | ||
539 | rfi | ||
540 | |||
541 | /* | ||
542 | * Trampolines used when spotting a bad kernel stack pointer in | ||
543 | * the exception entry code. | ||
544 | * | ||
545 | * TODO: move some bits like SRR0 read to trampoline, pass PACA | ||
546 | * index around, etc... to handle crit & mcheck | ||
547 | */ | ||
548 | BAD_STACK_TRAMPOLINE(0x000) | ||
549 | BAD_STACK_TRAMPOLINE(0x100) | ||
550 | BAD_STACK_TRAMPOLINE(0x200) | ||
551 | BAD_STACK_TRAMPOLINE(0x300) | ||
552 | BAD_STACK_TRAMPOLINE(0x400) | ||
553 | BAD_STACK_TRAMPOLINE(0x500) | ||
554 | BAD_STACK_TRAMPOLINE(0x600) | ||
555 | BAD_STACK_TRAMPOLINE(0x700) | ||
556 | BAD_STACK_TRAMPOLINE(0x800) | ||
557 | BAD_STACK_TRAMPOLINE(0x900) | ||
558 | BAD_STACK_TRAMPOLINE(0x980) | ||
559 | BAD_STACK_TRAMPOLINE(0x9f0) | ||
560 | BAD_STACK_TRAMPOLINE(0xa00) | ||
561 | BAD_STACK_TRAMPOLINE(0xb00) | ||
562 | BAD_STACK_TRAMPOLINE(0xc00) | ||
563 | BAD_STACK_TRAMPOLINE(0xd00) | ||
564 | BAD_STACK_TRAMPOLINE(0xe00) | ||
565 | BAD_STACK_TRAMPOLINE(0xf00) | ||
566 | BAD_STACK_TRAMPOLINE(0xf20) | ||
567 | |||
568 | .globl bad_stack_book3e | ||
569 | bad_stack_book3e: | ||
570 | /* XXX: Needs to make SPRN_SPRG_GEN depend on exception type */ | ||
571 | mfspr r10,SPRN_SRR0; /* read SRR0 before touching stack */ | ||
572 | ld r1,PACAEMERGSP(r13) | ||
573 | subi r1,r1,64+INT_FRAME_SIZE | ||
574 | std r10,_NIP(r1) | ||
575 | std r11,_MSR(r1) | ||
576 | ld r10,PACA_EXGEN+EX_R1(r13) /* FIXME for crit & mcheck */ | ||
577 | lwz r11,PACA_EXGEN+EX_CR(r13) /* FIXME for crit & mcheck */ | ||
578 | std r10,GPR1(r1) | ||
579 | std r11,_CCR(r1) | ||
580 | mfspr r10,SPRN_DEAR | ||
581 | mfspr r11,SPRN_ESR | ||
582 | std r10,_DAR(r1) | ||
583 | std r11,_DSISR(r1) | ||
584 | std r0,GPR0(r1); /* save r0 in stackframe */ \ | ||
585 | std r2,GPR2(r1); /* save r2 in stackframe */ \ | ||
586 | SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \ | ||
587 | SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \ | ||
588 | std r9,GPR9(r1); /* save r9 in stackframe */ \ | ||
589 | ld r3,PACA_EXGEN+EX_R10(r13);/* get back r10 */ \ | ||
590 | ld r4,PACA_EXGEN+EX_R11(r13);/* get back r11 */ \ | ||
591 | mfspr r5,SPRN_SPRG_GEN_SCRATCH;/* get back r13 XXX can be wrong */ \ | ||
592 | std r3,GPR10(r1); /* save r10 to stackframe */ \ | ||
593 | std r4,GPR11(r1); /* save r11 to stackframe */ \ | ||
594 | std r12,GPR12(r1); /* save r12 in stackframe */ \ | ||
595 | std r5,GPR13(r1); /* save it to stackframe */ \ | ||
596 | mflr r10 | ||
597 | mfctr r11 | ||
598 | mfxer r12 | ||
599 | std r10,_LINK(r1) | ||
600 | std r11,_CTR(r1) | ||
601 | std r12,_XER(r1) | ||
602 | SAVE_10GPRS(14,r1) | ||
603 | SAVE_8GPRS(24,r1) | ||
604 | lhz r12,PACA_TRAP_SAVE(r13) | ||
605 | std r12,_TRAP(r1) | ||
606 | addi r11,r1,INT_FRAME_SIZE | ||
607 | std r11,0(r1) | ||
608 | li r12,0 | ||
609 | std r12,0(r11) | ||
610 | ld r2,PACATOC(r13) | ||
611 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | ||
612 | bl .kernel_bad_stack | ||
613 | b 1b | ||
614 | |||
615 | /* | ||
616 | * Setup the initial TLB for a core. This current implementation | ||
617 | * assume that whatever we are running off will not conflict with | ||
618 | * the new mapping at PAGE_OFFSET. | ||
619 | * We also make various assumptions about the processor we run on, | ||
620 | * this might have to be made more flexible based on the content | ||
621 | * of MMUCFG and friends. | ||
622 | */ | ||
623 | _GLOBAL(initial_tlb_book3e) | ||
624 | |||
625 | /* Setup MAS 0,1,2,3 and 7 for tlbwe of a 1G entry that maps the | ||
626 | * kernel linear mapping. We also set MAS8 once for all here though | ||
627 | * that will have to be made dependent on whether we are running under | ||
628 | * a hypervisor I suppose. | ||
629 | */ | ||
630 | li r3,MAS0_HES | MAS0_WQ_ALLWAYS | ||
631 | mtspr SPRN_MAS0,r3 | ||
632 | lis r3,(MAS1_VALID | MAS1_IPROT)@h | ||
633 | ori r3,r3,BOOK3E_PAGESZ_1GB << MAS1_TSIZE_SHIFT | ||
634 | mtspr SPRN_MAS1,r3 | ||
635 | LOAD_REG_IMMEDIATE(r3, PAGE_OFFSET | MAS2_M) | ||
636 | mtspr SPRN_MAS2,r3 | ||
637 | li r3,MAS3_SR | MAS3_SW | MAS3_SX | ||
638 | mtspr SPRN_MAS7_MAS3,r3 | ||
639 | li r3,0 | ||
640 | mtspr SPRN_MAS8,r3 | ||
641 | |||
642 | /* Write the TLB entry */ | ||
643 | tlbwe | ||
644 | |||
645 | /* Now we branch the new virtual address mapped by this entry */ | ||
646 | LOAD_REG_IMMEDIATE(r3,1f) | ||
647 | mtctr r3 | ||
648 | bctr | ||
649 | |||
650 | 1: /* We are now running at PAGE_OFFSET, clean the TLB of everything | ||
651 | * else (XXX we should scan for bolted crap from the firmware too) | ||
652 | */ | ||
653 | PPC_TLBILX(0,0,0) | ||
654 | sync | ||
655 | isync | ||
656 | |||
657 | /* We translate LR and return */ | ||
658 | mflr r3 | ||
659 | tovirt(r3,r3) | ||
660 | mtlr r3 | ||
661 | blr | ||
662 | |||
663 | /* | ||
664 | * Main entry (boot CPU, thread 0) | ||
665 | * | ||
666 | * We enter here from head_64.S, possibly after the prom_init trampoline | ||
667 | * with r3 and r4 already saved to r31 and 30 respectively and in 64 bits | ||
668 | * mode. Anything else is as it was left by the bootloader | ||
669 | * | ||
670 | * Initial requirements of this port: | ||
671 | * | ||
672 | * - Kernel loaded at 0 physical | ||
673 | * - A good lump of memory mapped 0:0 by UTLB entry 0 | ||
674 | * - MSR:IS & MSR:DS set to 0 | ||
675 | * | ||
676 | * Note that some of the above requirements will be relaxed in the future | ||
677 | * as the kernel becomes smarter at dealing with different initial conditions | ||
678 | * but for now you have to be careful | ||
679 | */ | ||
680 | _GLOBAL(start_initialization_book3e) | ||
681 | mflr r28 | ||
682 | |||
683 | /* First, we need to setup some initial TLBs to map the kernel | ||
684 | * text, data and bss at PAGE_OFFSET. We don't have a real mode | ||
685 | * and always use AS 0, so we just set it up to match our link | ||
686 | * address and never use 0 based addresses. | ||
687 | */ | ||
688 | bl .initial_tlb_book3e | ||
689 | |||
690 | /* Init global core bits */ | ||
691 | bl .init_core_book3e | ||
692 | |||
693 | /* Init per-thread bits */ | ||
694 | bl .init_thread_book3e | ||
695 | |||
696 | /* Return to common init code */ | ||
697 | tovirt(r28,r28) | ||
698 | mtlr r28 | ||
699 | blr | ||
700 | |||
701 | |||
702 | /* | ||
703 | * Secondary core/processor entry | ||
704 | * | ||
705 | * This is entered for thread 0 of a secondary core, all other threads | ||
706 | * are expected to be stopped. It's similar to start_initialization_book3e | ||
707 | * except that it's generally entered from the holding loop in head_64.S | ||
708 | * after CPUs have been gathered by Open Firmware. | ||
709 | * | ||
710 | * We assume we are in 32 bits mode running with whatever TLB entry was | ||
711 | * set for us by the firmware or POR engine. | ||
712 | */ | ||
713 | _GLOBAL(book3e_secondary_core_init_tlb_set) | ||
714 | li r4,1 | ||
715 | b .generic_secondary_smp_init | ||
716 | |||
717 | _GLOBAL(book3e_secondary_core_init) | ||
718 | mflr r28 | ||
719 | |||
720 | /* Do we need to setup initial TLB entry ? */ | ||
721 | cmplwi r4,0 | ||
722 | bne 2f | ||
723 | |||
724 | /* Setup TLB for this core */ | ||
725 | bl .initial_tlb_book3e | ||
726 | |||
727 | /* We can return from the above running at a different | ||
728 | * address, so recalculate r2 (TOC) | ||
729 | */ | ||
730 | bl .relative_toc | ||
731 | |||
732 | /* Init global core bits */ | ||
733 | 2: bl .init_core_book3e | ||
734 | |||
735 | /* Init per-thread bits */ | ||
736 | 3: bl .init_thread_book3e | ||
737 | |||
738 | /* Return to common init code at proper virtual address. | ||
739 | * | ||
740 | * Due to various previous assumptions, we know we entered this | ||
741 | * function at either the final PAGE_OFFSET mapping or using a | ||
742 | * 1:1 mapping at 0, so we don't bother doing a complicated check | ||
743 | * here, we just ensure the return address has the right top bits. | ||
744 | * | ||
745 | * Note that if we ever want to be smarter about where we can be | ||
746 | * started from, we have to be careful that by the time we reach | ||
747 | * the code below we may already be running at a different location | ||
748 | * than the one we were called from since initial_tlb_book3e can | ||
749 | * have moved us already. | ||
750 | */ | ||
751 | cmpdi cr0,r28,0 | ||
752 | blt 1f | ||
753 | lis r3,PAGE_OFFSET@highest | ||
754 | sldi r3,r3,32 | ||
755 | or r28,r28,r3 | ||
756 | 1: mtlr r28 | ||
757 | blr | ||
758 | |||
759 | _GLOBAL(book3e_secondary_thread_init) | ||
760 | mflr r28 | ||
761 | b 3b | ||
762 | |||
763 | _STATIC(init_core_book3e) | ||
764 | /* Establish the interrupt vector base */ | ||
765 | LOAD_REG_IMMEDIATE(r3, interrupt_base_book3e) | ||
766 | mtspr SPRN_IVPR,r3 | ||
767 | sync | ||
768 | blr | ||
769 | |||
770 | _STATIC(init_thread_book3e) | ||
771 | lis r3,(SPRN_EPCR_ICM | SPRN_EPCR_GICM)@h | ||
772 | mtspr SPRN_EPCR,r3 | ||
773 | |||
774 | /* Make sure interrupts are off */ | ||
775 | wrteei 0 | ||
776 | |||
777 | /* disable watchdog and FIT and enable DEC interrupts */ | ||
778 | lis r3,TCR_DIE@h | ||
779 | mtspr SPRN_TCR,r3 | ||
780 | |||
781 | blr | ||
782 | |||
783 | |||
784 | |||
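
masked_interrupt_book3e and the PROLOG_ADDITION_MASKABLE_GEN check above are the Book3E side of the usual 64-bit powerpc lazy interrupt disabling: local_irq_disable() only clears a soft flag in the PACA, and if a maskable interrupt fires anyway, the prolog sees the flag, records that the hardware is now disabled (PACAHARDIRQEN) and returns with MSR[EE] cleared so the event can be replayed later. A rough C sketch of the soft side, using the PACA field names implied by PACASOFTIRQEN/PACAHARDIRQEN; replay_pending_interrupt() is a hypothetical placeholder for the real replay path:

static inline void local_irq_disable_sketch(void)
{
	get_paca()->soft_enabled = 0;		/* MSR[EE] left untouched */
}

static inline void local_irq_enable_sketch(void)
{
	get_paca()->soft_enabled = 1;
	if (!get_paca()->hard_enabled)		/* something was masked out */
		replay_pending_interrupt();	/* hypothetical: hard-enable + replay */
}
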
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 0552f01041ab..c38afdb45d7b 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -121,10 +121,11 @@ __run_at_load: | |||
121 | */ | 121 | */ |
122 | .globl __secondary_hold | 122 | .globl __secondary_hold |
123 | __secondary_hold: | 123 | __secondary_hold: |
124 | #ifndef CONFIG_PPC_BOOK3E | ||
124 | mfmsr r24 | 125 | mfmsr r24 |
125 | ori r24,r24,MSR_RI | 126 | ori r24,r24,MSR_RI |
126 | mtmsrd r24 /* RI on */ | 127 | mtmsrd r24 /* RI on */ |
127 | 128 | #endif | |
128 | /* Grab our physical cpu number */ | 129 | /* Grab our physical cpu number */ |
129 | mr r24,r3 | 130 | mr r24,r3 |
130 | 131 | ||
@@ -143,6 +144,7 @@ __secondary_hold: | |||
143 | ld r4,0(r4) /* deref function descriptor */ | 144 | ld r4,0(r4) /* deref function descriptor */ |
144 | mtctr r4 | 145 | mtctr r4 |
145 | mr r3,r24 | 146 | mr r3,r24 |
147 | li r4,0 | ||
146 | bctr | 148 | bctr |
147 | #else | 149 | #else |
148 | BUG_OPCODE | 150 | BUG_OPCODE |
@@ -163,21 +165,49 @@ exception_marker: | |||
163 | #include "exceptions-64s.S" | 165 | #include "exceptions-64s.S" |
164 | #endif | 166 | #endif |
165 | 167 | ||
168 | _GLOBAL(generic_secondary_thread_init) | ||
169 | mr r24,r3 | ||
170 | |||
171 | /* turn on 64-bit mode */ | ||
172 | bl .enable_64b_mode | ||
173 | |||
174 | /* get a valid TOC pointer, wherever we're mapped at */ | ||
175 | bl .relative_toc | ||
176 | |||
177 | #ifdef CONFIG_PPC_BOOK3E | ||
178 | /* Book3E initialization */ | ||
179 | mr r3,r24 | ||
180 | bl .book3e_secondary_thread_init | ||
181 | #endif | ||
182 | b generic_secondary_common_init | ||
166 | 183 | ||
167 | /* | 184 | /* |
168 | * On pSeries and most other platforms, secondary processors spin | 185 | * On pSeries and most other platforms, secondary processors spin |
169 | * in the following code. | 186 | * in the following code. |
170 | * At entry, r3 = this processor's number (physical cpu id) | 187 | * At entry, r3 = this processor's number (physical cpu id) |
188 | * | ||
189 | * On Book3E, r4 = 1 to indicate that the initial TLB entry for | ||
190 | * this core already exists (setup via some other mechanism such | ||
191 | * as SCOM before entry). | ||
171 | */ | 192 | */ |
172 | _GLOBAL(generic_secondary_smp_init) | 193 | _GLOBAL(generic_secondary_smp_init) |
173 | mr r24,r3 | 194 | mr r24,r3 |
174 | 195 | mr r25,r4 | |
196 | |||
175 | /* turn on 64-bit mode */ | 197 | /* turn on 64-bit mode */ |
176 | bl .enable_64b_mode | 198 | bl .enable_64b_mode |
177 | 199 | ||
178 | /* get the TOC pointer (real address) */ | 200 | /* get a valid TOC pointer, wherever we're mapped at */ |
179 | bl .relative_toc | 201 | bl .relative_toc |
180 | 202 | ||
203 | #ifdef CONFIG_PPC_BOOK3E | ||
204 | /* Book3E initialization */ | ||
205 | mr r3,r24 | ||
206 | mr r4,r25 | ||
207 | bl .book3e_secondary_core_init | ||
208 | #endif | ||
209 | |||
210 | generic_secondary_common_init: | ||
181 | /* Set up a paca value for this processor. Since we have the | 211 | /* Set up a paca value for this processor. Since we have the |
182 | * physical cpu id in r24, we need to search the pacas to find | 212 | * physical cpu id in r24, we need to search the pacas to find |
183 | * which logical id maps to our physical one. | 213 | * which logical id maps to our physical one. |
@@ -196,6 +226,11 @@ _GLOBAL(generic_secondary_smp_init) | |||
196 | b .kexec_wait /* next kernel might do better */ | 226 | b .kexec_wait /* next kernel might do better */ |
197 | 227 | ||
198 | 2: mtspr SPRN_SPRG_PACA,r13 /* Save vaddr of paca in an SPRG */ | 228 | 2: mtspr SPRN_SPRG_PACA,r13 /* Save vaddr of paca in an SPRG */ |
229 | #ifdef CONFIG_PPC_BOOK3E | ||
230 | addi r12,r13,PACA_EXTLB /* and TLB exc frame in another */ | ||
231 | mtspr SPRN_SPRG_TLB_EXFRAME,r12 | ||
232 | #endif | ||
233 | |||
199 | /* From now on, r24 is expected to be logical cpuid */ | 234 | /* From now on, r24 is expected to be logical cpuid */ |
200 | mr r24,r5 | 235 | mr r24,r5 |
201 | 3: HMT_LOW | 236 | 3: HMT_LOW |
@@ -231,6 +266,7 @@ _GLOBAL(generic_secondary_smp_init) | |||
231 | * Turn the MMU off. | 266 | * Turn the MMU off. |
232 | * Assumes we're mapped EA == RA if the MMU is on. | 267 | * Assumes we're mapped EA == RA if the MMU is on. |
233 | */ | 268 | */ |
269 | #ifdef CONFIG_PPC_BOOK3S | ||
234 | _STATIC(__mmu_off) | 270 | _STATIC(__mmu_off) |
235 | mfmsr r3 | 271 | mfmsr r3 |
236 | andi. r0,r3,MSR_IR|MSR_DR | 272 | andi. r0,r3,MSR_IR|MSR_DR |
@@ -242,6 +278,7 @@ _STATIC(__mmu_off) | |||
242 | sync | 278 | sync |
243 | rfid | 279 | rfid |
244 | b . /* prevent speculative execution */ | 280 | b . /* prevent speculative execution */ |
281 | #endif | ||
245 | 282 | ||
246 | 283 | ||
247 | /* | 284 | /* |
@@ -279,6 +316,10 @@ _GLOBAL(__start_initialization_multiplatform) | |||
279 | mr r31,r3 | 316 | mr r31,r3 |
280 | mr r30,r4 | 317 | mr r30,r4 |
281 | 318 | ||
319 | #ifdef CONFIG_PPC_BOOK3E | ||
320 | bl .start_initialization_book3e | ||
321 | b .__after_prom_start | ||
322 | #else | ||
282 | /* Setup some critical 970 SPRs before switching MMU off */ | 323 | /* Setup some critical 970 SPRs before switching MMU off */ |
283 | mfspr r0,SPRN_PVR | 324 | mfspr r0,SPRN_PVR |
284 | srwi r0,r0,16 | 325 | srwi r0,r0,16 |
@@ -296,6 +337,7 @@ _GLOBAL(__start_initialization_multiplatform) | |||
296 | /* Switch off MMU if not already off */ | 337 | /* Switch off MMU if not already off */ |
297 | bl .__mmu_off | 338 | bl .__mmu_off |
298 | b .__after_prom_start | 339 | b .__after_prom_start |
340 | #endif /* CONFIG_PPC_BOOK3E */ | ||
299 | 341 | ||
300 | _INIT_STATIC(__boot_from_prom) | 342 | _INIT_STATIC(__boot_from_prom) |
301 | #ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE | 343 | #ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE |
@@ -358,10 +400,16 @@ _STATIC(__after_prom_start) | |||
358 | * Note: This process overwrites the OF exception vectors. | 400 | * Note: This process overwrites the OF exception vectors. |
359 | */ | 401 | */ |
360 | li r3,0 /* target addr */ | 402 | li r3,0 /* target addr */ |
403 | #ifdef CONFIG_PPC_BOOK3E | ||
404 | tovirt(r3,r3) /* on booke, we already run at PAGE_OFFSET */ | ||
405 | #endif | ||
361 | mr. r4,r26 /* In some cases the loader may */ | 406 | mr. r4,r26 /* In some cases the loader may */ |
362 | beq 9f /* have already put us at zero */ | 407 | beq 9f /* have already put us at zero */ |
363 | li r6,0x100 /* Start offset, the first 0x100 */ | 408 | li r6,0x100 /* Start offset, the first 0x100 */ |
364 | /* bytes were copied earlier. */ | 409 | /* bytes were copied earlier. */ |
410 | #ifdef CONFIG_PPC_BOOK3E | ||
411 | tovirt(r6,r6) /* on booke, we already run at PAGE_OFFSET */ | ||
412 | #endif | ||
365 | 413 | ||
366 | #ifdef CONFIG_CRASH_DUMP | 414 | #ifdef CONFIG_CRASH_DUMP |
367 | /* | 415 | /* |
@@ -507,6 +555,9 @@ _GLOBAL(pmac_secondary_start) | |||
507 | * r13 = paca virtual address | 555 | * r13 = paca virtual address |
508 | * SPRG_PACA = paca virtual address | 556 | * SPRG_PACA = paca virtual address |
509 | */ | 557 | */ |
558 | .section ".text"; | ||
559 | .align 2 ; | ||
560 | |||
510 | .globl __secondary_start | 561 | .globl __secondary_start |
511 | __secondary_start: | 562 | __secondary_start: |
512 | /* Set thread priority to MEDIUM */ | 563 | /* Set thread priority to MEDIUM */ |
@@ -543,7 +594,7 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES) | |||
543 | 594 | ||
544 | mtspr SPRN_SRR0,r3 | 595 | mtspr SPRN_SRR0,r3 |
545 | mtspr SPRN_SRR1,r4 | 596 | mtspr SPRN_SRR1,r4 |
546 | rfid | 597 | RFI |
547 | b . /* prevent speculative execution */ | 598 | b . /* prevent speculative execution */ |
548 | 599 | ||
549 | /* | 600 | /* |
@@ -564,11 +615,16 @@ _GLOBAL(start_secondary_prolog) | |||
564 | */ | 615 | */ |
565 | _GLOBAL(enable_64b_mode) | 616 | _GLOBAL(enable_64b_mode) |
566 | mfmsr r11 /* grab the current MSR */ | 617 | mfmsr r11 /* grab the current MSR */ |
618 | #ifdef CONFIG_PPC_BOOK3E | ||
619 | oris r11,r11,0x8000 /* CM bit set, we'll set ICM later */ | ||
620 | mtmsr r11 | ||
621 | #else /* CONFIG_PPC_BOOK3E */ | ||
567 | li r12,(MSR_SF | MSR_ISF)@highest | 622 | li r12,(MSR_SF | MSR_ISF)@highest |
568 | sldi r12,r12,48 | 623 | sldi r12,r12,48 |
569 | or r11,r11,r12 | 624 | or r11,r11,r12 |
570 | mtmsrd r11 | 625 | mtmsrd r11 |
571 | isync | 626 | isync |
627 | #endif | ||
572 | blr | 628 | blr |
573 | 629 | ||
574 | /* | 630 | /* |
@@ -612,9 +668,11 @@ _INIT_STATIC(start_here_multiplatform) | |||
612 | bdnz 3b | 668 | bdnz 3b |
613 | 4: | 669 | 4: |
614 | 670 | ||
671 | #ifndef CONFIG_PPC_BOOK3E | ||
615 | mfmsr r6 | 672 | mfmsr r6 |
616 | ori r6,r6,MSR_RI | 673 | ori r6,r6,MSR_RI |
617 | mtmsrd r6 /* RI on */ | 674 | mtmsrd r6 /* RI on */ |
675 | #endif | ||
618 | 676 | ||
619 | #ifdef CONFIG_RELOCATABLE | 677 | #ifdef CONFIG_RELOCATABLE |
620 | /* Save the physical address we're running at in kernstart_addr */ | 678 | /* Save the physical address we're running at in kernstart_addr */ |
@@ -647,7 +705,7 @@ _INIT_STATIC(start_here_multiplatform) | |||
647 | ld r4,PACAKMSR(r13) | 705 | ld r4,PACAKMSR(r13) |
648 | mtspr SPRN_SRR0,r3 | 706 | mtspr SPRN_SRR0,r3 |
649 | mtspr SPRN_SRR1,r4 | 707 | mtspr SPRN_SRR1,r4 |
650 | rfid | 708 | RFI |
651 | b . /* prevent speculative execution */ | 709 | b . /* prevent speculative execution */ |
652 | 710 | ||
653 | /* This is where all platforms converge execution */ | 711 | /* This is where all platforms converge execution */ |
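
The rfid -> RFI and mtmsrd -> MTMSRD(r22) substitutions in entry_64.S and head_64.S depend on assembler macros that select the right instruction per sub-architecture, since Book3E has neither rfid nor mtmsrd. Presumably they are defined along these lines (a sketch; the real definitions belong in asm/ppc_asm.h and are not part of this diff):

#ifdef CONFIG_PPC_BOOK3E
#define RFI		rfi		/* Book3E return from interrupt */
#define MTMSRD(r)	mtmsr	r	/* no mtmsrd on Book3E */
#else
#define RFI		rfid
#define MTMSRD(r)	mtmsrd	r
#endif
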
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 65aced7b833a..87df51720641 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -454,6 +454,24 @@ static void __init irqstack_early_init(void) | |||
454 | #define irqstack_early_init() | 454 | #define irqstack_early_init() |
455 | #endif | 455 | #endif |
456 | 456 | ||
457 | #ifdef CONFIG_PPC_BOOK3E | ||
458 | static void __init exc_lvl_early_init(void) | ||
459 | { | ||
460 | unsigned int i; | ||
461 | |||
462 | for_each_possible_cpu(i) { | ||
463 | critirq_ctx[i] = (struct thread_info *) | ||
464 | __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE)); | ||
465 | dbgirq_ctx[i] = (struct thread_info *) | ||
466 | __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE)); | ||
467 | mcheckirq_ctx[i] = (struct thread_info *) | ||
468 | __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE)); | ||
469 | } | ||
470 | } | ||
471 | #else | ||
472 | #define exc_lvl_early_init() | ||
473 | #endif | ||
474 | |||
457 | /* | 475 | /* |
458 | * Stack space used when we detect a bad kernel stack pointer, and | 476 | * Stack space used when we detect a bad kernel stack pointer, and |
459 | * early in SMP boots before relocation is enabled. | 477 | * early in SMP boots before relocation is enabled. |
@@ -513,6 +531,7 @@ void __init setup_arch(char **cmdline_p) | |||
513 | init_mm.brk = klimit; | 531 | init_mm.brk = klimit; |
514 | 532 | ||
515 | irqstack_early_init(); | 533 | irqstack_early_init(); |
534 | exc_lvl_early_init(); | ||
516 | emergency_stack_init(); | 535 | emergency_stack_init(); |
517 | 536 | ||
518 | #ifdef CONFIG_PPC_STD_MMU_64 | 537 | #ifdef CONFIG_PPC_STD_MMU_64 |
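
exc_lvl_early_init() above allocates the Book3E critical, debug and machine-check exception stacks with THREAD_SIZE size and alignment. The alignment matters because the 64-bit entry code recovers the thread_info of the current context by clearing the low THREAD_SHIFT bits of the stack pointer (the clrrdi rN,r1,THREAD_SHIFT instructions in entry_64.S). The same trick rendered in C, purely for illustration:

static inline struct thread_info *stack_to_thread_info(unsigned long sp)
{
	/* equivalent of: clrrdi rN, r1, THREAD_SHIFT */
	return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
}
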