Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c |  23
-rw-r--r--  arch/powerpc/kernel/entry_32.S    | 125
-rw-r--r--  arch/powerpc/kernel/head_40x.S    |   6
-rw-r--r--  arch/powerpc/kernel/head_booke.h  |  23
4 files changed, 174 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index ec9228d687b0..8655c7670350 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -52,6 +52,10 @@
 #include <asm/iseries/alpaca.h>
 #endif
 
+#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+#include "head_booke.h"
+#endif
+
 int main(void)
 {
 	DEFINE(THREAD, offsetof(struct task_struct, thread));
@@ -242,6 +246,25 @@ int main(void)
 	DEFINE(_SRR1, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)+8);
 #endif /* CONFIG_PPC64 */
 
+#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+	DEFINE(EXC_LVL_SIZE, STACK_EXC_LVL_FRAME_SIZE);
+	DEFINE(MAS0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
+	/* we overload MMUCR for 44x on MAS0 since they are mutually exclusive */
+	DEFINE(MMUCR, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
+	DEFINE(MAS1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas1));
+	DEFINE(MAS2, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas2));
+	DEFINE(MAS3, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas3));
+	DEFINE(MAS6, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas6));
+	DEFINE(MAS7, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas7));
+	DEFINE(_SRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr0));
+	DEFINE(_SRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr1));
+	DEFINE(_CSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr0));
+	DEFINE(_CSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr1));
+	DEFINE(_DSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr0));
+	DEFINE(_DSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr1));
+	DEFINE(SAVED_KSP_LIMIT, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, saved_ksp_limit));
+#endif
+
 	DEFINE(CLONE_VM, CLONE_VM);
 	DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
 
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index c94aba54b5dd..fe21674d4f06 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -46,14 +46,52 @@
 #ifdef CONFIG_BOOKE
 	.globl mcheck_transfer_to_handler
 mcheck_transfer_to_handler:
-	b transfer_to_handler_full
+	mfspr r0,SPRN_DSRR0
+	stw r0,_DSRR0(r11)
+	mfspr r0,SPRN_DSRR1
+	stw r0,_DSRR1(r11)
+	/* fall through */
 
 	.globl debug_transfer_to_handler
 debug_transfer_to_handler:
-	b transfer_to_handler_full
+	mfspr r0,SPRN_CSRR0
+	stw r0,_CSRR0(r11)
+	mfspr r0,SPRN_CSRR1
+	stw r0,_CSRR1(r11)
+	/* fall through */
 
 	.globl crit_transfer_to_handler
 crit_transfer_to_handler:
+#ifdef CONFIG_FSL_BOOKE
+	mfspr r0,SPRN_MAS0
+	stw r0,MAS0(r11)
+	mfspr r0,SPRN_MAS1
+	stw r0,MAS1(r11)
+	mfspr r0,SPRN_MAS2
+	stw r0,MAS2(r11)
+	mfspr r0,SPRN_MAS3
+	stw r0,MAS3(r11)
+	mfspr r0,SPRN_MAS6
+	stw r0,MAS6(r11)
+#ifdef CONFIG_PHYS_64BIT
+	mfspr r0,SPRN_MAS7
+	stw r0,MAS7(r11)
+#endif /* CONFIG_PHYS_64BIT */
+#endif /* CONFIG_FSL_BOOKE */
+#ifdef CONFIG_44x
+	mfspr r0,SPRN_MMUCR
+	stw r0,MMUCR(r11)
+#endif
+	mfspr r0,SPRN_SRR0
+	stw r0,_SRR0(r11)
+	mfspr r0,SPRN_SRR1
+	stw r0,_SRR1(r11)
+
+	mfspr r8,SPRN_SPRG3
+	lwz r0,KSP_LIMIT(r8)
+	stw r0,SAVED_KSP_LIMIT(r11)
+	rlwimi r0,r1,0,0,(31-THREAD_SHIFT)
+	stw r0,KSP_LIMIT(r8)
 	/* fall through */
 #endif
 
@@ -64,6 +102,16 @@ crit_transfer_to_handler:
 	stw r0,GPR10(r11)
 	lwz r0,crit_r11@l(0)
 	stw r0,GPR11(r11)
+	mfspr r0,SPRN_SRR0
+	stw r0,crit_srr0@l(0)
+	mfspr r0,SPRN_SRR1
+	stw r0,crit_srr1@l(0)
+
+	mfspr r8,SPRN_SPRG3
+	lwz r0,KSP_LIMIT(r8)
+	stw r0,saved_ksp_limit@l(0)
+	rlwimi r0,r1,0,0,(31-THREAD_SHIFT)
+	stw r0,KSP_LIMIT(r8)
 	/* fall through */
 #endif
 
@@ -854,17 +902,90 @@ exc_exit_restart_end:
 	exc_lvl_rfi; \
 	b .; /* prevent prefetch past exc_lvl_rfi */
 
+#define RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1) \
+	lwz r9,_##exc_lvl_srr0(r1); \
+	lwz r10,_##exc_lvl_srr1(r1); \
+	mtspr SPRN_##exc_lvl_srr0,r9; \
+	mtspr SPRN_##exc_lvl_srr1,r10;
+
+#if defined(CONFIG_FSL_BOOKE)
+#ifdef CONFIG_PHYS_64BIT
+#define RESTORE_MAS7 \
+	lwz r11,MAS7(r1); \
+	mtspr SPRN_MAS7,r11;
+#else
+#define RESTORE_MAS7
+#endif /* CONFIG_PHYS_64BIT */
+#define RESTORE_MMU_REGS \
+	lwz r9,MAS0(r1); \
+	lwz r10,MAS1(r1); \
+	lwz r11,MAS2(r1); \
+	mtspr SPRN_MAS0,r9; \
+	lwz r9,MAS3(r1); \
+	mtspr SPRN_MAS1,r10; \
+	lwz r10,MAS6(r1); \
+	mtspr SPRN_MAS2,r11; \
+	mtspr SPRN_MAS3,r9; \
+	mtspr SPRN_MAS6,r10; \
+	RESTORE_MAS7;
+#elif defined(CONFIG_44x)
+#define RESTORE_MMU_REGS \
+	lwz r9,MMUCR(r1); \
+	mtspr SPRN_MMUCR,r9;
+#else
+#define RESTORE_MMU_REGS
+#endif
+
+#ifdef CONFIG_40x
 	.globl ret_from_crit_exc
 ret_from_crit_exc:
+	mfspr r9,SPRN_SPRG3
+	lis r10,saved_ksp_limit@ha;
+	lwz r10,saved_ksp_limit@l(r10);
+	tovirt(r9,r9);
+	stw r10,KSP_LIMIT(r9)
+	lis r9,crit_srr0@ha;
+	lwz r9,crit_srr0@l(r9);
+	lis r10,crit_srr1@ha;
+	lwz r10,crit_srr1@l(r10);
+	mtspr SPRN_SRR0,r9;
+	mtspr SPRN_SRR1,r10;
 	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)
+#endif /* CONFIG_40x */
 
 #ifdef CONFIG_BOOKE
+	.globl ret_from_crit_exc
+ret_from_crit_exc:
+	mfspr r9,SPRN_SPRG3
+	lwz r10,SAVED_KSP_LIMIT(r1)
+	stw r10,KSP_LIMIT(r9)
+	RESTORE_xSRR(SRR0,SRR1);
+	RESTORE_MMU_REGS;
+	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)
+
 	.globl ret_from_debug_exc
 ret_from_debug_exc:
+	mfspr r9,SPRN_SPRG3
+	lwz r10,SAVED_KSP_LIMIT(r1)
+	stw r10,KSP_LIMIT(r9)
+	lwz r9,THREAD_INFO-THREAD(r9)
+	rlwinm r10,r1,0,0,(31-THREAD_SHIFT)
+	lwz r10,TI_PREEMPT(r10)
+	stw r10,TI_PREEMPT(r9)
+	RESTORE_xSRR(SRR0,SRR1);
+	RESTORE_xSRR(CSRR0,CSRR1);
+	RESTORE_MMU_REGS;
 	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI)
 
 	.globl ret_from_mcheck_exc
 ret_from_mcheck_exc:
+	mfspr r9,SPRN_SPRG3
+	lwz r10,SAVED_KSP_LIMIT(r1)
+	stw r10,KSP_LIMIT(r9)
+	RESTORE_xSRR(SRR0,SRR1);
+	RESTORE_xSRR(CSRR0,CSRR1);
+	RESTORE_xSRR(DSRR0,DSRR1);
+	RESTORE_MMU_REGS;
 	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI)
 #endif /* CONFIG_BOOKE */
 
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index f2cf60d38f78..56d8e5d90c5b 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -93,6 +93,12 @@ _ENTRY(crit_r10)
 	.space 4
 _ENTRY(crit_r11)
 	.space 4
+_ENTRY(crit_srr0)
+	.space 4
+_ENTRY(crit_srr1)
+	.space 4
+_ENTRY(saved_ksp_limit)
+	.space 4
 
 /*
  * Exception vector entry code. This code runs with address translation
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index b0874d228eaf..f277fade1932 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -72,7 +72,7 @@
 #define DEBUG_STACK_BASE dbgirq_ctx
 #define DEBUG_SPRG SPRN_SPRG6W
 
-#define EXC_LVL_FRAME_OVERHEAD (THREAD_SIZE - INT_FRAME_SIZE)
+#define EXC_LVL_FRAME_OVERHEAD (THREAD_SIZE - INT_FRAME_SIZE - EXC_LVL_SIZE)
 
 #ifdef CONFIG_SMP
 #define BOOKE_LOAD_EXC_LEVEL_STACK(level) \
@@ -376,4 +376,25 @@ label:
 	addi r3,r1,STACK_FRAME_OVERHEAD; \
 	EXC_XFER_EE_LITE(0x800, kernel_fp_unavailable_exception)
 
+#ifndef __ASSEMBLY__
+struct exception_regs {
+	unsigned long mas0;
+	unsigned long mas1;
+	unsigned long mas2;
+	unsigned long mas3;
+	unsigned long mas6;
+	unsigned long mas7;
+	unsigned long srr0;
+	unsigned long srr1;
+	unsigned long csrr0;
+	unsigned long csrr1;
+	unsigned long dsrr0;
+	unsigned long dsrr1;
+	unsigned long saved_ksp_limit;
+};
+
+/* ensure this structure is always sized to a multiple of the stack alignment */
+#define STACK_EXC_LVL_FRAME_SIZE _ALIGN_UP(sizeof (struct exception_regs), 16)
+
+#endif /* __ASSEMBLY__ */
 #endif /* __HEAD_BOOKE_H__ */