Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/Makefile          |   18
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c     |  262
-rw-r--r--  arch/powerpc/kernel/fpu.S             |  133
-rw-r--r--  arch/powerpc/kernel/head.S            | 1545
-rw-r--r--  arch/powerpc/kernel/head_44x.S        |  778
-rw-r--r--  arch/powerpc/kernel/head_4xx.S        | 1016
-rw-r--r--  arch/powerpc/kernel/head_64.S         | 2011
-rw-r--r--  arch/powerpc/kernel/head_8xx.S        |  860
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S  | 1058
-rw-r--r--  arch/powerpc/kernel/idle_6xx.S        |  233
-rw-r--r--  arch/powerpc/kernel/process.c         |  724
-rw-r--r--  arch/powerpc/kernel/semaphore.c       |  135
-rw-r--r--  arch/powerpc/kernel/traps.c           | 1047
-rw-r--r--  arch/powerpc/kernel/vector.S          |  197
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds       |  174
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S     |  172
16 files changed, 10363 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
new file mode 100644
index 000000000000..62c4a51a23d7
--- /dev/null
+++ b/arch/powerpc/kernel/Makefile
@@ -0,0 +1,18 @@
#
# Makefile for the linux kernel.
#

extra-$(CONFIG_PPC_STD_MMU)	:= head.o
extra-$(CONFIG_PPC64)		:= head_64.o
extra-$(CONFIG_40x)		:= head_4xx.o
extra-$(CONFIG_44x)		:= head_44x.o
extra-$(CONFIG_FSL_BOOKE)	:= head_fsl_booke.o
extra-$(CONFIG_8xx)		:= head_8xx.o
extra-$(CONFIG_6xx)		+= idle_6xx.o
extra-$(CONFIG_POWER4)		+= idle_power4.o
extra-$(CONFIG_PPC_FPU)	+= fpu.o
extra-y				+= vmlinux.lds

obj-y				:= semaphore.o traps.o process.o

obj-$(CONFIG_ALTIVEC)		+= vecemu.o vector.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
new file mode 100644
index 000000000000..16cf0b7ee2b7
--- /dev/null
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -0,0 +1,262 @@
/*
 * This program is used to generate definitions needed by
 * assembly language modules.
 *
 * We use the technique used in the OSF Mach kernel code:
 * generate asm statements containing #defines,
 * compile this file to assembler, and then extract the
 * #defines from the assembly-language output.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/suspend.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/time.h>
#include <linux/hardirq.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

#include <asm/cputable.h>
#include <asm/thread_info.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/lppaca.h>
#include <asm/iSeries/HvLpEvent.h>
#include <asm/rtas.h>
#include <asm/cache.h>
#include <asm/systemcfg.h>
#include <asm/compat.h>
#endif

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

#define BLANK() asm volatile("\n->" : : )
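
/*
 * For illustration (the offset value here is hypothetical):
 *
 *	DEFINE(THREAD, offsetof(struct task_struct, thread));
 *
 * compiles, when this file is built to assembly, into a line of the form
 *
 *	->THREAD 624 offsetof(struct task_struct, thread)
 *
 * and the Kbuild rules extract every "->" line into the generated
 * asm-offsets.h as
 *
 *	#define THREAD 624
 *
 * so assembly sources can refer to structure offsets symbolically.
 */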
50 | |||
51 | int main(void) | ||
52 | { | ||
53 | /* thread struct on stack */ | ||
54 | DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); | ||
55 | DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); | ||
56 | DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); | ||
57 | #ifdef CONFIG_PPC32 | ||
58 | DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags)); | ||
59 | #endif | ||
60 | #ifdef CONFIG_PPC64 | ||
61 | DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror)); | ||
62 | DEFINE(THREAD_SHIFT, THREAD_SHIFT); | ||
63 | #endif | ||
64 | DEFINE(THREAD_SIZE, THREAD_SIZE); | ||
65 | |||
66 | /* task_struct->thread */ | ||
67 | DEFINE(THREAD, offsetof(struct task_struct, thread)); | ||
68 | DEFINE(THREAD_INFO, offsetof(struct task_struct, thread_info)); | ||
69 | DEFINE(MM, offsetof(struct task_struct, mm)); | ||
70 | DEFINE(PTRACE, offsetof(struct task_struct, ptrace)); | ||
71 | DEFINE(KSP, offsetof(struct thread_struct, ksp)); | ||
72 | DEFINE(PGDIR, offsetof(struct thread_struct, pgdir)); | ||
73 | DEFINE(LAST_SYSCALL, offsetof(struct thread_struct, last_syscall)); | ||
74 | DEFINE(PT_REGS, offsetof(struct thread_struct, regs)); | ||
75 | DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode)); | ||
76 | DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0])); | ||
77 | DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr)); | ||
78 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) | ||
79 | DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, dbcr0)); | ||
80 | DEFINE(PT_PTRACED, PT_PTRACED); | ||
81 | #endif | ||
82 | #ifdef CONFIG_PPC64 | ||
83 | DEFINE(KSP_VSID, offsetof(struct thread_struct, ksp_vsid)); | ||
84 | #endif | ||
85 | |||
86 | #ifdef CONFIG_ALTIVEC | ||
87 | DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0])); | ||
88 | DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave)); | ||
89 | DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr)); | ||
90 | DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr)); | ||
91 | #endif /* CONFIG_ALTIVEC */ | ||
92 | #ifdef CONFIG_SPE | ||
93 | DEFINE(THREAD_EVR0, offsetof(struct thread_struct, evr[0])); | ||
94 | DEFINE(THREAD_ACC, offsetof(struct thread_struct, acc)); | ||
95 | DEFINE(THREAD_SPEFSCR, offsetof(struct thread_struct, spefscr)); | ||
96 | DEFINE(THREAD_USED_SPE, offsetof(struct thread_struct, used_spe)); | ||
97 | #endif /* CONFIG_SPE */ | ||
98 | /* Interrupt register frame */ | ||
99 | DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD); | ||
100 | #ifndef CONFIG_PPC64 | ||
101 | DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs)); | ||
102 | #else | ||
103 | DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs)); | ||
104 | |||
105 | /* 288 = # of volatile regs, int & fp, for leaf routines */ | ||
106 | /* which do not stack a frame. See the PPC64 ABI. */ | ||
107 | DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 288); | ||
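	/*
	 * Arithmetic for the 288 above: 36 registers saved at 8 bytes each
	 * (commonly described as 18 GPRs plus 18 FPRs) gives 36 * 8 = 288,
	 * the scratch area the 64-bit ABI lets a leaf routine use below
	 * its stack pointer without allocating a frame.
	 */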
#endif
	/* in fact we only use gpr0 - gpr9 and gpr20 - gpr23 */
	DEFINE(GPR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[0]));
	DEFINE(GPR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[1]));
	DEFINE(GPR2, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[2]));
	DEFINE(GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[3]));
	DEFINE(GPR4, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[4]));
	DEFINE(GPR5, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[5]));
	DEFINE(GPR6, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[6]));
	DEFINE(GPR7, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[7]));
	DEFINE(GPR8, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[8]));
	DEFINE(GPR9, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[9]));
	DEFINE(GPR10, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[10]));
	DEFINE(GPR11, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[11]));
	DEFINE(GPR12, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[12]));
	DEFINE(GPR13, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[13]));
	DEFINE(GPR14, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[14]));
	DEFINE(GPR15, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[15]));
	DEFINE(GPR16, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[16]));
	DEFINE(GPR17, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[17]));
	DEFINE(GPR18, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[18]));
	DEFINE(GPR19, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[19]));
	DEFINE(GPR20, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[20]));
	DEFINE(GPR21, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[21]));
	DEFINE(GPR22, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[22]));
	DEFINE(GPR23, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[23]));
	DEFINE(GPR24, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[24]));
	DEFINE(GPR25, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[25]));
	DEFINE(GPR26, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[26]));
	DEFINE(GPR27, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[27]));
	DEFINE(GPR28, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[28]));
	DEFINE(GPR29, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[29]));
	DEFINE(GPR30, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[30]));
	DEFINE(GPR31, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[31]));
	/*
	 * Note: these symbols include _ because they overlap with special
	 * register names
	 */
	DEFINE(_NIP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, nip));
	DEFINE(_MSR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, msr));
	DEFINE(_CTR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ctr));
	DEFINE(_LINK, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, link));
	DEFINE(_CCR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ccr));
	DEFINE(_MQ, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, mq));
	DEFINE(_XER, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, xer));
	DEFINE(_DAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
	DEFINE(_DSISR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
	/* The PowerPC 400-class & Book-E processors have neither the DAR
	 * nor the DSISR SPRs. Hence, we overload them to hold the similar
	 * DEAR and ESR SPRs for such processors.  For critical interrupts
	 * we use them to hold SRR0 and SRR1.
	 */
	DEFINE(_DEAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
	DEFINE(_ESR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
	DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3));
	DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result));
	DEFINE(TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap));
	DEFINE(CLONE_VM, CLONE_VM);
	DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
	DEFINE(MM_PGD, offsetof(struct mm_struct, pgd));

	/* About the CPU features table */
	DEFINE(CPU_SPEC_ENTRY_SIZE, sizeof(struct cpu_spec));
	DEFINE(CPU_SPEC_PVR_MASK, offsetof(struct cpu_spec, pvr_mask));
	DEFINE(CPU_SPEC_PVR_VALUE, offsetof(struct cpu_spec, pvr_value));
	DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
	DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));

#ifdef CONFIG_PPC64
	DEFINE(MM, offsetof(struct task_struct, mm));
	DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context));

	DEFINE(DCACHEL1LINESIZE, offsetof(struct ppc64_caches, dline_size));
	DEFINE(DCACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_dline_size));
	DEFINE(DCACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, dlines_per_page));
	DEFINE(ICACHEL1LINESIZE, offsetof(struct ppc64_caches, iline_size));
	DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size));
	DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
	DEFINE(PLATFORM, offsetof(struct systemcfg, platform));

	/* paca */
	DEFINE(PACA_SIZE, sizeof(struct paca_struct));
	DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index));
	DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start));
	DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack));
	DEFINE(PACACURRENT, offsetof(struct paca_struct, __current));
	DEFINE(PACASAVEDMSR, offsetof(struct paca_struct, saved_msr));
	DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real));
	DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr));
	DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
	DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
	DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
	DEFINE(PACAPROCENABLED, offsetof(struct paca_struct, proc_enabled));
	DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
	DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
	DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
#ifdef CONFIG_HUGETLB_PAGE
	DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas));
	DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas));
#endif /* CONFIG_HUGETLB_PAGE */
	DEFINE(PACADEFAULTDECR, offsetof(struct paca_struct, default_decr));
	DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
	DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
	DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb));
	DEFINE(PACA_EXDSI, offsetof(struct paca_struct, exdsi));
	DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
	DEFINE(PACALPPACA, offsetof(struct paca_struct, lppaca));
	DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
	DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0));
	DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
	DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
	DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));

	/* RTAS */
	DEFINE(RTASBASE, offsetof(struct rtas_t, base));
	DEFINE(RTASENTRY, offsetof(struct rtas_t, entry));

	DEFINE(_TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap));
	DEFINE(SOFTE, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, softe));

	/* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */
	DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
	DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);

	/* These _only_ to be used with {PROM,RTAS}_FRAME_SIZE!!! */
	DEFINE(_SRR0, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs));
	DEFINE(_SRR1, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)+8);

	/* systemcfg offsets for use by vdso */
	DEFINE(CFG_TB_ORIG_STAMP, offsetof(struct systemcfg, tb_orig_stamp));
	DEFINE(CFG_TB_TICKS_PER_SEC, offsetof(struct systemcfg, tb_ticks_per_sec));
	DEFINE(CFG_TB_TO_XS, offsetof(struct systemcfg, tb_to_xs));
	DEFINE(CFG_STAMP_XSEC, offsetof(struct systemcfg, stamp_xsec));
	DEFINE(CFG_TB_UPDATE_COUNT, offsetof(struct systemcfg, tb_update_count));
	DEFINE(CFG_TZ_MINUTEWEST, offsetof(struct systemcfg, tz_minuteswest));
	DEFINE(CFG_TZ_DSTTIME, offsetof(struct systemcfg, tz_dsttime));
	DEFINE(CFG_SYSCALL_MAP32, offsetof(struct systemcfg, syscall_map_32));
	DEFINE(CFG_SYSCALL_MAP64, offsetof(struct systemcfg, syscall_map_64));

	/* timeval/timezone offsets for use by vdso */
	DEFINE(TVAL64_TV_SEC, offsetof(struct timeval, tv_sec));
	DEFINE(TVAL64_TV_USEC, offsetof(struct timeval, tv_usec));
	DEFINE(TVAL32_TV_SEC, offsetof(struct compat_timeval, tv_sec));
	DEFINE(TVAL32_TV_USEC, offsetof(struct compat_timeval, tv_usec));
	DEFINE(TZONE_TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
	DEFINE(TZONE_TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));
#endif

	DEFINE(pbe_address, offsetof(struct pbe, address));
	DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
	DEFINE(pbe_next, offsetof(struct pbe, next));
	DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28);
	return 0;
}
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
new file mode 100644
index 000000000000..665d7d34304c
--- /dev/null
+++ b/arch/powerpc/kernel/fpu.S
@@ -0,0 +1,133 @@
/*
 * FPU support code, moved here from head.S so that it can be used
 * by chips which use other head-whatever.S files.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>

/*
 * This task wants to use the FPU now.
 * On UP, disable FP for the task which had the FPU previously,
 * and save its floating-point registers in its thread_struct.
 * Load up this task's FP registers from its thread_struct,
 * enable the FPU for the current task and return to the task.
 */
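/*
 * A rough C sketch of the UP lazy-switch logic below, for illustration
 * only (save_fprs/load_fprs are hypothetical helpers, and return_msr
 * stands for the MSR value restored on exception return, kept in r9):
 *
 *	if (last_task_used_math) {
 *		save_fprs(&last_task_used_math->thread);
 *		last_task_used_math->thread.regs->msr &=
 *				~(MSR_FP | MSR_FE0 | MSR_FE1);
 *	}
 *	load_fprs(&current->thread);
 *	return_msr |= MSR_FP | current->thread.fpexc_mode;
 *	last_task_used_math = current;
 */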
	.globl	load_up_fpu
load_up_fpu:
	mfmsr	r5
	ori	r5,r5,MSR_FP
#ifdef CONFIG_PPC64BRIDGE
	clrldi	r5,r5,1			/* turn off 64-bit mode */
#endif /* CONFIG_PPC64BRIDGE */
	SYNC
	MTMSRD(r5)			/* enable use of fpu now */
	isync
/*
 * For SMP, we don't do lazy FPU switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_fpu in switch_to.
 */
#ifndef CONFIG_SMP
	tophys(r6,0)			/* get __pa constant */
	addis	r3,r6,last_task_used_math@ha
	lwz	r4,last_task_used_math@l(r3)
	cmpwi	0,r4,0
	beq	1f
	add	r4,r4,r6
	addi	r4,r4,THREAD		/* want last_task_used_math->thread */
	SAVE_32FPRS(0, r4)
	mffs	fr0
	stfd	fr0,THREAD_FPSCR-4(r4)
	lwz	r5,PT_REGS(r4)
	add	r5,r5,r6
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li	r10,MSR_FP|MSR_FE0|MSR_FE1
	andc	r4,r4,r10		/* disable FP for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* enable use of FP after return */
	mfspr	r5,SPRN_SPRG3		/* current task's THREAD (phys) */
	lwz	r4,THREAD_FPEXC_MODE(r5)
	ori	r9,r9,MSR_FP		/* enable FP for current */
	or	r9,r9,r4
	lfd	fr0,THREAD_FPSCR-4(r5)
	mtfsf	0xff,fr0
	REST_32FPRS(0, r5)
#ifndef CONFIG_SMP
	subi	r4,r5,THREAD
	sub	r4,r4,r6
	stw	r4,last_task_used_math@l(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	/* we haven't used ctr or xer or lr */
	b	fast_exception_return

/*
 * FP unavailable trap from kernel - print a message, but let
 * the task use FP in the kernel until it returns to user mode.
 */
	.globl	KernelFP
KernelFP:
	lwz	r3,_MSR(r1)
	ori	r3,r3,MSR_FP
	stw	r3,_MSR(r1)		/* enable use of FP after return */
	lis	r3,86f@h
	ori	r3,r3,86f@l
	mr	r4,r2			/* current */
	lwz	r5,_NIP(r1)
	bl	printk
	b	ret_from_except
86:	.string	"floating point used in kernel (task=%p, pc=%x)\n"
	.align	4,0

/*
 * giveup_fpu(tsk)
 * Disable FP for the task given as the argument,
 * and save the floating-point registers in its thread_struct.
 * Enables the FPU for use in the kernel on return.
 */
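/*
 * Typical call site, as a sketch rather than code from this file:
 * the context-switch path does roughly
 *
 *	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
 *		giveup_fpu(prev);
 *
 * so the previous task's FP state lands in its thread_struct before
 * the switch.
 */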
	.globl	giveup_fpu
giveup_fpu:
	mfmsr	r5
	ori	r5,r5,MSR_FP
	SYNC_601
	ISYNC_601
	MTMSRD(r5)			/* enable use of fpu now */
	SYNC_601
	isync
	cmpwi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	lwz	r5,PT_REGS(r3)
	cmpwi	0,r5,0
	SAVE_32FPRS(0, r3)
	mffs	fr0
	stfd	fr0,THREAD_FPSCR-4(r3)
	beq	1f
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li	r3,MSR_FP|MSR_FE0|MSR_FE1
	andc	r4,r4,r3		/* disable FP for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	lis	r4,last_task_used_math@ha
	stw	r5,last_task_used_math@l(r4)
#endif /* CONFIG_SMP */
	blr
diff --git a/arch/powerpc/kernel/head.S b/arch/powerpc/kernel/head.S
new file mode 100644
index 000000000000..d05509f197d0
--- /dev/null
+++ b/arch/powerpc/kernel/head.S
@@ -0,0 +1,1545 @@
/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * This file contains the low-level support and setup for the
 * PowerPC platform, including trap and interrupt dispatch.
 * (The PPC 8xx embedded CPUs use head_8xx.S instead.)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>

#ifdef CONFIG_APUS
#include <asm/amigappc.h>
#endif

#ifdef CONFIG_PPC64BRIDGE
#define LOAD_BAT(n, reg, RA, RB)	\
	ld	RA,(n*32)+0(reg);	\
	ld	RB,(n*32)+8(reg);	\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_IBAT##n##L,RB;	\
	ld	RA,(n*32)+16(reg);	\
	ld	RB,(n*32)+24(reg);	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##L,RB;	\

#else /* CONFIG_PPC64BRIDGE */

/* The 601 only has IBATs; cr0.eq is set on 601 when using this macro */
#define LOAD_BAT(n, reg, RA, RB)	\
	/* see the comment for clear_bats() -- Cort */ \
	li	RA,0;			\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	lwz	RA,(n*16)+0(reg);	\
	lwz	RB,(n*16)+4(reg);	\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_IBAT##n##L,RB;	\
	beq	1f;			\
	lwz	RA,(n*16)+8(reg);	\
	lwz	RB,(n*16)+12(reg);	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##L,RB;	\
1:
#endif /* CONFIG_PPC64BRIDGE */

	.text
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"head.S",N_SO,0,0,0f
0:
	.globl	_stext
_stext:

/*
 * _start is defined this way because the XCOFF loader in the OpenFirmware
 * on the powermac expects the entry point to be a procedure descriptor.
 */
	.text
	.globl	_start
_start:
	/*
	 * These are here for legacy reasons, the kernel used to
	 * need to look like a coff function entry for the pmac
	 * but we're always started by some kind of bootloader now.
	 * -- Cort
	 */
	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
	nop

/* PMAC
 * Enter here with the kernel text, data and bss loaded starting at
 * 0, running with virtual == physical mapping.
 * r5 points to the prom entry point (the client interface handler
 * address).  Address translation is turned on, with the prom
 * managing the hash table.  Interrupts are disabled.  The stack
 * pointer (r1) points to just below the end of the half-meg region
 * from 0x380000 - 0x400000, which is mapped in already.
 *
 * If we are booted from MacOS via BootX, we enter with the kernel
 * image loaded somewhere, and the following values in registers:
 *  r3: 'BooX' (0x426f6f58)
 *  r4: virtual address of boot_infos_t
 *  r5: 0
 *
 * APUS
 *   r3: 'APUS'
 *   r4: physical address of memory base
 *   Linux/m68k style BootInfo structure at &_end.
 *
 * PREP
 * This is jumped to on prep systems right after the kernel is relocated
 * to its proper place in memory by the boot loader.  The expected layout
 * of the regs is:
 *   r3: ptr to residual data
 *   r4: initrd_start or if no initrd then 0
 *   r5: initrd_end - unused if r4 is 0
 *   r6: Start of command line string
 *   r7: End of command line string
 *
 * This just gets a minimal mmu environment setup so we can call
 * start_here() to do the real work.
 * -- Cort
 */
129 | |||
130 | .globl __start | ||
131 | __start: | ||
132 | /* | ||
133 | * We have to do any OF calls before we map ourselves to KERNELBASE, | ||
134 | * because OF may have I/O devices mapped into that area | ||
135 | * (particularly on CHRP). | ||
136 | */ | ||
137 | mr r31,r3 /* save parameters */ | ||
138 | mr r30,r4 | ||
139 | mr r29,r5 | ||
140 | mr r28,r6 | ||
141 | mr r27,r7 | ||
142 | li r24,0 /* cpu # */ | ||
143 | |||
144 | /* | ||
145 | * early_init() does the early machine identification and does | ||
146 | * the necessary low-level setup and clears the BSS | ||
147 | * -- Cort <cort@fsmlabs.com> | ||
148 | */ | ||
149 | bl early_init | ||
150 | |||
151 | /* | ||
152 | * On POWER4, we first need to tweak some CPU configuration registers | ||
153 | * like real mode cache inhibit or exception base | ||
154 | */ | ||
155 | #ifdef CONFIG_POWER4 | ||
156 | bl __970_cpu_preinit | ||
157 | #endif /* CONFIG_POWER4 */ | ||
158 | |||
159 | #ifdef CONFIG_APUS | ||
160 | /* On APUS the __va/__pa constants need to be set to the correct | ||
161 | * values before continuing. | ||
162 | */ | ||
163 | mr r4,r30 | ||
164 | bl fix_mem_constants | ||
165 | #endif /* CONFIG_APUS */ | ||
166 | |||
167 | /* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains | ||
168 | * the physical address we are running at, returned by early_init() | ||
169 | */ | ||
170 | bl mmu_off | ||
171 | __after_mmu_off: | ||
172 | #ifndef CONFIG_POWER4 | ||
173 | bl clear_bats | ||
174 | bl flush_tlbs | ||
175 | |||
176 | bl initial_bats | ||
177 | #if !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) | ||
178 | bl setup_disp_bat | ||
179 | #endif | ||
180 | #else /* CONFIG_POWER4 */ | ||
181 | bl reloc_offset | ||
182 | bl initial_mm_power4 | ||
183 | #endif /* CONFIG_POWER4 */ | ||
184 | |||
185 | /* | ||
186 | * Call setup_cpu for CPU 0 and initialize 6xx Idle | ||
187 | */ | ||
188 | bl reloc_offset | ||
189 | li r24,0 /* cpu# */ | ||
190 | bl call_setup_cpu /* Call setup_cpu for this CPU */ | ||
191 | #ifdef CONFIG_6xx | ||
192 | bl reloc_offset | ||
193 | bl init_idle_6xx | ||
194 | #endif /* CONFIG_6xx */ | ||
195 | #ifdef CONFIG_POWER4 | ||
196 | bl reloc_offset | ||
197 | bl init_idle_power4 | ||
198 | #endif /* CONFIG_POWER4 */ | ||
199 | |||
200 | |||
201 | #ifndef CONFIG_APUS | ||
202 | /* | ||
203 | * We need to run with _start at physical address 0. | ||
204 | * On CHRP, we are loaded at 0x10000 since OF on CHRP uses | ||
205 | * the exception vectors at 0 (and therefore this copy | ||
206 | * overwrites OF's exception vectors with our own). | ||
207 | * If the MMU is already turned on, we copy stuff to KERNELBASE, | ||
208 | * otherwise we copy it to 0. | ||
209 | */ | ||
210 | bl reloc_offset | ||
211 | mr r26,r3 | ||
212 | addis r4,r3,KERNELBASE@h /* current address of _start */ | ||
213 | cmpwi 0,r4,0 /* are we already running at 0? */ | ||
214 | bne relocate_kernel | ||
215 | #endif /* CONFIG_APUS */ | ||
216 | /* | ||
217 | * we now have the 1st 16M of ram mapped with the bats. | ||
218 | * prep needs the mmu to be turned on here, but pmac already has it on. | ||
219 | * this shouldn't bother the pmac since it just gets turned on again | ||
220 | * as we jump to our code at KERNELBASE. -- Cort | ||
221 | * Actually no, pmac doesn't have it on any more. BootX enters with MMU | ||
222 | * off, and in other cases, we now turn it off before changing BATs above. | ||
223 | */ | ||
224 | turn_on_mmu: | ||
225 | mfmsr r0 | ||
226 | ori r0,r0,MSR_DR|MSR_IR | ||
227 | mtspr SPRN_SRR1,r0 | ||
228 | lis r0,start_here@h | ||
229 | ori r0,r0,start_here@l | ||
230 | mtspr SPRN_SRR0,r0 | ||
231 | SYNC | ||
232 | RFI /* enables MMU */ | ||
233 | |||
234 | /* | ||
235 | * We need __secondary_hold as a place to hold the other cpus on | ||
236 | * an SMP machine, even when we are running a UP kernel. | ||
237 | */ | ||
238 | . = 0xc0 /* for prep bootloader */ | ||
239 | li r3,1 /* MTX only has 1 cpu */ | ||
240 | .globl __secondary_hold | ||
241 | __secondary_hold: | ||
242 | /* tell the master we're here */ | ||
243 | stw r3,4(0) | ||
244 | #ifdef CONFIG_SMP | ||
245 | 100: lwz r4,0(0) | ||
246 | /* wait until we're told to start */ | ||
247 | cmpw 0,r4,r3 | ||
248 | bne 100b | ||
249 | /* our cpu # was at addr 0 - go */ | ||
250 | mr r24,r3 /* cpu # */ | ||
251 | b __secondary_start | ||
252 | #else | ||
253 | b . | ||
254 | #endif /* CONFIG_SMP */ | ||
255 | |||
256 | /* | ||
257 | * Exception entry code. This code runs with address translation | ||
258 | * turned off, i.e. using physical addresses. | ||
259 | * We assume sprg3 has the physical address of the current | ||
260 | * task's thread_struct. | ||
261 | */ | ||
262 | #define EXCEPTION_PROLOG \ | ||
263 | mtspr SPRN_SPRG0,r10; \ | ||
264 | mtspr SPRN_SPRG1,r11; \ | ||
265 | mfcr r10; \ | ||
266 | EXCEPTION_PROLOG_1; \ | ||
267 | EXCEPTION_PROLOG_2 | ||
268 | |||
269 | #define EXCEPTION_PROLOG_1 \ | ||
270 | mfspr r11,SPRN_SRR1; /* check whether user or kernel */ \ | ||
271 | andi. r11,r11,MSR_PR; \ | ||
272 | tophys(r11,r1); /* use tophys(r1) if kernel */ \ | ||
273 | beq 1f; \ | ||
274 | mfspr r11,SPRN_SPRG3; \ | ||
275 | lwz r11,THREAD_INFO-THREAD(r11); \ | ||
276 | addi r11,r11,THREAD_SIZE; \ | ||
277 | tophys(r11,r11); \ | ||
278 | 1: subi r11,r11,INT_FRAME_SIZE /* alloc exc. frame */ | ||
279 | |||
280 | |||
281 | #define EXCEPTION_PROLOG_2 \ | ||
282 | CLR_TOP32(r11); \ | ||
283 | stw r10,_CCR(r11); /* save registers */ \ | ||
284 | stw r12,GPR12(r11); \ | ||
285 | stw r9,GPR9(r11); \ | ||
286 | mfspr r10,SPRN_SPRG0; \ | ||
287 | stw r10,GPR10(r11); \ | ||
288 | mfspr r12,SPRN_SPRG1; \ | ||
289 | stw r12,GPR11(r11); \ | ||
290 | mflr r10; \ | ||
291 | stw r10,_LINK(r11); \ | ||
292 | mfspr r12,SPRN_SRR0; \ | ||
293 | mfspr r9,SPRN_SRR1; \ | ||
294 | stw r1,GPR1(r11); \ | ||
295 | stw r1,0(r11); \ | ||
296 | tovirt(r1,r11); /* set new kernel sp */ \ | ||
297 | li r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \ | ||
298 | MTMSRD(r10); /* (except for mach check in rtas) */ \ | ||
299 | stw r0,GPR0(r11); \ | ||
300 | SAVE_4GPRS(3, r11); \ | ||
301 | SAVE_2GPRS(7, r11) | ||
302 | |||
303 | /* | ||
304 | * Note: code which follows this uses cr0.eq (set if from kernel), | ||
305 | * r11, r12 (SRR0), and r9 (SRR1). | ||
306 | * | ||
307 | * Note2: once we have set r1 we are in a position to take exceptions | ||
308 | * again, and we could thus set MSR:RI at that point. | ||
309 | */ | ||
310 | |||
311 | /* | ||
312 | * Exception vectors. | ||
313 | */ | ||
314 | #define EXCEPTION(n, label, hdlr, xfer) \ | ||
315 | . = n; \ | ||
316 | label: \ | ||
317 | EXCEPTION_PROLOG; \ | ||
318 | addi r3,r1,STACK_FRAME_OVERHEAD; \ | ||
319 | xfer(n, hdlr) | ||
320 | |||
321 | #define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret) \ | ||
322 | li r10,trap; \ | ||
323 | stw r10,TRAP(r11); \ | ||
324 | li r10,MSR_KERNEL; \ | ||
325 | copyee(r10, r9); \ | ||
326 | bl tfer; \ | ||
327 | i##n: \ | ||
328 | .long hdlr; \ | ||
329 | .long ret | ||
330 | |||
331 | #define COPY_EE(d, s) rlwimi d,s,0,16,16 | ||
332 | #define NOCOPY(d, s) | ||
333 | |||
334 | #define EXC_XFER_STD(n, hdlr) \ | ||
335 | EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full, \ | ||
336 | ret_from_except_full) | ||
337 | |||
338 | #define EXC_XFER_LITE(n, hdlr) \ | ||
339 | EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \ | ||
340 | ret_from_except) | ||
341 | |||
342 | #define EXC_XFER_EE(n, hdlr) \ | ||
343 | EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \ | ||
344 | ret_from_except_full) | ||
345 | |||
346 | #define EXC_XFER_EE_LITE(n, hdlr) \ | ||
347 | EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \ | ||
348 | ret_from_except) | ||
349 | |||

/* System reset */
/* core99 pmac starts the secondary here by changing the vector, and
   putting it back to what it was (UnknownException) when done.  */
#if defined(CONFIG_GEMINI) && defined(CONFIG_SMP)
	. = 0x100
	b	__secondary_start_gemini
#else
	EXCEPTION(0x100, Reset, UnknownException, EXC_XFER_STD)
#endif

/* Machine check */
/*
 * On CHRP, this is complicated by the fact that we could get a
 * machine check inside RTAS, and we have no guarantee that certain
 * critical registers will have the values we expect.  The set of
 * registers that might have bad values includes all the GPRs
 * and all the BATs.  We indicate that we are in RTAS by putting
 * a non-zero value, the address of the exception frame to use,
 * in SPRG2.  The machine check handler checks SPRG2 and uses its
 * value if it is non-zero.  If we ever needed to free up SPRG2,
 * we could use a field in the thread_info or thread_struct instead.
 * (Other exception handlers assume that r1 is a valid kernel stack
 * pointer when we take an exception from supervisor mode.)
 *	-- paulus.
 */
	. = 0x200
	mtspr	SPRN_SPRG0,r10
	mtspr	SPRN_SPRG1,r11
	mfcr	r10
#ifdef CONFIG_PPC_CHRP
	mfspr	r11,SPRN_SPRG2
	cmpwi	0,r11,0
	bne	7f
#endif /* CONFIG_PPC_CHRP */
	EXCEPTION_PROLOG_1
7:	EXCEPTION_PROLOG_2
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_CHRP
	mfspr	r4,SPRN_SPRG2
	cmpwi	cr1,r4,0
	bne	cr1,1f
#endif
	EXC_XFER_STD(0x200, MachineCheckException)
#ifdef CONFIG_PPC_CHRP
1:	b	machine_check_in_rtas
#endif
396 | |||
397 | /* Data access exception. */ | ||
398 | . = 0x300 | ||
399 | #ifdef CONFIG_PPC64BRIDGE | ||
400 | b DataAccess | ||
401 | DataAccessCont: | ||
402 | #else | ||
403 | DataAccess: | ||
404 | EXCEPTION_PROLOG | ||
405 | #endif /* CONFIG_PPC64BRIDGE */ | ||
406 | mfspr r10,SPRN_DSISR | ||
407 | andis. r0,r10,0xa470 /* weird error? */ | ||
408 | bne 1f /* if not, try to put a PTE */ | ||
409 | mfspr r4,SPRN_DAR /* into the hash table */ | ||
410 | rlwinm r3,r10,32-15,21,21 /* DSISR_STORE -> _PAGE_RW */ | ||
411 | bl hash_page | ||
412 | 1: stw r10,_DSISR(r11) | ||
413 | mr r5,r10 | ||
414 | mfspr r4,SPRN_DAR | ||
415 | EXC_XFER_EE_LITE(0x300, handle_page_fault) | ||
416 | |||
417 | #ifdef CONFIG_PPC64BRIDGE | ||
418 | /* SLB fault on data access. */ | ||
419 | . = 0x380 | ||
420 | b DataSegment | ||
421 | #endif /* CONFIG_PPC64BRIDGE */ | ||
422 | |||
423 | /* Instruction access exception. */ | ||
424 | . = 0x400 | ||
425 | #ifdef CONFIG_PPC64BRIDGE | ||
426 | b InstructionAccess | ||
427 | InstructionAccessCont: | ||
428 | #else | ||
429 | InstructionAccess: | ||
430 | EXCEPTION_PROLOG | ||
431 | #endif /* CONFIG_PPC64BRIDGE */ | ||
432 | andis. r0,r9,0x4000 /* no pte found? */ | ||
433 | beq 1f /* if so, try to put a PTE */ | ||
434 | li r3,0 /* into the hash table */ | ||
435 | mr r4,r12 /* SRR0 is fault address */ | ||
436 | bl hash_page | ||
437 | 1: mr r4,r12 | ||
438 | mr r5,r9 | ||
439 | EXC_XFER_EE_LITE(0x400, handle_page_fault) | ||
440 | |||
441 | #ifdef CONFIG_PPC64BRIDGE | ||
442 | /* SLB fault on instruction access. */ | ||
443 | . = 0x480 | ||
444 | b InstructionSegment | ||
445 | #endif /* CONFIG_PPC64BRIDGE */ | ||
446 | |||
447 | /* External interrupt */ | ||
448 | EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE) | ||
449 | |||
450 | /* Alignment exception */ | ||
451 | . = 0x600 | ||
452 | Alignment: | ||
453 | EXCEPTION_PROLOG | ||
454 | mfspr r4,SPRN_DAR | ||
455 | stw r4,_DAR(r11) | ||
456 | mfspr r5,SPRN_DSISR | ||
457 | stw r5,_DSISR(r11) | ||
458 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
459 | EXC_XFER_EE(0x600, AlignmentException) | ||
460 | |||
461 | /* Program check exception */ | ||
462 | EXCEPTION(0x700, ProgramCheck, ProgramCheckException, EXC_XFER_STD) | ||
463 | |||
464 | /* Floating-point unavailable */ | ||
465 | . = 0x800 | ||
466 | FPUnavailable: | ||
467 | EXCEPTION_PROLOG | ||
468 | bne load_up_fpu /* if from user, just load it up */ | ||
469 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
470 | EXC_XFER_EE_LITE(0x800, KernelFP) | ||
471 | |||
472 | /* Decrementer */ | ||
473 | EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE) | ||
474 | |||
475 | EXCEPTION(0xa00, Trap_0a, UnknownException, EXC_XFER_EE) | ||
476 | EXCEPTION(0xb00, Trap_0b, UnknownException, EXC_XFER_EE) | ||
477 | |||
478 | /* System call */ | ||
479 | . = 0xc00 | ||
480 | SystemCall: | ||
481 | EXCEPTION_PROLOG | ||
482 | EXC_XFER_EE_LITE(0xc00, DoSyscall) | ||
483 | |||
484 | /* Single step - not used on 601 */ | ||
485 | EXCEPTION(0xd00, SingleStep, SingleStepException, EXC_XFER_STD) | ||
486 | EXCEPTION(0xe00, Trap_0e, UnknownException, EXC_XFER_EE) | ||
487 | |||
488 | /* | ||
489 | * The Altivec unavailable trap is at 0x0f20. Foo. | ||
490 | * We effectively remap it to 0x3000. | ||
491 | * We include an altivec unavailable exception vector even if | ||
492 | * not configured for Altivec, so that you can't panic a | ||
493 | * non-altivec kernel running on a machine with altivec just | ||
494 | * by executing an altivec instruction. | ||
495 | */ | ||
496 | . = 0xf00 | ||
497 | b Trap_0f | ||
498 | |||
499 | . = 0xf20 | ||
500 | b AltiVecUnavailable | ||
501 | |||
502 | Trap_0f: | ||
503 | EXCEPTION_PROLOG | ||
504 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
505 | EXC_XFER_EE(0xf00, UnknownException) | ||
506 | |||
507 | /* | ||
508 | * Handle TLB miss for instruction on 603/603e. | ||
509 | * Note: we get an alternate set of r0 - r3 to use automatically. | ||
510 | */ | ||
511 | . = 0x1000 | ||
512 | InstructionTLBMiss: | ||
513 | /* | ||
514 | * r0: stored ctr | ||
515 | * r1: linux style pte ( later becomes ppc hardware pte ) | ||
516 | * r2: ptr to linux-style pte | ||
517 | * r3: scratch | ||
518 | */ | ||
519 | mfctr r0 | ||
520 | /* Get PTE (linux-style) and check access */ | ||
521 | mfspr r3,SPRN_IMISS | ||
522 | lis r1,KERNELBASE@h /* check if kernel address */ | ||
523 | cmplw 0,r3,r1 | ||
524 | mfspr r2,SPRN_SPRG3 | ||
525 | li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */ | ||
526 | lwz r2,PGDIR(r2) | ||
527 | blt+ 112f | ||
528 | lis r2,swapper_pg_dir@ha /* if kernel address, use */ | ||
529 | addi r2,r2,swapper_pg_dir@l /* kernel page table */ | ||
530 | mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */ | ||
531 | rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ | ||
532 | 112: tophys(r2,r2) | ||
533 | rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ | ||
534 | lwz r2,0(r2) /* get pmd entry */ | ||
535 | rlwinm. r2,r2,0,0,19 /* extract address of pte page */ | ||
536 | beq- InstructionAddressInvalid /* return if no mapping */ | ||
537 | rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */ | ||
538 | lwz r3,0(r2) /* get linux-style pte */ | ||
539 | andc. r1,r1,r3 /* check access & ~permission */ | ||
540 | bne- InstructionAddressInvalid /* return if access not permitted */ | ||
541 | ori r3,r3,_PAGE_ACCESSED /* set _PAGE_ACCESSED in pte */ | ||
542 | /* | ||
543 | * NOTE! We are assuming this is not an SMP system, otherwise | ||
544 | * we would need to update the pte atomically with lwarx/stwcx. | ||
545 | */ | ||
546 | stw r3,0(r2) /* update PTE (accessed bit) */ | ||
547 | /* Convert linux-style PTE to low word of PPC-style PTE */ | ||
548 | rlwinm r1,r3,32-10,31,31 /* _PAGE_RW -> PP lsb */ | ||
549 | rlwinm r2,r3,32-7,31,31 /* _PAGE_DIRTY -> PP lsb */ | ||
550 | and r1,r1,r2 /* writable if _RW and _DIRTY */ | ||
551 | rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */ | ||
552 | rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */ | ||
553 | ori r1,r1,0xe14 /* clear out reserved bits and M */ | ||
554 | andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */ | ||
555 | mtspr SPRN_RPA,r1 | ||
556 | mfspr r3,SPRN_IMISS | ||
557 | tlbli r3 | ||
558 | mfspr r3,SPRN_SRR1 /* Need to restore CR0 */ | ||
559 | mtcrf 0x80,r3 | ||
560 | rfi | ||
561 | InstructionAddressInvalid: | ||
562 | mfspr r3,SPRN_SRR1 | ||
563 | rlwinm r1,r3,9,6,6 /* Get load/store bit */ | ||
564 | |||
565 | addis r1,r1,0x2000 | ||
566 | mtspr SPRN_DSISR,r1 /* (shouldn't be needed) */ | ||
567 | mtctr r0 /* Restore CTR */ | ||
568 | andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */ | ||
569 | or r2,r2,r1 | ||
570 | mtspr SPRN_SRR1,r2 | ||
571 | mfspr r1,SPRN_IMISS /* Get failing address */ | ||
572 | rlwinm. r2,r2,0,31,31 /* Check for little endian access */ | ||
573 | rlwimi r2,r2,1,30,30 /* change 1 -> 3 */ | ||
574 | xor r1,r1,r2 | ||
575 | mtspr SPRN_DAR,r1 /* Set fault address */ | ||
576 | mfmsr r0 /* Restore "normal" registers */ | ||
577 | xoris r0,r0,MSR_TGPR>>16 | ||
578 | mtcrf 0x80,r3 /* Restore CR0 */ | ||
579 | mtmsr r0 | ||
580 | b InstructionAccess | ||
581 | |||
582 | /* | ||
583 | * Handle TLB miss for DATA Load operation on 603/603e | ||
584 | */ | ||
585 | . = 0x1100 | ||
586 | DataLoadTLBMiss: | ||
587 | /* | ||
588 | * r0: stored ctr | ||
589 | * r1: linux style pte ( later becomes ppc hardware pte ) | ||
590 | * r2: ptr to linux-style pte | ||
591 | * r3: scratch | ||
592 | */ | ||
593 | mfctr r0 | ||
594 | /* Get PTE (linux-style) and check access */ | ||
595 | mfspr r3,SPRN_DMISS | ||
596 | lis r1,KERNELBASE@h /* check if kernel address */ | ||
597 | cmplw 0,r3,r1 | ||
598 | mfspr r2,SPRN_SPRG3 | ||
599 | li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */ | ||
600 | lwz r2,PGDIR(r2) | ||
601 | blt+ 112f | ||
602 | lis r2,swapper_pg_dir@ha /* if kernel address, use */ | ||
603 | addi r2,r2,swapper_pg_dir@l /* kernel page table */ | ||
604 | mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */ | ||
605 | rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ | ||
606 | 112: tophys(r2,r2) | ||
607 | rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ | ||
608 | lwz r2,0(r2) /* get pmd entry */ | ||
609 | rlwinm. r2,r2,0,0,19 /* extract address of pte page */ | ||
610 | beq- DataAddressInvalid /* return if no mapping */ | ||
611 | rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */ | ||
612 | lwz r3,0(r2) /* get linux-style pte */ | ||
613 | andc. r1,r1,r3 /* check access & ~permission */ | ||
614 | bne- DataAddressInvalid /* return if access not permitted */ | ||
615 | ori r3,r3,_PAGE_ACCESSED /* set _PAGE_ACCESSED in pte */ | ||
616 | /* | ||
617 | * NOTE! We are assuming this is not an SMP system, otherwise | ||
618 | * we would need to update the pte atomically with lwarx/stwcx. | ||
619 | */ | ||
620 | stw r3,0(r2) /* update PTE (accessed bit) */ | ||
621 | /* Convert linux-style PTE to low word of PPC-style PTE */ | ||
622 | rlwinm r1,r3,32-10,31,31 /* _PAGE_RW -> PP lsb */ | ||
623 | rlwinm r2,r3,32-7,31,31 /* _PAGE_DIRTY -> PP lsb */ | ||
624 | and r1,r1,r2 /* writable if _RW and _DIRTY */ | ||
625 | rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */ | ||
626 | rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */ | ||
627 | ori r1,r1,0xe14 /* clear out reserved bits and M */ | ||
628 | andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */ | ||
629 | mtspr SPRN_RPA,r1 | ||
630 | mfspr r3,SPRN_DMISS | ||
631 | tlbld r3 | ||
632 | mfspr r3,SPRN_SRR1 /* Need to restore CR0 */ | ||
633 | mtcrf 0x80,r3 | ||
634 | rfi | ||
635 | DataAddressInvalid: | ||
636 | mfspr r3,SPRN_SRR1 | ||
637 | rlwinm r1,r3,9,6,6 /* Get load/store bit */ | ||
638 | addis r1,r1,0x2000 | ||
639 | mtspr SPRN_DSISR,r1 | ||
640 | mtctr r0 /* Restore CTR */ | ||
641 | andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */ | ||
642 | mtspr SPRN_SRR1,r2 | ||
643 | mfspr r1,SPRN_DMISS /* Get failing address */ | ||
644 | rlwinm. r2,r2,0,31,31 /* Check for little endian access */ | ||
645 | beq 20f /* Jump if big endian */ | ||
646 | xori r1,r1,3 | ||
647 | 20: mtspr SPRN_DAR,r1 /* Set fault address */ | ||
648 | mfmsr r0 /* Restore "normal" registers */ | ||
649 | xoris r0,r0,MSR_TGPR>>16 | ||
650 | mtcrf 0x80,r3 /* Restore CR0 */ | ||
651 | mtmsr r0 | ||
652 | b DataAccess | ||
653 | |||
654 | /* | ||
655 | * Handle TLB miss for DATA Store on 603/603e | ||
656 | */ | ||
657 | . = 0x1200 | ||
658 | DataStoreTLBMiss: | ||
659 | /* | ||
660 | * r0: stored ctr | ||
661 | * r1: linux style pte ( later becomes ppc hardware pte ) | ||
662 | * r2: ptr to linux-style pte | ||
663 | * r3: scratch | ||
664 | */ | ||
665 | mfctr r0 | ||
666 | /* Get PTE (linux-style) and check access */ | ||
667 | mfspr r3,SPRN_DMISS | ||
668 | lis r1,KERNELBASE@h /* check if kernel address */ | ||
669 | cmplw 0,r3,r1 | ||
670 | mfspr r2,SPRN_SPRG3 | ||
671 | li r1,_PAGE_RW|_PAGE_USER|_PAGE_PRESENT /* access flags */ | ||
672 | lwz r2,PGDIR(r2) | ||
673 | blt+ 112f | ||
674 | lis r2,swapper_pg_dir@ha /* if kernel address, use */ | ||
675 | addi r2,r2,swapper_pg_dir@l /* kernel page table */ | ||
676 | mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */ | ||
677 | rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ | ||
678 | 112: tophys(r2,r2) | ||
679 | rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ | ||
680 | lwz r2,0(r2) /* get pmd entry */ | ||
681 | rlwinm. r2,r2,0,0,19 /* extract address of pte page */ | ||
682 | beq- DataAddressInvalid /* return if no mapping */ | ||
683 | rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */ | ||
684 | lwz r3,0(r2) /* get linux-style pte */ | ||
685 | andc. r1,r1,r3 /* check access & ~permission */ | ||
686 | bne- DataAddressInvalid /* return if access not permitted */ | ||
687 | ori r3,r3,_PAGE_ACCESSED|_PAGE_DIRTY | ||
688 | /* | ||
689 | * NOTE! We are assuming this is not an SMP system, otherwise | ||
690 | * we would need to update the pte atomically with lwarx/stwcx. | ||
691 | */ | ||
692 | stw r3,0(r2) /* update PTE (accessed/dirty bits) */ | ||
693 | /* Convert linux-style PTE to low word of PPC-style PTE */ | ||
694 | rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */ | ||
695 | li r1,0xe15 /* clear out reserved bits and M */ | ||
696 | andc r1,r3,r1 /* PP = user? 2: 0 */ | ||
697 | mtspr SPRN_RPA,r1 | ||
698 | mfspr r3,SPRN_DMISS | ||
699 | tlbld r3 | ||
700 | mfspr r3,SPRN_SRR1 /* Need to restore CR0 */ | ||
701 | mtcrf 0x80,r3 | ||
702 | rfi | ||
703 | |||
704 | #ifndef CONFIG_ALTIVEC | ||
705 | #define AltivecAssistException UnknownException | ||
706 | #endif | ||
707 | |||
708 | EXCEPTION(0x1300, Trap_13, InstructionBreakpoint, EXC_XFER_EE) | ||
709 | EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_EE) | ||
710 | EXCEPTION(0x1500, Trap_15, UnknownException, EXC_XFER_EE) | ||
711 | #ifdef CONFIG_POWER4 | ||
712 | EXCEPTION(0x1600, Trap_16, UnknownException, EXC_XFER_EE) | ||
713 | EXCEPTION(0x1700, Trap_17, AltivecAssistException, EXC_XFER_EE) | ||
714 | EXCEPTION(0x1800, Trap_18, TAUException, EXC_XFER_STD) | ||
715 | #else /* !CONFIG_POWER4 */ | ||
716 | EXCEPTION(0x1600, Trap_16, AltivecAssistException, EXC_XFER_EE) | ||
717 | EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD) | ||
718 | EXCEPTION(0x1800, Trap_18, UnknownException, EXC_XFER_EE) | ||
719 | #endif /* CONFIG_POWER4 */ | ||
720 | EXCEPTION(0x1900, Trap_19, UnknownException, EXC_XFER_EE) | ||
721 | EXCEPTION(0x1a00, Trap_1a, UnknownException, EXC_XFER_EE) | ||
722 | EXCEPTION(0x1b00, Trap_1b, UnknownException, EXC_XFER_EE) | ||
723 | EXCEPTION(0x1c00, Trap_1c, UnknownException, EXC_XFER_EE) | ||
724 | EXCEPTION(0x1d00, Trap_1d, UnknownException, EXC_XFER_EE) | ||
725 | EXCEPTION(0x1e00, Trap_1e, UnknownException, EXC_XFER_EE) | ||
726 | EXCEPTION(0x1f00, Trap_1f, UnknownException, EXC_XFER_EE) | ||
727 | EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_EE) | ||
728 | EXCEPTION(0x2100, Trap_21, UnknownException, EXC_XFER_EE) | ||
729 | EXCEPTION(0x2200, Trap_22, UnknownException, EXC_XFER_EE) | ||
730 | EXCEPTION(0x2300, Trap_23, UnknownException, EXC_XFER_EE) | ||
731 | EXCEPTION(0x2400, Trap_24, UnknownException, EXC_XFER_EE) | ||
732 | EXCEPTION(0x2500, Trap_25, UnknownException, EXC_XFER_EE) | ||
733 | EXCEPTION(0x2600, Trap_26, UnknownException, EXC_XFER_EE) | ||
734 | EXCEPTION(0x2700, Trap_27, UnknownException, EXC_XFER_EE) | ||
735 | EXCEPTION(0x2800, Trap_28, UnknownException, EXC_XFER_EE) | ||
736 | EXCEPTION(0x2900, Trap_29, UnknownException, EXC_XFER_EE) | ||
737 | EXCEPTION(0x2a00, Trap_2a, UnknownException, EXC_XFER_EE) | ||
738 | EXCEPTION(0x2b00, Trap_2b, UnknownException, EXC_XFER_EE) | ||
739 | EXCEPTION(0x2c00, Trap_2c, UnknownException, EXC_XFER_EE) | ||
740 | EXCEPTION(0x2d00, Trap_2d, UnknownException, EXC_XFER_EE) | ||
741 | EXCEPTION(0x2e00, Trap_2e, UnknownException, EXC_XFER_EE) | ||
742 | EXCEPTION(0x2f00, MOLTrampoline, UnknownException, EXC_XFER_EE_LITE) | ||
743 | |||
744 | .globl mol_trampoline | ||
745 | .set mol_trampoline, i0x2f00 | ||
746 | |||
747 | . = 0x3000 | ||
748 | |||
749 | AltiVecUnavailable: | ||
750 | EXCEPTION_PROLOG | ||
751 | #ifdef CONFIG_ALTIVEC | ||
752 | bne load_up_altivec /* if from user, just load it up */ | ||
753 | #endif /* CONFIG_ALTIVEC */ | ||
754 | EXC_XFER_EE_LITE(0xf20, AltivecUnavailException) | ||
755 | |||
756 | #ifdef CONFIG_PPC64BRIDGE | ||
757 | DataAccess: | ||
758 | EXCEPTION_PROLOG | ||
759 | b DataAccessCont | ||
760 | |||
761 | InstructionAccess: | ||
762 | EXCEPTION_PROLOG | ||
763 | b InstructionAccessCont | ||
764 | |||
765 | DataSegment: | ||
766 | EXCEPTION_PROLOG | ||
767 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
768 | mfspr r4,SPRN_DAR | ||
769 | stw r4,_DAR(r11) | ||
770 | EXC_XFER_STD(0x380, UnknownException) | ||
771 | |||
772 | InstructionSegment: | ||
773 | EXCEPTION_PROLOG | ||
774 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
775 | EXC_XFER_STD(0x480, UnknownException) | ||
776 | #endif /* CONFIG_PPC64BRIDGE */ | ||
777 | |||
778 | #ifdef CONFIG_ALTIVEC | ||
779 | /* Note that the AltiVec support is closely modeled after the FP | ||
780 | * support. Changes to one are likely to be applicable to the | ||
781 | * other! */ | ||
782 | load_up_altivec: | ||
783 | /* | ||
784 | * Disable AltiVec for the task which had AltiVec previously, | ||
785 | * and save its AltiVec registers in its thread_struct. | ||
786 | * Enables AltiVec for use in the kernel on return. | ||
787 | * On SMP we know the AltiVec units are free, since we give it up every | ||
788 | * switch. -- Kumar | ||
789 | */ | ||
790 | mfmsr r5 | ||
791 | oris r5,r5,MSR_VEC@h | ||
792 | MTMSRD(r5) /* enable use of AltiVec now */ | ||
793 | isync | ||
794 | /* | ||
795 | * For SMP, we don't do lazy AltiVec switching because it just gets too | ||
796 | * horrendously complex, especially when a task switches from one CPU | ||
797 | * to another. Instead we call giveup_altivec in switch_to. | ||
798 | */ | ||
799 | #ifndef CONFIG_SMP | ||
800 | tophys(r6,0) | ||
801 | addis r3,r6,last_task_used_altivec@ha | ||
802 | lwz r4,last_task_used_altivec@l(r3) | ||
803 | cmpwi 0,r4,0 | ||
804 | beq 1f | ||
805 | add r4,r4,r6 | ||
806 | addi r4,r4,THREAD /* want THREAD of last_task_used_altivec */ | ||
807 | SAVE_32VRS(0,r10,r4) | ||
808 | mfvscr vr0 | ||
809 | li r10,THREAD_VSCR | ||
810 | stvx vr0,r10,r4 | ||
811 | lwz r5,PT_REGS(r4) | ||
812 | add r5,r5,r6 | ||
813 | lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
814 | lis r10,MSR_VEC@h | ||
815 | andc r4,r4,r10 /* disable altivec for previous task */ | ||
816 | stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
817 | 1: | ||
818 | #endif /* CONFIG_SMP */ | ||
819 | /* enable use of AltiVec after return */ | ||
820 | oris r9,r9,MSR_VEC@h | ||
821 | mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */ | ||
822 | li r4,1 | ||
823 | li r10,THREAD_VSCR | ||
824 | stw r4,THREAD_USED_VR(r5) | ||
825 | lvx vr0,r10,r5 | ||
826 | mtvscr vr0 | ||
827 | REST_32VRS(0,r10,r5) | ||
828 | #ifndef CONFIG_SMP | ||
829 | subi r4,r5,THREAD | ||
830 | sub r4,r4,r6 | ||
831 | stw r4,last_task_used_altivec@l(r3) | ||
832 | #endif /* CONFIG_SMP */ | ||
833 | /* restore registers and return */ | ||
834 | /* we haven't used ctr or xer or lr */ | ||
835 | b fast_exception_return | ||
836 | |||
837 | /* | ||
838 | * AltiVec unavailable trap from kernel - print a message, but let | ||
839 | * the task use AltiVec in the kernel until it returns to user mode. | ||
840 | */ | ||
841 | KernelAltiVec: | ||
842 | lwz r3,_MSR(r1) | ||
843 | oris r3,r3,MSR_VEC@h | ||
844 | stw r3,_MSR(r1) /* enable use of AltiVec after return */ | ||
845 | lis r3,87f@h | ||
846 | ori r3,r3,87f@l | ||
847 | mr r4,r2 /* current */ | ||
848 | lwz r5,_NIP(r1) | ||
849 | bl printk | ||
850 | b ret_from_except | ||
851 | 87: .string "AltiVec used in kernel (task=%p, pc=%x) \n" | ||
852 | .align 4,0 | ||
853 | |||
854 | /* | ||
855 | * giveup_altivec(tsk) | ||
856 | * Disable AltiVec for the task given as the argument, | ||
857 | * and save the AltiVec registers in its thread_struct. | ||
858 | * Enables AltiVec for use in the kernel on return. | ||
859 | */ | ||
860 | |||
861 | .globl giveup_altivec | ||
862 | giveup_altivec: | ||
863 | mfmsr r5 | ||
864 | oris r5,r5,MSR_VEC@h | ||
865 | SYNC | ||
866 | MTMSRD(r5) /* enable use of AltiVec now */ | ||
867 | isync | ||
868 | cmpwi 0,r3,0 | ||
869 | beqlr- /* if no previous owner, done */ | ||
870 | addi r3,r3,THREAD /* want THREAD of task */ | ||
871 | lwz r5,PT_REGS(r3) | ||
872 | cmpwi 0,r5,0 | ||
873 | SAVE_32VRS(0, r4, r3) | ||
874 | mfvscr vr0 | ||
875 | li r4,THREAD_VSCR | ||
876 | stvx vr0,r4,r3 | ||
877 | beq 1f | ||
878 | lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
879 | lis r3,MSR_VEC@h | ||
880 | andc r4,r4,r3 /* disable AltiVec for previous task */ | ||
881 | stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
882 | 1: | ||
883 | #ifndef CONFIG_SMP | ||
884 | li r5,0 | ||
885 | lis r4,last_task_used_altivec@ha | ||
886 | stw r5,last_task_used_altivec@l(r4) | ||
887 | #endif /* CONFIG_SMP */ | ||
888 | blr | ||
889 | #endif /* CONFIG_ALTIVEC */ | ||
890 | |||
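For the eager path, a corresponding C sketch (stand-in names again; on SMP the scheduler calls giveup_altivec on every context switch, as the comments above note):

    struct vec_state {
        unsigned long saved_msr;     /* models the _MSR slot in pt_regs */
    };

    static void model_giveup_vec(struct vec_state *prev, unsigned long msr_vec)
    {
        if (!prev)
            return;                  /* no previous owner: nothing to save */
        /* save prev's VRs and VSCR into its thread_struct here */
        prev->saved_msr &= ~msr_vec; /* prev re-faults on its next VEC use */
    }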
891 | /* | ||
892 | * This code is jumped to from the startup code to copy | ||
893 | * the kernel image to physical address 0. | ||
894 | */ | ||
895 | relocate_kernel: | ||
896 | addis r9,r26,klimit@ha /* fetch klimit */ | ||
897 | lwz r25,klimit@l(r9) | ||
898 | addis r25,r25,-KERNELBASE@h | ||
899 | li r3,0 /* Destination base address */ | ||
900 | li r6,0 /* Destination offset */ | ||
901 | li r5,0x4000 /* # bytes of memory to copy */ | ||
902 | bl copy_and_flush /* copy the first 0x4000 bytes */ | ||
903 | addi r0,r3,4f@l /* jump to the address of 4f */ | ||
904 | mtctr r0 /* in copy and do the rest. */ | ||
905 | bctr /* jump to the copy */ | ||
906 | 4: mr r5,r25 | ||
907 | bl copy_and_flush /* copy the rest */ | ||
908 | b turn_on_mmu | ||
909 | |||
910 | /* | ||
911 | * Copy routine used to copy the kernel to start at physical address 0 | ||
912 | * and flush and invalidate the caches as needed. | ||
913 | * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset | ||
914 | * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5. | ||
915 | */ | ||
916 | copy_and_flush: | ||
917 | addi r5,r5,-4 | ||
918 | addi r6,r6,-4 | ||
919 | 4: li r0,L1_CACHE_LINE_SIZE/4 | ||
920 | mtctr r0 | ||
921 | 3: addi r6,r6,4 /* copy a cache line */ | ||
922 | lwzx r0,r6,r4 | ||
923 | stwx r0,r6,r3 | ||
924 | bdnz 3b | ||
925 | dcbst r6,r3 /* write it to memory */ | ||
926 | sync | ||
927 | icbi r6,r3 /* flush the icache line */ | ||
928 | cmplw 0,r6,r5 | ||
929 | blt 4b | ||
930 | sync /* additional sync needed on g4 */ | ||
931 | isync | ||
932 | addi r5,r5,4 | ||
933 | addi r6,r6,4 | ||
934 | blr | ||
935 | |||
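The register contract above translates directly to C; this is a sketch assuming a 32-byte L1 line, with the cache-maintenance instructions left as comments since C has no equivalent:

    #include <stdint.h>

    #define L1_LINE_BYTES 32    /* assumption: L1_CACHE_LINE_SIZE == 32 */

    /* Copy bytes [off, limit) from src to dst a cache line at a time,
     * returning the updated offset (>= limit), per the asm contract. */
    static uintptr_t copy_and_flush_c(uint8_t *dst, const uint8_t *src,
                                      uintptr_t limit, uintptr_t off)
    {
        while (off < limit) {
            unsigned int i;
            for (i = 0; i < L1_LINE_BYTES; i += 4, off += 4)
                *(uint32_t *)(dst + off) = *(const uint32_t *)(src + off);
            /* dcbst: push the just-written line to memory */
            /* sync:  wait for the store to complete       */
            /* icbi:  toss any stale icache copy           */
        }
        /* the asm ends with sync; isync before returning */
        return off;
    }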
936 | #ifdef CONFIG_APUS | ||
937 | /* | ||
938 | * On APUS the physical base address of the kernel is not known at compile | ||
939 | * time, which means the __pa/__va constants used are incorrect. The | ||
940 | * __init section records the virtual addresses of the instructions using | ||
941 | * these constants, so all that has to be done is to fix these up before | ||
942 | * continuing the kernel boot. | ||
943 | * | ||
944 | * r4 = The physical address of the kernel base. | ||
945 | */ | ||
946 | fix_mem_constants: | ||
947 | mr r10,r4 | ||
948 | addis r10,r10,-KERNELBASE@h /* virt_to_phys constant */ | ||
949 | neg r11,r10 /* phys_to_virt constant */ | ||
950 | |||
951 | lis r12,__vtop_table_begin@h | ||
952 | ori r12,r12,__vtop_table_begin@l | ||
953 | add r12,r12,r10 /* table begin phys address */ | ||
954 | lis r13,__vtop_table_end@h | ||
955 | ori r13,r13,__vtop_table_end@l | ||
956 | add r13,r13,r10 /* table end phys address */ | ||
957 | subi r12,r12,4 | ||
958 | subi r13,r13,4 | ||
959 | 1: lwzu r14,4(r12) /* virt address of instruction */ | ||
960 | add r14,r14,r10 /* phys address of instruction */ | ||
961 | lwz r15,0(r14) /* instruction, now insert top */ | ||
962 | rlwimi r15,r10,16,16,31 /* half of vp const in low half */ | ||
963 | stw r15,0(r14) /* of instruction and restore. */ | ||
964 | dcbst r0,r14 /* write it to memory */ | ||
965 | sync | ||
966 | icbi r0,r14 /* flush the icache line */ | ||
967 | cmpw r12,r13 | ||
968 | bne 1b | ||
969 | sync /* additional sync needed on g4 */ | ||
970 | isync | ||
971 | |||
972 | /* | ||
973 | * Map the memory where the exception handlers will | ||
974 | * be copied to when hash constants have been patched. | ||
975 | */ | ||
976 | #ifdef CONFIG_APUS_FAST_EXCEPT | ||
977 | lis r8,0xfff0 | ||
978 | #else | ||
979 | lis r8,0 | ||
980 | #endif | ||
981 | ori r8,r8,0x2 /* 128KB, supervisor */ | ||
982 | mtspr SPRN_DBAT3U,r8 | ||
983 | mtspr SPRN_DBAT3L,r8 | ||
984 | |||
985 | lis r12,__ptov_table_begin@h | ||
986 | ori r12,r12,__ptov_table_begin@l | ||
987 | add r12,r12,r10 /* table begin phys address */ | ||
988 | lis r13,__ptov_table_end@h | ||
989 | ori r13,r13,__ptov_table_end@l | ||
990 | add r13,r13,r10 /* table end phys address */ | ||
991 | subi r12,r12,4 | ||
992 | subi r13,r13,4 | ||
993 | 1: lwzu r14,4(r12) /* virt address of instruction */ | ||
994 | add r14,r14,r10 /* phys address of instruction */ | ||
995 | lwz r15,0(r14) /* instruction, now insert top */ | ||
996 | rlwimi r15,r11,16,16,31 /* half of pv const in low half*/ | ||
997 | stw r15,0(r14) /* of instruction and restore. */ | ||
998 | dcbst r0,r14 /* write it to memory */ | ||
999 | sync | ||
1000 | icbi r0,r14 /* flush the icache line */ | ||
1001 | cmpw r12,r13 | ||
1002 | bne 1b | ||
1003 | |||
1004 | sync /* additional sync needed on g4 */ | ||
1005 | isync /* No speculative loading until now */ | ||
1006 | blr | ||
1007 | |||
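A C rendering of the patch loop may make the fixup clearer; this is a sketch (cache maintenance elided) in which each table slot holds the virtual address of an instruction whose low halfword receives the high half of the virt-to-phys constant, mirroring the rlwimi above:

    #include <stdint.h>

    static void patch_vtop_constants(uint32_t *tbl_begin, uint32_t *tbl_end,
                                     uint32_t vtop_offset)
    {
        uint32_t *slot;

        for (slot = tbl_begin; slot < tbl_end; slot++) {
            /* the table holds virtual addresses; add the offset
             * to reach the instruction's physical location */
            uint32_t *insn = (uint32_t *)(uintptr_t)(*slot + vtop_offset);

            /* rlwimi r15,r10,16,16,31: the instruction's low 16 bits
             * become the high half of the virt-to-phys constant */
            *insn = (*insn & 0xffff0000u) | (vtop_offset >> 16);
        }
    }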
1008 | /*********************************************************************** | ||
1009 | * Please note that on APUS the exception handlers are located at the | ||
1010 | * physical address 0xfff0000. For this reason, the exception handlers | ||
1011 | * cannot use relative branches to access the code below. | ||
1012 | ***********************************************************************/ | ||
1013 | #endif /* CONFIG_APUS */ | ||
1014 | |||
1015 | #ifdef CONFIG_SMP | ||
1016 | #ifdef CONFIG_GEMINI | ||
1017 | .globl __secondary_start_gemini | ||
1018 | __secondary_start_gemini: | ||
1019 | mfspr r4,SPRN_HID0 | ||
1020 | ori r4,r4,HID0_ICFI | ||
1021 | li r3,0 | ||
1022 | ori r3,r3,HID0_ICE | ||
1023 | andc r4,r4,r3 | ||
1024 | mtspr SPRN_HID0,r4 | ||
1025 | sync | ||
1026 | b __secondary_start | ||
1027 | #endif /* CONFIG_GEMINI */ | ||
1028 | |||
1029 | .globl __secondary_start_pmac_0 | ||
1030 | __secondary_start_pmac_0: | ||
1031 | /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */ | ||
1032 | li r24,0 | ||
1033 | b 1f | ||
1034 | li r24,1 | ||
1035 | b 1f | ||
1036 | li r24,2 | ||
1037 | b 1f | ||
1038 | li r24,3 | ||
1039 | 1: | ||
1040 | /* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0 | ||
1041 | set to map the 0xf0000000 - 0xffffffff region */ | ||
1042 | mfmsr r0 | ||
1043 | rlwinm r0,r0,0,28,26 /* clear DR (0x10) */ | ||
1044 | SYNC | ||
1045 | mtmsr r0 | ||
1046 | isync | ||
1047 | |||
1048 | .globl __secondary_start | ||
1049 | __secondary_start: | ||
1050 | #ifdef CONFIG_PPC64BRIDGE | ||
1051 | mfmsr r0 | ||
1052 | clrldi r0,r0,1 /* make sure it's in 32-bit mode */ | ||
1053 | SYNC | ||
1054 | MTMSRD(r0) | ||
1055 | isync | ||
1056 | #endif | ||
1057 | /* Copy some CPU settings from CPU 0 */ | ||
1058 | bl __restore_cpu_setup | ||
1059 | |||
1060 | lis r3,-KERNELBASE@h | ||
1061 | mr r4,r24 | ||
1062 | bl identify_cpu | ||
1063 | bl call_setup_cpu /* Call setup_cpu for this CPU */ | ||
1064 | #ifdef CONFIG_6xx | ||
1065 | lis r3,-KERNELBASE@h | ||
1066 | bl init_idle_6xx | ||
1067 | #endif /* CONFIG_6xx */ | ||
1068 | #ifdef CONFIG_POWER4 | ||
1069 | lis r3,-KERNELBASE@h | ||
1070 | bl init_idle_power4 | ||
1071 | #endif /* CONFIG_POWER4 */ | ||
1072 | |||
1073 | /* get current_thread_info and current */ | ||
1074 | lis r1,secondary_ti@ha | ||
1075 | tophys(r1,r1) | ||
1076 | lwz r1,secondary_ti@l(r1) | ||
1077 | tophys(r2,r1) | ||
1078 | lwz r2,TI_TASK(r2) | ||
1079 | |||
1080 | /* stack */ | ||
1081 | addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD | ||
1082 | li r0,0 | ||
1083 | tophys(r3,r1) | ||
1084 | stw r0,0(r3) | ||
1085 | |||
1086 | /* load up the MMU */ | ||
1087 | bl load_up_mmu | ||
1088 | |||
1089 | /* ptr to phys current thread */ | ||
1090 | tophys(r4,r2) | ||
1091 | addi r4,r4,THREAD /* phys address of our thread_struct */ | ||
1092 | CLR_TOP32(r4) | ||
1093 | mtspr SPRN_SPRG3,r4 | ||
1094 | li r3,0 | ||
1095 | mtspr SPRN_SPRG2,r3 /* 0 => not in RTAS */ | ||
1096 | |||
1097 | /* enable MMU and jump to start_secondary */ | ||
1098 | li r4,MSR_KERNEL | ||
1099 | FIX_SRR1(r4,r5) | ||
1100 | lis r3,start_secondary@h | ||
1101 | ori r3,r3,start_secondary@l | ||
1102 | mtspr SPRN_SRR0,r3 | ||
1103 | mtspr SPRN_SRR1,r4 | ||
1104 | SYNC | ||
1105 | RFI | ||
1106 | #endif /* CONFIG_SMP */ | ||
1107 | |||
1108 | /* | ||
1109 | * These generic dummy functions are kept for CPUs not | ||
1110 | * included in CONFIG_6xx | ||
1111 | */ | ||
1112 | _GLOBAL(__setup_cpu_power3) | ||
1113 | blr | ||
1114 | _GLOBAL(__setup_cpu_generic) | ||
1115 | blr | ||
1116 | |||
1117 | #if !defined(CONFIG_6xx) && !defined(CONFIG_POWER4) | ||
1118 | _GLOBAL(__save_cpu_setup) | ||
1119 | blr | ||
1120 | _GLOBAL(__restore_cpu_setup) | ||
1121 | blr | ||
1122 | #endif /* !defined(CONFIG_6xx) && !defined(CONFIG_POWER4) */ | ||
1123 | |||
1124 | |||
1125 | /* | ||
1126 | * Load SDR1, the segment registers and (on non-POWER4) the BATs | ||
1127 | * into the MMU. Intended to be called with IR=0 and DR=0. | ||
1128 | */ | ||
1129 | load_up_mmu: | ||
1130 | sync /* Force all PTE updates to finish */ | ||
1131 | isync | ||
1132 | tlbia /* Clear all TLB entries */ | ||
1133 | sync /* wait for tlbia/tlbie to finish */ | ||
1134 | TLBSYNC /* ... on all CPUs */ | ||
1135 | /* Load the SDR1 register (hash table base & size) */ | ||
1136 | lis r6,_SDR1@ha | ||
1137 | tophys(r6,r6) | ||
1138 | lwz r6,_SDR1@l(r6) | ||
1139 | mtspr SPRN_SDR1,r6 | ||
1140 | #ifdef CONFIG_PPC64BRIDGE | ||
1141 | /* clear the ASR so we only use the pseudo-segment registers. */ | ||
1142 | li r6,0 | ||
1143 | mtasr r6 | ||
1144 | #endif /* CONFIG_PPC64BRIDGE */ | ||
1145 | li r0,16 /* load up segment register values */ | ||
1146 | mtctr r0 /* for context 0 */ | ||
1147 | lis r3,0x2000 /* Ku = 1, VSID = 0 */ | ||
1148 | li r4,0 | ||
1149 | 3: mtsrin r3,r4 | ||
1150 | addi r3,r3,0x111 /* increment VSID */ | ||
1151 | addis r4,r4,0x1000 /* address of next segment */ | ||
1152 | bdnz 3b | ||
1153 | #ifndef CONFIG_POWER4 | ||
1154 | /* Load the BAT registers with the values set up by MMU_init. | ||
1155 | MMU_init takes care of whether we're on a 601 or not. */ | ||
1156 | mfpvr r3 | ||
1157 | srwi r3,r3,16 | ||
1158 | cmpwi r3,1 | ||
1159 | lis r3,BATS@ha | ||
1160 | addi r3,r3,BATS@l | ||
1161 | tophys(r3,r3) | ||
1162 | LOAD_BAT(0,r3,r4,r5) | ||
1163 | LOAD_BAT(1,r3,r4,r5) | ||
1164 | LOAD_BAT(2,r3,r4,r5) | ||
1165 | LOAD_BAT(3,r3,r4,r5) | ||
1166 | #endif /* CONFIG_POWER4 */ | ||
1167 | blr | ||
1168 | |||
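The segment-register loop is simple enough to model in C (register writes shown as array stores):

    #include <stdint.h>

    /* 16 segment registers for context 0: Ku set, VSIDs stepping by
     * 0x111 so every segment gets a distinct VSID. */
    static void model_load_segment_regs(uint32_t sr[16])
    {
        uint32_t val = 0x20000000;  /* Ku = 1, VSID = 0 */
        int seg;

        for (seg = 0; seg < 16; seg++) {
            sr[seg] = val;          /* mtsrin equivalent */
            val += 0x111;           /* increment VSID */
        }
    }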
1169 | /* | ||
1170 | * This is where the main kernel code starts. | ||
1171 | */ | ||
1172 | start_here: | ||
1173 | /* ptr to current */ | ||
1174 | lis r2,init_task@h | ||
1175 | ori r2,r2,init_task@l | ||
1176 | /* Set up for using our exception vectors */ | ||
1177 | /* ptr to phys current thread */ | ||
1178 | tophys(r4,r2) | ||
1179 | addi r4,r4,THREAD /* init task's THREAD */ | ||
1180 | CLR_TOP32(r4) | ||
1181 | mtspr SPRN_SPRG3,r4 | ||
1182 | li r3,0 | ||
1183 | mtspr SPRN_SPRG2,r3 /* 0 => not in RTAS */ | ||
1184 | |||
1185 | /* stack */ | ||
1186 | lis r1,init_thread_union@ha | ||
1187 | addi r1,r1,init_thread_union@l | ||
1188 | li r0,0 | ||
1189 | stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) | ||
1190 | /* | ||
1191 | * Do early bootinfo parsing, platform-specific initialization, | ||
1192 | * and set up the MMU. | ||
1193 | */ | ||
1194 | mr r3,r31 | ||
1195 | mr r4,r30 | ||
1196 | mr r5,r29 | ||
1197 | mr r6,r28 | ||
1198 | mr r7,r27 | ||
1199 | bl machine_init | ||
1200 | bl MMU_init | ||
1201 | |||
1202 | #ifdef CONFIG_APUS | ||
1203 | /* Copy exception code to exception vector base on APUS. */ | ||
1204 | lis r4,KERNELBASE@h | ||
1205 | #ifdef CONFIG_APUS_FAST_EXCEPT | ||
1206 | lis r3,0xfff0 /* Copy to 0xfff00000 */ | ||
1207 | #else | ||
1208 | lis r3,0 /* Copy to 0x00000000 */ | ||
1209 | #endif | ||
1210 | li r5,0x4000 /* # bytes of memory to copy */ | ||
1211 | li r6,0 | ||
1212 | bl copy_and_flush /* copy the first 0x4000 bytes */ | ||
1213 | #endif /* CONFIG_APUS */ | ||
1214 | |||
1215 | /* | ||
1216 | * Go back to running unmapped so we can load up new values | ||
1217 | * for SDR1 (hash table pointer) and the segment registers | ||
1218 | * and change to using our exception vectors. | ||
1219 | */ | ||
1220 | lis r4,2f@h | ||
1221 | ori r4,r4,2f@l | ||
1222 | tophys(r4,r4) | ||
1223 | li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR) | ||
1224 | FIX_SRR1(r3,r5) | ||
1225 | mtspr SPRN_SRR0,r4 | ||
1226 | mtspr SPRN_SRR1,r3 | ||
1227 | SYNC | ||
1228 | RFI | ||
1229 | /* Load up the kernel context */ | ||
1230 | 2: bl load_up_mmu | ||
1231 | |||
1232 | #ifdef CONFIG_BDI_SWITCH | ||
1233 | /* Add helper information for the Abatron bdiGDB debugger. | ||
1234 | * We do this here because we know the mmu is disabled, and | ||
1235 | * will be enabled for real in just a few instructions. | ||
1236 | */ | ||
1237 | lis r5, abatron_pteptrs@h | ||
1238 | ori r5, r5, abatron_pteptrs@l | ||
1239 | stw r5, 0xf0(r0) /* This must match your Abatron config */ | ||
1240 | lis r6, swapper_pg_dir@h | ||
1241 | ori r6, r6, swapper_pg_dir@l | ||
1242 | tophys(r5, r5) | ||
1243 | stw r6, 0(r5) | ||
1244 | #endif /* CONFIG_BDI_SWITCH */ | ||
1245 | |||
1246 | /* Now turn on the MMU for real! */ | ||
1247 | li r4,MSR_KERNEL | ||
1248 | FIX_SRR1(r4,r5) | ||
1249 | lis r3,start_kernel@h | ||
1250 | ori r3,r3,start_kernel@l | ||
1251 | mtspr SPRN_SRR0,r3 | ||
1252 | mtspr SPRN_SRR1,r4 | ||
1253 | SYNC | ||
1254 | RFI | ||
1255 | |||
1256 | /* | ||
1257 | * Set up the segment registers for a new context. | ||
1258 | */ | ||
1259 | _GLOBAL(set_context) | ||
1260 | mulli r3,r3,897 /* multiply context by skew factor */ | ||
1261 | rlwinm r3,r3,4,8,27 /* VSID = (context & 0xfffff) << 4 */ | ||
1262 | addis r3,r3,0x6000 /* Set Ks, Ku bits */ | ||
1263 | li r0,NUM_USER_SEGMENTS | ||
1264 | mtctr r0 | ||
1265 | |||
1266 | #ifdef CONFIG_BDI_SWITCH | ||
1267 | /* Context switch the PTE pointer for the Abatron BDI2000. | ||
1268 | * The PGDIR is passed as second argument. | ||
1269 | */ | ||
1270 | lis r5, KERNELBASE@h | ||
1271 | lwz r5, 0xf0(r5) | ||
1272 | stw r4, 0x4(r5) | ||
1273 | #endif | ||
1274 | li r4,0 | ||
1275 | isync | ||
1276 | 3: | ||
1277 | #ifdef CONFIG_PPC64BRIDGE | ||
1278 | slbie r4 | ||
1279 | #endif /* CONFIG_PPC64BRIDGE */ | ||
1280 | mtsrin r3,r4 | ||
1281 | addi r3,r3,0x111 /* next VSID */ | ||
1282 | rlwinm r3,r3,0,8,3 /* clear out any overflow from VSID field */ | ||
1283 | addis r4,r4,0x1000 /* address of next segment */ | ||
1284 | bdnz 3b | ||
1285 | sync | ||
1286 | isync | ||
1287 | blr | ||
1288 | |||
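As a worked example of the skew arithmetic, a C model of the values set_context writes; the masks mirror the rlwinm/rlwimi operations above, and nsegs stands in for NUM_USER_SEGMENTS:

    #include <stdint.h>

    static void model_set_context(uint32_t context, uint32_t sr[], int nsegs)
    {
        /* VSID = (context * 897) << 4 masked into the VSID field,
         * with the Ks and Ku bits set */
        uint32_t val = (((context * 897) << 4) & 0x00fffff0u) | 0x60000000u;
        int seg;

        for (seg = 0; seg < nsegs; seg++) {
            sr[seg] = val;          /* mtsrin equivalent */
            val += 0x111;           /* next VSID */
            val &= ~0x0f000000u;    /* clear overflow out of the VSID field */
        }
    }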
1289 | /* | ||
1290 | * An undocumented "feature" of the 604e requires that the valid (V) | ||
1291 | * bit be cleared before changing BAT values. | ||
1292 | * | ||
1293 | * Also, newer IBM firmware does not clear BAT3 and BAT4, so | ||
1294 | * this makes sure they are cleared. | ||
1295 | * -- Cort | ||
1296 | */ | ||
1297 | clear_bats: | ||
1298 | li r10,0 | ||
1299 | mfspr r9,SPRN_PVR | ||
1300 | rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */ | ||
1301 | cmpwi r9, 1 | ||
1302 | beq 1f | ||
1303 | |||
1304 | mtspr SPRN_DBAT0U,r10 | ||
1305 | mtspr SPRN_DBAT0L,r10 | ||
1306 | mtspr SPRN_DBAT1U,r10 | ||
1307 | mtspr SPRN_DBAT1L,r10 | ||
1308 | mtspr SPRN_DBAT2U,r10 | ||
1309 | mtspr SPRN_DBAT2L,r10 | ||
1310 | mtspr SPRN_DBAT3U,r10 | ||
1311 | mtspr SPRN_DBAT3L,r10 | ||
1312 | 1: | ||
1313 | mtspr SPRN_IBAT0U,r10 | ||
1314 | mtspr SPRN_IBAT0L,r10 | ||
1315 | mtspr SPRN_IBAT1U,r10 | ||
1316 | mtspr SPRN_IBAT1L,r10 | ||
1317 | mtspr SPRN_IBAT2U,r10 | ||
1318 | mtspr SPRN_IBAT2L,r10 | ||
1319 | mtspr SPRN_IBAT3U,r10 | ||
1320 | mtspr SPRN_IBAT3L,r10 | ||
1321 | BEGIN_FTR_SECTION | ||
1322 | /* Here's a tweak: at this point, CPU setup has | ||
1323 | * not been called yet, so HIGH_BAT_EN may not be | ||
1324 | * set in HID0 for the 745x processors. However, it | ||
1325 | * seems that this doesn't affect our ability to actually | ||
1326 | * write to these SPRs. | ||
1327 | */ | ||
1328 | mtspr SPRN_DBAT4U,r10 | ||
1329 | mtspr SPRN_DBAT4L,r10 | ||
1330 | mtspr SPRN_DBAT5U,r10 | ||
1331 | mtspr SPRN_DBAT5L,r10 | ||
1332 | mtspr SPRN_DBAT6U,r10 | ||
1333 | mtspr SPRN_DBAT6L,r10 | ||
1334 | mtspr SPRN_DBAT7U,r10 | ||
1335 | mtspr SPRN_DBAT7L,r10 | ||
1336 | mtspr SPRN_IBAT4U,r10 | ||
1337 | mtspr SPRN_IBAT4L,r10 | ||
1338 | mtspr SPRN_IBAT5U,r10 | ||
1339 | mtspr SPRN_IBAT5L,r10 | ||
1340 | mtspr SPRN_IBAT6U,r10 | ||
1341 | mtspr SPRN_IBAT6L,r10 | ||
1342 | mtspr SPRN_IBAT7U,r10 | ||
1343 | mtspr SPRN_IBAT7L,r10 | ||
1344 | END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS) | ||
1345 | blr | ||
1346 | |||
1347 | flush_tlbs: | ||
1348 | lis r10, 0x40 | ||
1349 | 1: addic. r10, r10, -0x1000 | ||
1350 | tlbie r10 | ||
1351 | blt 1b | ||
1352 | sync | ||
1353 | blr | ||
1354 | |||
1355 | mmu_off: | ||
1356 | addi r4, r3, __after_mmu_off - _start | ||
1357 | mfmsr r3 | ||
1358 | andi. r0,r3,MSR_DR|MSR_IR /* MMU enabled? */ | ||
1359 | beqlr | ||
1360 | andc r3,r3,r0 | ||
1361 | mtspr SPRN_SRR0,r4 | ||
1362 | mtspr SPRN_SRR1,r3 | ||
1363 | sync | ||
1364 | RFI | ||
1365 | |||
1366 | #ifndef CONFIG_POWER4 | ||
1367 | /* | ||
1368 | * Use the first pair of BAT registers to map the 1st 16MB | ||
1369 | * of RAM to KERNELBASE. From this point on we can't safely | ||
1370 | * call OF any more. | ||
1371 | */ | ||
1372 | initial_bats: | ||
1373 | lis r11,KERNELBASE@h | ||
1374 | #ifndef CONFIG_PPC64BRIDGE | ||
1375 | mfspr r9,SPRN_PVR | ||
1376 | rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */ | ||
1377 | cmpwi 0,r9,1 | ||
1378 | bne 4f | ||
1379 | ori r11,r11,4 /* set up BAT registers for 601 */ | ||
1380 | li r8,0x7f /* valid, block length = 8MB */ | ||
1381 | oris r9,r11,0x800000@h /* set up BAT reg for 2nd 8M */ | ||
1382 | oris r10,r8,0x800000@h /* set up BAT reg for 2nd 8M */ | ||
1383 | mtspr SPRN_IBAT0U,r11 /* N.B. 601 has valid bit in */ | ||
1384 | mtspr SPRN_IBAT0L,r8 /* lower BAT register */ | ||
1385 | mtspr SPRN_IBAT1U,r9 | ||
1386 | mtspr SPRN_IBAT1L,r10 | ||
1387 | isync | ||
1388 | blr | ||
1389 | #endif /* CONFIG_PPC64BRIDGE */ | ||
1390 | |||
1391 | 4: tophys(r8,r11) | ||
1392 | #ifdef CONFIG_SMP | ||
1393 | ori r8,r8,0x12 /* R/W access, M=1 */ | ||
1394 | #else | ||
1395 | ori r8,r8,2 /* R/W access */ | ||
1396 | #endif /* CONFIG_SMP */ | ||
1397 | #ifdef CONFIG_APUS | ||
1398 | ori r11,r11,BL_8M<<2|0x2 /* set up 8MB BAT registers for 604 */ | ||
1399 | #else | ||
1400 | ori r11,r11,BL_256M<<2|0x2 /* set up BAT registers for 604 */ | ||
1401 | #endif /* CONFIG_APUS */ | ||
1402 | |||
1403 | #ifdef CONFIG_PPC64BRIDGE | ||
1404 | /* clear out the high 32 bits in the BAT */ | ||
1405 | clrldi r11,r11,32 | ||
1406 | clrldi r8,r8,32 | ||
1407 | #endif /* CONFIG_PPC64BRIDGE */ | ||
1408 | mtspr SPRN_DBAT0L,r8 /* N.B. 6xx (not 601) have valid */ | ||
1409 | mtspr SPRN_DBAT0U,r11 /* bit in upper BAT register */ | ||
1410 | mtspr SPRN_IBAT0L,r8 | ||
1411 | mtspr SPRN_IBAT0U,r11 | ||
1412 | isync | ||
1413 | blr | ||
1414 | |||
1415 | #if !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) | ||
1416 | setup_disp_bat: | ||
1417 | /* | ||
1418 | * Set up the display BAT prepared for us in prom.c. | ||
1419 | */ | ||
1420 | mflr r8 | ||
1421 | bl reloc_offset | ||
1422 | mtlr r8 | ||
1423 | addis r8,r3,disp_BAT@ha | ||
1424 | addi r8,r8,disp_BAT@l | ||
1425 | lwz r11,0(r8) | ||
1426 | lwz r8,4(r8) | ||
1427 | mfspr r9,SPRN_PVR | ||
1428 | rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */ | ||
1429 | cmpwi 0,r9,1 | ||
1430 | beq 1f | ||
1431 | mtspr SPRN_DBAT3L,r8 | ||
1432 | mtspr SPRN_DBAT3U,r11 | ||
1433 | blr | ||
1434 | 1: mtspr SPRN_IBAT3L,r8 | ||
1435 | mtspr SPRN_IBAT3U,r11 | ||
1436 | blr | ||
1437 | |||
1438 | #endif /* !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) */ | ||
1439 | |||
1440 | #else /* CONFIG_POWER4 */ | ||
1441 | /* | ||
1442 | * Load up the SDR1 and segment register values now | ||
1443 | * since we don't have the BATs. | ||
1444 | * Also make sure we are running in 32-bit mode. | ||
1445 | */ | ||
1446 | |||
1447 | initial_mm_power4: | ||
1448 | addis r14,r3,_SDR1@ha /* get the value from _SDR1 */ | ||
1449 | lwz r14,_SDR1@l(r14) /* assume hash table below 4GB */ | ||
1450 | mtspr SPRN_SDR1,r14 | ||
1451 | slbia | ||
1452 | lis r4,0x2000 /* set pseudo-segment reg 12 */ | ||
1453 | ori r5,r4,0x0ccc | ||
1454 | mtsr 12,r5 | ||
1455 | #if 0 | ||
1456 | ori r5,r4,0x0888 /* set pseudo-segment reg 8 */ | ||
1457 | mtsr 8,r5 /* (for access to serial port) */ | ||
1458 | #endif | ||
1459 | #ifdef CONFIG_BOOTX_TEXT | ||
1460 | ori r5,r4,0x0999 /* set pseudo-segment reg 9 */ | ||
1461 | mtsr 9,r5 /* (for access to screen) */ | ||
1462 | #endif | ||
1463 | mfmsr r0 | ||
1464 | clrldi r0,r0,1 | ||
1465 | sync | ||
1466 | mtmsr r0 | ||
1467 | isync | ||
1468 | blr | ||
1469 | |||
1470 | #endif /* CONFIG_POWER4 */ | ||
1471 | |||
1472 | #ifdef CONFIG_8260 | ||
473 | /* Jump into the system reset for the ROM. | ||
1474 | * We first disable the MMU, and then jump to the ROM reset address. | ||
1475 | * | ||
1476 | * r3 is the board info structure, r4 is the location for starting. | ||
1477 | * I use this for building a small kernel that can load other kernels, | ||
1478 | * rather than trying to write or rely on a rom monitor that can tftp load. | ||
1479 | */ | ||
1480 | .globl m8260_gorom | ||
1481 | m8260_gorom: | ||
1482 | mfmsr r0 | ||
1483 | rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */ | ||
1484 | sync | ||
1485 | mtmsr r0 | ||
1486 | sync | ||
1487 | mfspr r11, SPRN_HID0 | ||
1488 | lis r10, 0 | ||
1489 | ori r10,r10,HID0_ICE|HID0_DCE | ||
1490 | andc r11, r11, r10 | ||
1491 | mtspr SPRN_HID0, r11 | ||
1492 | isync | ||
1493 | li r5, MSR_ME|MSR_RI | ||
1494 | lis r6,2f@h | ||
1495 | addis r6,r6,-KERNELBASE@h | ||
1496 | ori r6,r6,2f@l | ||
1497 | mtspr SPRN_SRR0,r6 | ||
1498 | mtspr SPRN_SRR1,r5 | ||
1499 | isync | ||
1500 | sync | ||
1501 | rfi | ||
1502 | 2: | ||
1503 | mtlr r4 | ||
1504 | blr | ||
1505 | #endif | ||
1506 | |||
1507 | |||
1508 | /* | ||
1509 | * We put a few things here that have to be page-aligned. | ||
1510 | * This stuff goes at the beginning of the data segment, | ||
1511 | * which is page-aligned. | ||
1512 | */ | ||
1513 | .data | ||
1514 | .globl sdata | ||
1515 | sdata: | ||
1516 | .globl empty_zero_page | ||
1517 | empty_zero_page: | ||
1518 | .space 4096 | ||
1519 | |||
1520 | .globl swapper_pg_dir | ||
1521 | swapper_pg_dir: | ||
1522 | .space 4096 | ||
1523 | |||
1524 | /* | ||
1525 | * This space gets a copy of optional info passed to us by the bootstrap, | ||
1526 | * which is used to pass parameters into the kernel like root=/dev/sda1, etc. | ||
1527 | */ | ||
1528 | .globl cmd_line | ||
1529 | cmd_line: | ||
1530 | .space 512 | ||
1531 | |||
1532 | .globl intercept_table | ||
1533 | intercept_table: | ||
1534 | .long 0, 0, i0x200, i0x300, i0x400, 0, i0x600, i0x700 | ||
1535 | .long i0x800, 0, 0, 0, 0, i0xd00, 0, 0 | ||
1536 | .long 0, 0, 0, i0x1300, 0, 0, 0, 0 | ||
1537 | .long 0, 0, 0, 0, 0, 0, 0, 0 | ||
1538 | .long 0, 0, 0, 0, 0, 0, 0, 0 | ||
1539 | .long 0, 0, 0, 0, 0, 0, 0, 0 | ||
1540 | |||
1541 | /* Room for two PTE pointers, usually the kernel and current user pointers | ||
1542 | * to their respective root page table. | ||
1543 | */ | ||
1544 | abatron_pteptrs: | ||
1545 | .space 8 | ||
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S new file mode 100644 index 000000000000..599245b0407e --- /dev/null +++ b/arch/powerpc/kernel/head_44x.S | |||
@@ -0,0 +1,778 @@ | |||
1 | /* | ||
2 | * arch/ppc/kernel/head_44x.S | ||
3 | * | ||
4 | * Kernel execution entry point code. | ||
5 | * | ||
6 | * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org> | ||
7 | * Initial PowerPC version. | ||
8 | * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu> | ||
9 | * Rewritten for PReP | ||
10 | * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au> | ||
11 | * Low-level exception handlers, MMU support, and rewrite. | ||
12 | * Copyright (c) 1997 Dan Malek <dmalek@jlc.net> | ||
13 | * PowerPC 8xx modifications. | ||
14 | * Copyright (c) 1998-1999 TiVo, Inc. | ||
15 | * PowerPC 403GCX modifications. | ||
16 | * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu> | ||
17 | * PowerPC 403GCX/405GP modifications. | ||
18 | * Copyright 2000 MontaVista Software Inc. | ||
19 | * PPC405 modifications | ||
20 | * PowerPC 403GCX/405GP modifications. | ||
21 | * Author: MontaVista Software, Inc. | ||
22 | * frank_rowand@mvista.com or source@mvista.com | ||
23 | * debbie_chu@mvista.com | ||
24 | * Copyright 2002-2005 MontaVista Software, Inc. | ||
25 | * PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org> | ||
26 | * | ||
27 | * This program is free software; you can redistribute it and/or modify it | ||
28 | * under the terms of the GNU General Public License as published by the | ||
29 | * Free Software Foundation; either version 2 of the License, or (at your | ||
30 | * option) any later version. | ||
31 | */ | ||
32 | |||
33 | #include <linux/config.h> | ||
34 | #include <asm/processor.h> | ||
35 | #include <asm/page.h> | ||
36 | #include <asm/mmu.h> | ||
37 | #include <asm/pgtable.h> | ||
38 | #include <asm/ibm4xx.h> | ||
39 | #include <asm/ibm44x.h> | ||
40 | #include <asm/cputable.h> | ||
41 | #include <asm/thread_info.h> | ||
42 | #include <asm/ppc_asm.h> | ||
43 | #include <asm/asm-offsets.h> | ||
44 | #include "head_booke.h" | ||
45 | |||
46 | |||
47 | /* As with the other PowerPC ports, it is expected that when code | ||
48 | * execution begins here, the following registers contain valid, yet | ||
49 | * optional, information: | ||
50 | * | ||
51 | * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.) | ||
52 | * r4 - Starting address of the init RAM disk | ||
53 | * r5 - Ending address of the init RAM disk | ||
54 | * r6 - Start of kernel command line string (e.g. "mem=128") | ||
55 | * r7 - End of kernel command line string | ||
56 | * | ||
57 | */ | ||
58 | .text | ||
59 | _GLOBAL(_stext) | ||
60 | _GLOBAL(_start) | ||
61 | /* | ||
62 | * Reserve a word at a fixed location to store the address | ||
63 | * of abatron_pteptrs | ||
64 | */ | ||
65 | nop | ||
66 | /* | ||
67 | * Save parameters we are passed | ||
68 | */ | ||
69 | mr r31,r3 | ||
70 | mr r30,r4 | ||
71 | mr r29,r5 | ||
72 | mr r28,r6 | ||
73 | mr r27,r7 | ||
74 | li r24,0 /* CPU number */ | ||
75 | |||
76 | /* | ||
77 | * Set up the initial MMU state | ||
78 | * | ||
79 | * We are still executing code at the virtual address | ||
80 | * mappings set by the firmware for the base of RAM. | ||
81 | * | ||
82 | * We first invalidate all TLB entries but the one | ||
83 | * we are running from. We then load the KERNELBASE | ||
84 | * mappings so we can begin to use kernel addresses | ||
85 | * natively and so the interrupt vector locations are | ||
86 | * permanently pinned (necessary since Book E | ||
87 | * implementations always have translation enabled). | ||
88 | * | ||
89 | * TODO: Use the known TLB entry we are running from to | ||
90 | * determine which physical region we are located | ||
91 | * in. This can be used to determine where in RAM | ||
92 | * (on a shared CPU system) or PCI memory space | ||
93 | * (on a DRAMless system) we are located. | ||
94 | * For now, we assume a perfect world which means | ||
95 | * we are located at the base of DRAM (physical 0). | ||
96 | */ | ||
97 | |||
98 | /* | ||
99 | * Search TLB for entry that we are currently using. | ||
100 | * Invalidate all entries but the one we are using. | ||
101 | */ | ||
102 | /* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */ | ||
103 | mfspr r3,SPRN_PID /* Get PID */ | ||
104 | mfmsr r4 /* Get MSR */ | ||
105 | andi. r4,r4,MSR_IS@l /* TS=1? */ | ||
106 | beq wmmucr /* If not, leave STS=0 */ | ||
107 | oris r3,r3,PPC44x_MMUCR_STS@h /* Set STS=1 */ | ||
108 | wmmucr: mtspr SPRN_MMUCR,r3 /* Put MMUCR */ | ||
109 | sync | ||
110 | |||
111 | bl invstr /* Find our address */ | ||
112 | invstr: mflr r5 /* Make it accessible */ | ||
113 | tlbsx r23,0,r5 /* Find entry we are in */ | ||
114 | li r4,0 /* Start at TLB entry 0 */ | ||
115 | li r3,0 /* Set PAGEID inval value */ | ||
116 | 1: cmpw r23,r4 /* Is this our entry? */ | ||
117 | beq skpinv /* If so, skip the inval */ | ||
118 | tlbwe r3,r4,PPC44x_TLB_PAGEID /* If not, inval the entry */ | ||
119 | skpinv: addi r4,r4,1 /* Increment */ | ||
120 | cmpwi r4,64 /* Are we done? */ | ||
121 | bne 1b /* If not, repeat */ | ||
122 | isync /* If so, context change */ | ||
123 | |||
124 | /* | ||
125 | * Configure and load pinned entry into TLB slot 63. | ||
126 | */ | ||
127 | |||
128 | lis r3,KERNELBASE@h /* Load the kernel virtual address */ | ||
129 | ori r3,r3,KERNELBASE@l | ||
130 | |||
131 | /* Kernel is at the base of RAM */ | ||
132 | li r4, 0 /* Load the kernel physical address */ | ||
133 | |||
134 | /* Load the kernel PID = 0 */ | ||
135 | li r0,0 | ||
136 | mtspr SPRN_PID,r0 | ||
137 | sync | ||
138 | |||
139 | /* Initialize MMUCR */ | ||
140 | li r5,0 | ||
141 | mtspr SPRN_MMUCR,r5 | ||
142 | sync | ||
143 | |||
144 | /* pageid fields */ | ||
145 | clrrwi r3,r3,10 /* Mask off the effective page number */ | ||
146 | ori r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M | ||
147 | |||
148 | /* xlat fields */ | ||
149 | clrrwi r4,r4,10 /* Mask off the real page number */ | ||
150 | /* ERPN is 0 for first 4GB page */ | ||
151 | |||
152 | /* attrib fields */ | ||
153 | /* Added guarded bit to protect against speculative loads/stores */ | ||
154 | li r5,0 | ||
155 | ori r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G) | ||
156 | |||
157 | li r0,63 /* TLB slot 63 */ | ||
158 | |||
159 | tlbwe r3,r0,PPC44x_TLB_PAGEID /* Load the pageid fields */ | ||
160 | tlbwe r4,r0,PPC44x_TLB_XLAT /* Load the translation fields */ | ||
161 | tlbwe r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */ | ||
162 | |||
163 | /* Force context change */ | ||
164 | mfmsr r0 | ||
165 | mtspr SPRN_SRR1, r0 | ||
166 | lis r0,3f@h | ||
167 | ori r0,r0,3f@l | ||
168 | mtspr SPRN_SRR0,r0 | ||
169 | sync | ||
170 | rfi | ||
171 | |||
172 | /* If necessary, invalidate original entry we used */ | ||
173 | 3: cmpwi r23,63 | ||
174 | beq 4f | ||
175 | li r6,0 | ||
176 | tlbwe r6,r23,PPC44x_TLB_PAGEID | ||
177 | isync | ||
178 | |||
179 | 4: | ||
180 | #ifdef CONFIG_SERIAL_TEXT_DEBUG | ||
181 | /* | ||
182 | * Add temporary UART mapping for early debug. | ||
183 | * We can map UART registers wherever we want as long as they don't | ||
184 | * interfere with other system mappings (e.g. with pinned entries). | ||
185 | * For an example of how we handle this, see ocotea.h. --ebs | ||
186 | */ | ||
187 | /* pageid fields */ | ||
188 | lis r3,UART0_IO_BASE@h | ||
189 | ori r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_4K | ||
190 | |||
191 | /* xlat fields */ | ||
192 | lis r4,UART0_PHYS_IO_BASE@h /* RPN depends on SoC */ | ||
193 | #ifndef CONFIG_440EP | ||
194 | ori r4,r4,0x0001 /* ERPN is 1 for second 4GB page */ | ||
195 | #endif | ||
196 | |||
197 | /* attrib fields */ | ||
198 | li r5,0 | ||
199 | ori r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_I | PPC44x_TLB_G) | ||
200 | |||
201 | li r0,0 /* TLB slot 0 */ | ||
202 | |||
203 | tlbwe r3,r0,PPC44x_TLB_PAGEID /* Load the pageid fields */ | ||
204 | tlbwe r4,r0,PPC44x_TLB_XLAT /* Load the translation fields */ | ||
205 | tlbwe r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */ | ||
206 | |||
207 | /* Force context change */ | ||
208 | isync | ||
209 | #endif /* CONFIG_SERIAL_TEXT_DEBUG */ | ||
210 | |||
211 | /* Establish the interrupt vector offsets */ | ||
212 | SET_IVOR(0, CriticalInput); | ||
213 | SET_IVOR(1, MachineCheck); | ||
214 | SET_IVOR(2, DataStorage); | ||
215 | SET_IVOR(3, InstructionStorage); | ||
216 | SET_IVOR(4, ExternalInput); | ||
217 | SET_IVOR(5, Alignment); | ||
218 | SET_IVOR(6, Program); | ||
219 | SET_IVOR(7, FloatingPointUnavailable); | ||
220 | SET_IVOR(8, SystemCall); | ||
221 | SET_IVOR(9, AuxillaryProcessorUnavailable); | ||
222 | SET_IVOR(10, Decrementer); | ||
223 | SET_IVOR(11, FixedIntervalTimer); | ||
224 | SET_IVOR(12, WatchdogTimer); | ||
225 | SET_IVOR(13, DataTLBError); | ||
226 | SET_IVOR(14, InstructionTLBError); | ||
227 | SET_IVOR(15, Debug); | ||
228 | |||
229 | /* Establish the interrupt vector base */ | ||
230 | lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */ | ||
231 | mtspr SPRN_IVPR,r4 | ||
232 | |||
233 | #ifdef CONFIG_440EP | ||
234 | /* Clear DAPUIB flag in CCR0 (enable APU between CPU and FPU) */ | ||
235 | mfspr r2,SPRN_CCR0 | ||
236 | lis r3,0xffef | ||
237 | ori r3,r3,0xffff | ||
238 | and r2,r2,r3 | ||
239 | mtspr SPRN_CCR0,r2 | ||
240 | isync | ||
241 | #endif | ||
242 | |||
243 | /* | ||
244 | * This is where the main kernel code starts. | ||
245 | */ | ||
246 | |||
247 | /* ptr to current */ | ||
248 | lis r2,init_task@h | ||
249 | ori r2,r2,init_task@l | ||
250 | |||
251 | /* ptr to current thread */ | ||
252 | addi r4,r2,THREAD /* init task's THREAD */ | ||
253 | mtspr SPRN_SPRG3,r4 | ||
254 | |||
255 | /* stack */ | ||
256 | lis r1,init_thread_union@h | ||
257 | ori r1,r1,init_thread_union@l | ||
258 | li r0,0 | ||
259 | stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) | ||
260 | |||
261 | bl early_init | ||
262 | |||
263 | /* | ||
264 | * Decide what sort of machine this is and initialize the MMU. | ||
265 | */ | ||
266 | mr r3,r31 | ||
267 | mr r4,r30 | ||
268 | mr r5,r29 | ||
269 | mr r6,r28 | ||
270 | mr r7,r27 | ||
271 | bl machine_init | ||
272 | bl MMU_init | ||
273 | |||
274 | /* Setup PTE pointers for the Abatron bdiGDB */ | ||
275 | lis r6, swapper_pg_dir@h | ||
276 | ori r6, r6, swapper_pg_dir@l | ||
277 | lis r5, abatron_pteptrs@h | ||
278 | ori r5, r5, abatron_pteptrs@l | ||
279 | lis r4, KERNELBASE@h | ||
280 | ori r4, r4, KERNELBASE@l | ||
281 | stw r5, 0(r4) /* Save abatron_pteptrs at a fixed location */ | ||
282 | stw r6, 0(r5) | ||
283 | |||
284 | /* Let's move on */ | ||
285 | lis r4,start_kernel@h | ||
286 | ori r4,r4,start_kernel@l | ||
287 | lis r3,MSR_KERNEL@h | ||
288 | ori r3,r3,MSR_KERNEL@l | ||
289 | mtspr SPRN_SRR0,r4 | ||
290 | mtspr SPRN_SRR1,r3 | ||
291 | rfi /* change context and jump to start_kernel */ | ||
292 | |||
293 | /* | ||
294 | * Interrupt vector entry code | ||
295 | * | ||
296 | * The Book E MMUs are always on so we don't need to handle | ||
297 | * interrupts in real mode as with previous PPC processors. In | ||
298 | * this case we handle interrupts in the kernel virtual address | ||
299 | * space. | ||
300 | * | ||
301 | * Interrupt vectors are dynamically placed relative to the | ||
302 | * interrupt prefix as determined by the address of interrupt_base. | ||
303 | * The interrupt vector offsets are programmed using the labels | ||
304 | * for each interrupt vector entry. | ||
305 | * | ||
306 | * Interrupt vectors must be aligned on a 16 byte boundary. | ||
307 | * We align on a 32 byte cache line boundary for good measure. | ||
308 | */ | ||
309 | |||
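A one-line C model of how the core forms a vector address from IVPR and an IVORn (a hedged sketch of the 440 behaviour: IVPR supplies the high 16 bits, IVORn a 16-byte-aligned low half):

    #include <stdint.h>

    static uint32_t booke_vector_addr(uint32_t ivpr, uint32_t ivorn)
    {
        return (ivpr & 0xffff0000u) | (ivorn & 0x0000fff0u);
    }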
310 | interrupt_base: | ||
311 | /* Critical Input Interrupt */ | ||
312 | CRITICAL_EXCEPTION(0x0100, CriticalInput, UnknownException) | ||
313 | |||
314 | /* Machine Check Interrupt */ | ||
315 | #ifdef CONFIG_440A | ||
316 | MCHECK_EXCEPTION(0x0200, MachineCheck, MachineCheckException) | ||
317 | #else | ||
318 | CRITICAL_EXCEPTION(0x0200, MachineCheck, MachineCheckException) | ||
319 | #endif | ||
320 | |||
321 | /* Data Storage Interrupt */ | ||
322 | START_EXCEPTION(DataStorage) | ||
323 | mtspr SPRN_SPRG0, r10 /* Save some working registers */ | ||
324 | mtspr SPRN_SPRG1, r11 | ||
325 | mtspr SPRN_SPRG4W, r12 | ||
326 | mtspr SPRN_SPRG5W, r13 | ||
327 | mfcr r11 | ||
328 | mtspr SPRN_SPRG7W, r11 | ||
329 | |||
330 | /* | ||
331 | * Check if it was a store fault; if not, bail out | ||
332 | * because a user tried to access a kernel or | ||
333 | * read-protected page. Otherwise, get the | ||
334 | * offending address and handle it. | ||
335 | */ | ||
336 | mfspr r10, SPRN_ESR | ||
337 | andis. r10, r10, ESR_ST@h | ||
338 | beq 2f | ||
339 | |||
340 | mfspr r10, SPRN_DEAR /* Get faulting address */ | ||
341 | |||
342 | /* If we are faulting a kernel address, we have to use the | ||
343 | * kernel page tables. | ||
344 | */ | ||
345 | lis r11, TASK_SIZE@h | ||
346 | cmplw r10, r11 | ||
347 | blt+ 3f | ||
348 | lis r11, swapper_pg_dir@h | ||
349 | ori r11, r11, swapper_pg_dir@l | ||
350 | |||
351 | mfspr r12,SPRN_MMUCR | ||
352 | rlwinm r12,r12,0,0,23 /* Clear TID */ | ||
353 | |||
354 | b 4f | ||
355 | |||
356 | /* Get the PGD for the current thread */ | ||
357 | 3: | ||
358 | mfspr r11,SPRN_SPRG3 | ||
359 | lwz r11,PGDIR(r11) | ||
360 | |||
361 | /* Load PID into MMUCR TID */ | ||
362 | mfspr r12,SPRN_MMUCR /* Get MMUCR */ | ||
363 | mfspr r13,SPRN_PID /* Get PID */ | ||
364 | rlwimi r12,r13,0,24,31 /* Set TID */ | ||
365 | |||
366 | 4: | ||
367 | mtspr SPRN_MMUCR,r12 | ||
368 | |||
369 | rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */ | ||
370 | lwzx r11, r12, r11 /* Get pgd/pmd entry */ | ||
371 | rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */ | ||
372 | beq 2f /* Bail if no table */ | ||
373 | |||
374 | rlwimi r12, r10, 23, 20, 28 /* Compute pte address */ | ||
375 | lwz r11, 4(r12) /* Get pte entry */ | ||
376 | |||
377 | andi. r13, r11, _PAGE_RW /* Is it writeable? */ | ||
378 | beq 2f /* Bail if not */ | ||
379 | |||
380 | /* Update 'changed'. | ||
381 | */ | ||
382 | ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE | ||
383 | stw r11, 4(r12) /* Update Linux page table */ | ||
384 | |||
385 | li r13, PPC44x_TLB_SR@l /* Set SR */ | ||
386 | rlwimi r13, r11, 29, 29, 29 /* SX = _PAGE_HWEXEC */ | ||
387 | rlwimi r13, r11, 0, 30, 30 /* SW = _PAGE_RW */ | ||
388 | rlwimi r13, r11, 29, 28, 28 /* UR = _PAGE_USER */ | ||
389 | rlwimi r12, r11, 31, 26, 26 /* (_PAGE_USER>>1)->r12 */ | ||
390 | rlwimi r12, r11, 29, 30, 30 /* (_PAGE_USER>>3)->r12 */ | ||
391 | and r12, r12, r11 /* HWEXEC/RW & USER */ | ||
392 | rlwimi r13, r12, 0, 26, 26 /* UX = HWEXEC & USER */ | ||
393 | rlwimi r13, r12, 3, 27, 27 /* UW = RW & USER */ | ||
394 | |||
395 | rlwimi r11,r13,0,26,31 /* Insert static perms */ | ||
396 | |||
397 | rlwinm r11,r11,0,20,15 /* Clear U0-U3 */ | ||
398 | |||
399 | /* find the TLB index that caused the fault. It has to be here. */ | ||
400 | tlbsx r10, 0, r10 | ||
401 | |||
402 | tlbwe r11, r10, PPC44x_TLB_ATTRIB /* Write ATTRIB */ | ||
403 | |||
404 | /* Done...restore registers and get out of here. | ||
405 | */ | ||
406 | mfspr r11, SPRN_SPRG7R | ||
407 | mtcr r11 | ||
408 | mfspr r13, SPRN_SPRG5R | ||
409 | mfspr r12, SPRN_SPRG4R | ||
410 | |||
411 | mfspr r11, SPRN_SPRG1 | ||
412 | mfspr r10, SPRN_SPRG0 | ||
413 | rfi /* Force context change */ | ||
414 | |||
415 | 2: | ||
416 | /* | ||
417 | * The bailout. Restore registers to pre-exception conditions | ||
418 | * and call the heavyweights to help us out. | ||
419 | */ | ||
420 | mfspr r11, SPRN_SPRG7R | ||
421 | mtcr r11 | ||
422 | mfspr r13, SPRN_SPRG5R | ||
423 | mfspr r12, SPRN_SPRG4R | ||
424 | |||
425 | mfspr r11, SPRN_SPRG1 | ||
426 | mfspr r10, SPRN_SPRG0 | ||
427 | b data_access | ||
428 | |||
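The rlwimi sequence above folds Linux PTE bits into the 440 TLB permission bits; a C model of the logic (the bit values here are illustrative placeholders, not the real definitions):

    #include <stdint.h>

    #define M_PAGE_USER   0x001 /* placeholder PTE bits */
    #define M_PAGE_RW     0x002
    #define M_PAGE_HWEXEC 0x004

    #define M_TLB_SR 0x01       /* placeholder TLB permission bits */
    #define M_TLB_SW 0x02
    #define M_TLB_SX 0x04
    #define M_TLB_UR 0x08
    #define M_TLB_UW 0x10
    #define M_TLB_UX 0x20

    /* SR always; SW/SX from RW/HWEXEC; user bits gated on _PAGE_USER */
    static uint32_t model_pte_to_tlb_perms(uint32_t pte)
    {
        uint32_t p = M_TLB_SR;  /* the kernel can always read */

        if (pte & M_PAGE_RW)
            p |= M_TLB_SW;
        if (pte & M_PAGE_HWEXEC)
            p |= M_TLB_SX;
        if (pte & M_PAGE_USER) {
            p |= M_TLB_UR;
            if (pte & M_PAGE_RW)
                p |= M_TLB_UW;
            if (pte & M_PAGE_HWEXEC)
                p |= M_TLB_UX;
        }
        return p;
    }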
429 | /* Instruction Storage Interrupt */ | ||
430 | INSTRUCTION_STORAGE_EXCEPTION | ||
431 | |||
432 | /* External Input Interrupt */ | ||
433 | EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE) | ||
434 | |||
435 | /* Alignment Interrupt */ | ||
436 | ALIGNMENT_EXCEPTION | ||
437 | |||
438 | /* Program Interrupt */ | ||
439 | PROGRAM_EXCEPTION | ||
440 | |||
441 | /* Floating Point Unavailable Interrupt */ | ||
442 | #ifdef CONFIG_PPC_FPU | ||
443 | FP_UNAVAILABLE_EXCEPTION | ||
444 | #else | ||
445 | EXCEPTION(0x2010, FloatingPointUnavailable, UnknownException, EXC_XFER_EE) | ||
446 | #endif | ||
447 | |||
448 | /* System Call Interrupt */ | ||
449 | START_EXCEPTION(SystemCall) | ||
450 | NORMAL_EXCEPTION_PROLOG | ||
451 | EXC_XFER_EE_LITE(0x0c00, DoSyscall) | ||
452 | |||
453 | /* Auxiliary Processor Unavailable Interrupt */ | ||
454 | EXCEPTION(0x2020, AuxillaryProcessorUnavailable, UnknownException, EXC_XFER_EE) | ||
455 | |||
456 | /* Decrementer Interrupt */ | ||
457 | DECREMENTER_EXCEPTION | ||
458 | |||
459 | /* Fixed Interval Timer Interrupt */ | ||
460 | /* TODO: Add FIT support */ | ||
461 | EXCEPTION(0x1010, FixedIntervalTimer, UnknownException, EXC_XFER_EE) | ||
462 | |||
463 | /* Watchdog Timer Interrupt */ | ||
464 | /* TODO: Add watchdog support */ | ||
465 | #ifdef CONFIG_BOOKE_WDT | ||
466 | CRITICAL_EXCEPTION(0x1020, WatchdogTimer, WatchdogException) | ||
467 | #else | ||
468 | CRITICAL_EXCEPTION(0x1020, WatchdogTimer, UnknownException) | ||
469 | #endif | ||
470 | |||
471 | /* Data TLB Error Interrupt */ | ||
472 | START_EXCEPTION(DataTLBError) | ||
473 | mtspr SPRN_SPRG0, r10 /* Save some working registers */ | ||
474 | mtspr SPRN_SPRG1, r11 | ||
475 | mtspr SPRN_SPRG4W, r12 | ||
476 | mtspr SPRN_SPRG5W, r13 | ||
477 | mfcr r11 | ||
478 | mtspr SPRN_SPRG7W, r11 | ||
479 | mfspr r10, SPRN_DEAR /* Get faulting address */ | ||
480 | |||
481 | /* If we are faulting a kernel address, we have to use the | ||
482 | * kernel page tables. | ||
483 | */ | ||
484 | lis r11, TASK_SIZE@h | ||
485 | cmplw r10, r11 | ||
486 | blt+ 3f | ||
487 | lis r11, swapper_pg_dir@h | ||
488 | ori r11, r11, swapper_pg_dir@l | ||
489 | |||
490 | mfspr r12,SPRN_MMUCR | ||
491 | rlwinm r12,r12,0,0,23 /* Clear TID */ | ||
492 | |||
493 | b 4f | ||
494 | |||
495 | /* Get the PGD for the current thread */ | ||
496 | 3: | ||
497 | mfspr r11,SPRN_SPRG3 | ||
498 | lwz r11,PGDIR(r11) | ||
499 | |||
500 | /* Load PID into MMUCR TID */ | ||
501 | mfspr r12,SPRN_MMUCR | ||
502 | mfspr r13,SPRN_PID /* Get PID */ | ||
503 | rlwimi r12,r13,0,24,31 /* Set TID */ | ||
504 | |||
505 | 4: | ||
506 | mtspr SPRN_MMUCR,r12 | ||
507 | |||
508 | rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */ | ||
509 | lwzx r11, r12, r11 /* Get pgd/pmd entry */ | ||
510 | rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */ | ||
511 | beq 2f /* Bail if no table */ | ||
512 | |||
513 | rlwimi r12, r10, 23, 20, 28 /* Compute pte address */ | ||
514 | lwz r11, 4(r12) /* Get pte entry */ | ||
515 | andi. r13, r11, _PAGE_PRESENT /* Is the page present? */ | ||
516 | beq 2f /* Bail if not present */ | ||
517 | |||
518 | ori r11, r11, _PAGE_ACCESSED | ||
519 | stw r11, 4(r12) | ||
520 | |||
521 | /* Jump to common tlb load */ | ||
522 | b finish_tlb_load | ||
523 | |||
524 | 2: | ||
525 | /* The bailout. Restore registers to pre-exception conditions | ||
526 | * and call the heavyweights to help us out. | ||
527 | */ | ||
528 | mfspr r11, SPRN_SPRG7R | ||
529 | mtcr r11 | ||
530 | mfspr r13, SPRN_SPRG5R | ||
531 | mfspr r12, SPRN_SPRG4R | ||
532 | mfspr r11, SPRN_SPRG1 | ||
533 | mfspr r10, SPRN_SPRG0 | ||
534 | b data_access | ||
535 | |||
536 | /* Instruction TLB Error Interrupt */ | ||
537 | /* | ||
538 | * Nearly the same as above, except we get our | ||
539 | * information from different registers and bail out | ||
540 | * to a different point. | ||
541 | */ | ||
542 | START_EXCEPTION(InstructionTLBError) | ||
543 | mtspr SPRN_SPRG0, r10 /* Save some working registers */ | ||
544 | mtspr SPRN_SPRG1, r11 | ||
545 | mtspr SPRN_SPRG4W, r12 | ||
546 | mtspr SPRN_SPRG5W, r13 | ||
547 | mfcr r11 | ||
548 | mtspr SPRN_SPRG7W, r11 | ||
549 | mfspr r10, SPRN_SRR0 /* Get faulting address */ | ||
550 | |||
551 | /* If we are faulting a kernel address, we have to use the | ||
552 | * kernel page tables. | ||
553 | */ | ||
554 | lis r11, TASK_SIZE@h | ||
555 | cmplw r10, r11 | ||
556 | blt+ 3f | ||
557 | lis r11, swapper_pg_dir@h | ||
558 | ori r11, r11, swapper_pg_dir@l | ||
559 | |||
560 | mfspr r12,SPRN_MMUCR | ||
561 | rlwinm r12,r12,0,0,23 /* Clear TID */ | ||
562 | |||
563 | b 4f | ||
564 | |||
565 | /* Get the PGD for the current thread */ | ||
566 | 3: | ||
567 | mfspr r11,SPRN_SPRG3 | ||
568 | lwz r11,PGDIR(r11) | ||
569 | |||
570 | /* Load PID into MMUCR TID */ | ||
571 | mfspr r12,SPRN_MMUCR | ||
572 | mfspr r13,SPRN_PID /* Get PID */ | ||
573 | rlwimi r12,r13,0,24,31 /* Set TID */ | ||
574 | |||
575 | 4: | ||
576 | mtspr SPRN_MMUCR,r12 | ||
577 | |||
578 | rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */ | ||
579 | lwzx r11, r12, r11 /* Get pgd/pmd entry */ | ||
580 | rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */ | ||
581 | beq 2f /* Bail if no table */ | ||
582 | |||
583 | rlwimi r12, r10, 23, 20, 28 /* Compute pte address */ | ||
584 | lwz r11, 4(r12) /* Get pte entry */ | ||
585 | andi. r13, r11, _PAGE_PRESENT /* Is the page present? */ | ||
586 | beq 2f /* Bail if not present */ | ||
587 | |||
588 | ori r11, r11, _PAGE_ACCESSED | ||
589 | stw r11, 4(r12) | ||
590 | |||
591 | /* Jump to common TLB load point */ | ||
592 | b finish_tlb_load | ||
593 | |||
594 | 2: | ||
595 | /* The bailout. Restore registers to pre-exception conditions | ||
596 | * and call the heavyweights to help us out. | ||
597 | */ | ||
598 | mfspr r11, SPRN_SPRG7R | ||
599 | mtcr r11 | ||
600 | mfspr r13, SPRN_SPRG5R | ||
601 | mfspr r12, SPRN_SPRG4R | ||
602 | mfspr r11, SPRN_SPRG1 | ||
603 | mfspr r10, SPRN_SPRG0 | ||
604 | b InstructionStorage | ||
605 | |||
606 | /* Debug Interrupt */ | ||
607 | DEBUG_EXCEPTION | ||
608 | |||
609 | /* | ||
610 | * Local functions | ||
611 | */ | ||
612 | /* | ||
613 | * Data TLB exceptions will bail out to this point | ||
614 | * if they can't resolve the lightweight TLB fault. | ||
615 | */ | ||
616 | data_access: | ||
617 | NORMAL_EXCEPTION_PROLOG | ||
618 | mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */ | ||
619 | stw r5,_ESR(r11) | ||
620 | mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */ | ||
621 | EXC_XFER_EE_LITE(0x0300, handle_page_fault) | ||
622 | |||
623 | /* | ||
624 | * | ||
625 | * Both the instruction and data TLB miss get to this | ||
626 | * point to load the TLB. | ||
627 | * r10 - EA of fault | ||
628 | * r11 - available to use | ||
629 | * r12 - Pointer to the 64-bit PTE | ||
630 | * r13 - available to use | ||
631 | * MMUCR - loaded with proper value when we get here | ||
632 | * Upon exit, we reload everything and RFI. | ||
633 | */ | ||
634 | finish_tlb_load: | ||
635 | /* | ||
636 | * We set execute, because we don't have the granularity to | ||
637 | * properly set this at the page level (Linux problem). | ||
638 | * If shared is set, we cause a zero PID->TID load. | ||
639 | * Many of these bits are software only. Bits we don't set here | ||
640 | * are assumed (as they properly should be) to have the right value. | ||
641 | */ | ||
642 | |||
643 | /* Load the next available TLB index */ | ||
644 | lis r13, tlb_44x_index@ha | ||
645 | lwz r13, tlb_44x_index@l(r13) | ||
646 | /* Load the TLB high watermark */ | ||
647 | lis r11, tlb_44x_hwater@ha | ||
648 | lwz r11, tlb_44x_hwater@l(r11) | ||
649 | |||
650 | /* Increment, rollover, and store TLB index */ | ||
651 | addi r13, r13, 1 | ||
652 | cmpw 0, r13, r11 /* reserve entries */ | ||
653 | ble 7f | ||
654 | li r13, 0 | ||
655 | 7: | ||
656 | /* Store the next available TLB index */ | ||
657 | lis r11, tlb_44x_index@ha | ||
658 | stw r13, tlb_44x_index@l(r11) | ||
659 | |||
660 | lwz r11, 0(r12) /* Get MS word of PTE */ | ||
661 | lwz r12, 4(r12) /* Get LS word of PTE */ | ||
662 | rlwimi r11, r12, 0, 0 , 19 /* Insert RPN */ | ||
663 | tlbwe r11, r13, PPC44x_TLB_XLAT /* Write XLAT */ | ||
664 | |||
665 | /* | ||
666 | * Create PAGEID. This is the faulting address, | ||
667 | * page size, and valid flag. | ||
668 | */ | ||
669 | li r11, PPC44x_TLB_VALID | PPC44x_TLB_4K | ||
670 | rlwimi r10, r11, 0, 20, 31 /* Insert valid and page size */ | ||
671 | tlbwe r10, r13, PPC44x_TLB_PAGEID /* Write PAGEID */ | ||
672 | |||
673 | li r10, PPC44x_TLB_SR@l /* Set SR */ | ||
674 | rlwimi r10, r12, 0, 30, 30 /* Set SW = _PAGE_RW */ | ||
675 | rlwimi r10, r12, 29, 29, 29 /* SX = _PAGE_HWEXEC */ | ||
676 | rlwimi r10, r12, 29, 28, 28 /* UR = _PAGE_USER */ | ||
677 | rlwimi r11, r12, 31, 26, 26 /* (_PAGE_USER>>1)->r12 */ | ||
678 | and r11, r12, r11 /* HWEXEC & USER */ | ||
679 | rlwimi r10, r11, 0, 26, 26 /* UX = HWEXEC & USER */ | ||
680 | |||
681 | rlwimi r12, r10, 0, 26, 31 /* Insert static perms */ | ||
682 | rlwinm r12, r12, 0, 20, 15 /* Clear U0-U3 */ | ||
683 | tlbwe r12, r13, PPC44x_TLB_ATTRIB /* Write ATTRIB */ | ||
684 | |||
685 | /* Done...restore registers and get out of here. | ||
686 | */ | ||
687 | mfspr r11, SPRN_SPRG7R | ||
688 | mtcr r11 | ||
689 | mfspr r13, SPRN_SPRG5R | ||
690 | mfspr r12, SPRN_SPRG4R | ||
691 | mfspr r11, SPRN_SPRG1 | ||
692 | mfspr r10, SPRN_SPRG0 | ||
693 | rfi /* Force context change */ | ||
694 | |||
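The tlb_44x_index/tlb_44x_hwater bookkeeping at the top of finish_tlb_load is a plain round-robin allocator; in C (globals passed in for clarity):

    /* Advance the replacement index, wrapping to slot 0 once it passes
     * the watermark: slots above it (up to the pinned entry in 63) are
     * reserved. */
    static unsigned int model_next_tlb_slot(unsigned int *tlb_index,
                                            unsigned int tlb_hwater)
    {
        unsigned int slot = *tlb_index + 1;

        if (slot > tlb_hwater)
            slot = 0;
        *tlb_index = slot;
        return slot;
    }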
695 | /* | ||
696 | * Global functions | ||
697 | */ | ||
698 | |||
699 | /* | ||
700 | * extern void giveup_altivec(struct task_struct *prev) | ||
701 | * | ||
702 | * The 44x core does not have an AltiVec unit. | ||
703 | */ | ||
704 | _GLOBAL(giveup_altivec) | ||
705 | blr | ||
706 | |||
707 | /* | ||
708 | * extern void giveup_fpu(struct task_struct *prev) | ||
709 | * | ||
710 | * Most 44x cores do not have an FPU. | ||
711 | */ | ||
712 | #ifndef CONFIG_PPC_FPU | ||
713 | _GLOBAL(giveup_fpu) | ||
714 | blr | ||
715 | #endif | ||
716 | |||
717 | /* | ||
718 | * extern void abort(void) | ||
719 | * | ||
720 | * At present, this routine just applies a system reset. | ||
721 | */ | ||
722 | _GLOBAL(abort) | ||
723 | mfspr r13,SPRN_DBCR0 | ||
724 | oris r13,r13,DBCR0_RST_SYSTEM@h | ||
725 | mtspr SPRN_DBCR0,r13 | ||
726 | |||
727 | _GLOBAL(set_context) | ||
728 | |||
729 | #ifdef CONFIG_BDI_SWITCH | ||
730 | /* Context switch the PTE pointer for the Abatron BDI2000. | ||
731 | * The PGDIR is the second parameter. | ||
732 | */ | ||
733 | lis r5, abatron_pteptrs@h | ||
734 | ori r5, r5, abatron_pteptrs@l | ||
735 | stw r4, 0x4(r5) | ||
736 | #endif | ||
737 | mtspr SPRN_PID,r3 | ||
738 | isync /* Force context change */ | ||
739 | blr | ||
740 | |||
741 | /* | ||
742 | * We put a few things here that have to be page-aligned. This stuff | ||
743 | * goes at the beginning of the data segment, which is page-aligned. | ||
744 | */ | ||
745 | .data | ||
746 | _GLOBAL(sdata) | ||
747 | _GLOBAL(empty_zero_page) | ||
748 | .space 4096 | ||
749 | |||
750 | /* | ||
751 | * To support >32-bit physical addresses, we use an 8KB pgdir. | ||
752 | */ | ||
753 | _GLOBAL(swapper_pg_dir) | ||
754 | .space 8192 | ||
755 | |||
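The 8KB figure follows from the 64-bit PTEs used on 44x (note the two lwz's in finish_tlb_load); a worked check of the sizing under the usual 4KB-page assumptions:

    #include <assert.h>

    int main(void)
    {
        unsigned long long pte_size = 8;                        /* 64-bit PTEs */
        unsigned long long ptes_per_page = 4096 / pte_size;     /* 512 */
        unsigned long long span_per_pgd = ptes_per_page * 4096; /* 2MB */
        unsigned long long pgd_entries = (1ULL << 32) / span_per_pgd; /* 2048 */

        assert(pgd_entries * 4 == 8192);    /* 2048 4-byte entries = 8KB */
        return 0;
    }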
756 | /* Reserve 4k for the critical exception stack and 4k for the machine | ||
757 | * check stack, per CPU, for kernel-mode exceptions */ | ||
758 | .section .bss | ||
759 | .align 12 | ||
760 | exception_stack_bottom: | ||
761 | .space BOOKE_EXCEPTION_STACK_SIZE | ||
762 | _GLOBAL(exception_stack_top) | ||
763 | |||
764 | /* | ||
765 | * This space gets a copy of optional info passed to us by the bootstrap | ||
766 | * which is used to pass parameters into the kernel like root=/dev/sda1, etc. | ||
767 | */ | ||
768 | _GLOBAL(cmd_line) | ||
769 | .space 512 | ||
770 | |||
771 | /* | ||
772 | * Room for two PTE pointers, usually the kernel and current user pointers | ||
773 | * to their respective root page table. | ||
774 | */ | ||
775 | abatron_pteptrs: | ||
776 | .space 8 | ||
777 | |||
778 | |||
diff --git a/arch/powerpc/kernel/head_4xx.S b/arch/powerpc/kernel/head_4xx.S new file mode 100644 index 000000000000..8562b807b37c --- /dev/null +++ b/arch/powerpc/kernel/head_4xx.S | |||
@@ -0,0 +1,1016 @@ | |||
1 | /* | ||
2 | * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org> | ||
3 | * Initial PowerPC version. | ||
4 | * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu> | ||
5 | * Rewritten for PReP | ||
6 | * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au> | ||
7 | * Low-level exception handlers, MMU support, and rewrite. | ||
8 | * Copyright (c) 1997 Dan Malek <dmalek@jlc.net> | ||
9 | * PowerPC 8xx modifications. | ||
10 | * Copyright (c) 1998-1999 TiVo, Inc. | ||
11 | * PowerPC 403GCX modifications. | ||
12 | * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu> | ||
13 | * PowerPC 403GCX/405GP modifications. | ||
14 | * Copyright 2000 MontaVista Software Inc. | ||
15 | * PPC405 modifications | ||
16 | * PowerPC 403GCX/405GP modifications. | ||
17 | * Author: MontaVista Software, Inc. | ||
18 | * frank_rowand@mvista.com or source@mvista.com | ||
19 | * debbie_chu@mvista.com | ||
20 | * | ||
21 | * | ||
22 | * Module name: head_4xx.S | ||
23 | * | ||
24 | * Description: | ||
25 | * Kernel execution entry point code. | ||
26 | * | ||
27 | * This program is free software; you can redistribute it and/or | ||
28 | * modify it under the terms of the GNU General Public License | ||
29 | * as published by the Free Software Foundation; either version | ||
30 | * 2 of the License, or (at your option) any later version. | ||
31 | * | ||
32 | */ | ||
33 | |||
34 | #include <linux/config.h> | ||
35 | #include <asm/processor.h> | ||
36 | #include <asm/page.h> | ||
37 | #include <asm/mmu.h> | ||
38 | #include <asm/pgtable.h> | ||
39 | #include <asm/ibm4xx.h> | ||
40 | #include <asm/cputable.h> | ||
41 | #include <asm/thread_info.h> | ||
42 | #include <asm/ppc_asm.h> | ||
43 | #include <asm/asm-offsets.h> | ||
44 | |||
45 | /* As with the other PowerPC ports, it is expected that when code | ||
46 | * execution begins here, the following registers contain valid, yet | ||
47 | * optional, information: | ||
48 | * | ||
49 | * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.) | ||
50 | * r4 - Starting address of the init RAM disk | ||
51 | * r5 - Ending address of the init RAM disk | ||
52 | * r6 - Start of kernel command line string (e.g. "mem=96m") | ||
53 | * r7 - End of kernel command line string | ||
54 | * | ||
55 | * This is all going to change RSN when we add bi_recs....... -- Dan | ||
56 | */ | ||
57 | .text | ||
58 | _GLOBAL(_stext) | ||
59 | _GLOBAL(_start) | ||
60 | |||
61 | /* Save parameters we are passed. | ||
62 | */ | ||
63 | mr r31,r3 | ||
64 | mr r30,r4 | ||
65 | mr r29,r5 | ||
66 | mr r28,r6 | ||
67 | mr r27,r7 | ||
68 | |||
69 | /* We have to turn on the MMU right away so we get cache modes | ||
70 | * set correctly. | ||
71 | */ | ||
72 | bl initial_mmu | ||
73 | |||
74 | /* We now have the lower 16 Meg mapped into TLB entries, and the caches | ||
75 | * ready to work. | ||
76 | */ | ||
77 | turn_on_mmu: | ||
78 | lis r0,MSR_KERNEL@h | ||
79 | ori r0,r0,MSR_KERNEL@l | ||
80 | mtspr SPRN_SRR1,r0 | ||
81 | lis r0,start_here@h | ||
82 | ori r0,r0,start_here@l | ||
83 | mtspr SPRN_SRR0,r0 | ||
84 | SYNC | ||
85 | rfi /* enables MMU */ | ||
86 | b . /* prevent prefetch past rfi */ | ||
87 | |||
88 | /* | ||
89 | * This area is used for temporarily saving registers during the | ||
90 | * critical exception prolog. | ||
91 | */ | ||
92 | . = 0xc0 | ||
93 | crit_save: | ||
94 | _GLOBAL(crit_r10) | ||
95 | .space 4 | ||
96 | _GLOBAL(crit_r11) | ||
97 | .space 4 | ||
98 | |||
99 | /* | ||
100 | * Exception vector entry code. This code runs with address translation | ||
101 | * turned off (i.e. using physical addresses). We assume SPRG3 has the | ||
102 | * physical address of the current task thread_struct. | ||
103 | * Note that we have to have decremented r1 before we write to any fields | ||
104 | * of the exception frame, since a critical interrupt could occur at any | ||
105 | * time, and it will write to the area immediately below the current r1. | ||
106 | */ | ||
107 | #define NORMAL_EXCEPTION_PROLOG \ | ||
108 | mtspr SPRN_SPRG0,r10; /* save two registers to work with */\ | ||
109 | mtspr SPRN_SPRG1,r11; \ | ||
110 | mtspr SPRN_SPRG2,r1; \ | ||
111 | mfcr r10; /* save CR in r10 for now */\ | ||
112 | mfspr r11,SPRN_SRR1; /* check whether user or kernel */\ | ||
113 | andi. r11,r11,MSR_PR; \ | ||
114 | beq 1f; \ | ||
115 | mfspr r1,SPRN_SPRG3; /* if from user, start at top of */\ | ||
116 | lwz r1,THREAD_INFO-THREAD(r1); /* this thread's kernel stack */\ | ||
117 | addi r1,r1,THREAD_SIZE; \ | ||
118 | 1: subi r1,r1,INT_FRAME_SIZE; /* Allocate an exception frame */\ | ||
119 | tophys(r11,r1); \ | ||
120 | stw r10,_CCR(r11); /* save various registers */\ | ||
121 | stw r12,GPR12(r11); \ | ||
122 | stw r9,GPR9(r11); \ | ||
123 | mfspr r10,SPRN_SPRG0; \ | ||
124 | stw r10,GPR10(r11); \ | ||
125 | mfspr r12,SPRN_SPRG1; \ | ||
126 | stw r12,GPR11(r11); \ | ||
127 | mflr r10; \ | ||
128 | stw r10,_LINK(r11); \ | ||
129 | mfspr r10,SPRN_SPRG2; \ | ||
130 | mfspr r12,SPRN_SRR0; \ | ||
131 | stw r10,GPR1(r11); \ | ||
132 | mfspr r9,SPRN_SRR1; \ | ||
133 | stw r10,0(r11); \ | ||
134 | rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\ | ||
135 | stw r0,GPR0(r11); \ | ||
136 | SAVE_4GPRS(3, r11); \ | ||
137 | SAVE_2GPRS(7, r11) | ||
138 | |||
139 | /* | ||
140 | * Exception prolog for critical exceptions. This is a little different | ||
141 | * from the normal exception prolog above since a critical exception | ||
142 | * can potentially occur at any point during normal exception processing. | ||
143 | * Thus we cannot use the same SPRG registers as the normal prolog above. | ||
144 | * Instead we use a couple of words of memory at low physical addresses. | ||
145 | * This is OK since we don't support SMP on these processors. | ||
146 | */ | ||
147 | #define CRITICAL_EXCEPTION_PROLOG \ | ||
148 | stw r10,crit_r10@l(0); /* save two registers to work with */\ | ||
149 | stw r11,crit_r11@l(0); \ | ||
150 | mfcr r10; /* save CR in r10 for now */\ | ||
151 | mfspr r11,SPRN_SRR3; /* check whether user or kernel */\ | ||
152 | andi. r11,r11,MSR_PR; \ | ||
153 | lis r11,critical_stack_top@h; \ | ||
154 | ori r11,r11,critical_stack_top@l; \ | ||
155 | beq 1f; \ | ||
156 | /* COMING FROM USER MODE */ \ | ||
157 | mfspr r11,SPRN_SPRG3; /* if from user, start at top of */\ | ||
158 | lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\ | ||
159 | addi r11,r11,THREAD_SIZE; \ | ||
160 | 1: subi r11,r11,INT_FRAME_SIZE; /* Allocate an exception frame */\ | ||
161 | tophys(r11,r11); \ | ||
162 | stw r10,_CCR(r11); /* save various registers */\ | ||
163 | stw r12,GPR12(r11); \ | ||
164 | stw r9,GPR9(r11); \ | ||
165 | mflr r10; \ | ||
166 | stw r10,_LINK(r11); \ | ||
167 | mfspr r12,SPRN_DEAR; /* save DEAR and ESR in the frame */\ | ||
168 | stw r12,_DEAR(r11); /* since they may have had stuff */\ | ||
169 | mfspr r9,SPRN_ESR; /* in them at the point where the */\ | ||
170 | stw r9,_ESR(r11); /* exception was taken */\ | ||
171 | mfspr r12,SPRN_SRR2; \ | ||
172 | stw r1,GPR1(r11); \ | ||
173 | mfspr r9,SPRN_SRR3; \ | ||
174 | stw r1,0(r11); \ | ||
175 | tovirt(r1,r11); \ | ||
176 | rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\ | ||
177 | stw r0,GPR0(r11); \ | ||
178 | SAVE_4GPRS(3, r11); \ | ||
179 | SAVE_2GPRS(7, r11) | ||
180 | |||
181 | /* | ||
182 | * State at this point: | ||
183 | * r9 saved in stack frame, now saved SRR3 & ~MSR_WE | ||
184 | * r10 saved in crit_r10 and in stack frame, trashed | ||
185 | * r11 saved in crit_r11 and in stack frame, | ||
186 | * now phys stack/exception frame pointer | ||
187 | * r12 saved in stack frame, now saved SRR2 | ||
188 | * CR saved in stack frame, CR0.EQ = !SRR3.PR | ||
189 | * LR, DEAR, ESR in stack frame | ||
190 | * r1 saved in stack frame, now virt stack/excframe pointer | ||
191 | * r0, r3-r8 saved in stack frame | ||
192 | */ | ||
193 | |||
194 | /* | ||
195 | * Exception vectors. | ||
196 | */ | ||
197 | #define START_EXCEPTION(n, label) \ | ||
198 | . = n; \ | ||
199 | label: | ||
200 | |||
201 | #define EXCEPTION(n, label, hdlr, xfer) \ | ||
202 | START_EXCEPTION(n, label); \ | ||
203 | NORMAL_EXCEPTION_PROLOG; \ | ||
204 | addi r3,r1,STACK_FRAME_OVERHEAD; \ | ||
205 | xfer(n, hdlr) | ||
206 | |||
207 | #define CRITICAL_EXCEPTION(n, label, hdlr) \ | ||
208 | START_EXCEPTION(n, label); \ | ||
209 | CRITICAL_EXCEPTION_PROLOG; \ | ||
210 | addi r3,r1,STACK_FRAME_OVERHEAD; \ | ||
211 | EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \ | ||
212 | NOCOPY, crit_transfer_to_handler, \ | ||
213 | ret_from_crit_exc) | ||
214 | |||
215 | #define EXC_XFER_TEMPLATE(hdlr, trap, msr, copyee, tfer, ret) \ | ||
216 | li r10,trap; \ | ||
217 | stw r10,TRAP(r11); \ | ||
218 | lis r10,msr@h; \ | ||
219 | ori r10,r10,msr@l; \ | ||
220 | copyee(r10, r9); \ | ||
221 | bl tfer; \ | ||
222 | .long hdlr; \ | ||
223 | .long ret | ||
224 | |||
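Note the trick in EXC_XFER_TEMPLATE: the two .long words after the "bl tfer" are data, not code, and the transfer routine (in the entry code, not shown in this file) picks them up through the link register, which the bl left pointing right at them. A rough C picture of what that routine does with the two words:

	/* Sketch only: how transfer code consumes the inline .long words. */
	void transfer_sketch(unsigned long lr)
	{
	        unsigned long handler = *(unsigned long *)lr;        /* .long hdlr */
	        unsigned long ret     = *(unsigned long *)(lr + 4);  /* .long ret  */

	        /* ... the real code jumps to handler and arranges for it to
	         * return through ret, using the MSR value prepared in r10. */
	        (void)handler;
	        (void)ret;
	}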
225 | #define COPY_EE(d, s) rlwimi d,s,0,16,16 | ||
226 | #define NOCOPY(d, s) | ||
227 | |||
228 | #define EXC_XFER_STD(n, hdlr) \ | ||
229 | EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, NOCOPY, transfer_to_handler_full, \ | ||
230 | ret_from_except_full) | ||
231 | |||
232 | #define EXC_XFER_LITE(n, hdlr) \ | ||
233 | EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, NOCOPY, transfer_to_handler, \ | ||
234 | ret_from_except) | ||
235 | |||
236 | #define EXC_XFER_EE(n, hdlr) \ | ||
237 | EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, COPY_EE, transfer_to_handler_full, \ | ||
238 | ret_from_except_full) | ||
239 | |||
240 | #define EXC_XFER_EE_LITE(n, hdlr) \ | ||
241 | EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, COPY_EE, transfer_to_handler, \ | ||
242 | ret_from_except) | ||
243 | |||
244 | |||
245 | /* | ||
246 | * 0x0100 - Critical Interrupt Exception | ||
247 | */ | ||
248 | CRITICAL_EXCEPTION(0x0100, CriticalInterrupt, UnknownException) | ||
249 | |||
250 | /* | ||
251 | * 0x0200 - Machine Check Exception | ||
252 | */ | ||
253 | CRITICAL_EXCEPTION(0x0200, MachineCheck, MachineCheckException) | ||
254 | |||
255 | /* | ||
256 | * 0x0300 - Data Storage Exception | ||
257 | * This happens for just a few reasons: U0 set (but we don't do that), | ||
258 | * or a zone protection fault (user violation, write to a protected page). | ||
259 | * If this is just an update of modified status, we do that quickly | ||
260 | * and exit. Otherwise, we call heavyweight functions to do the work. | ||
261 | */ | ||
262 | START_EXCEPTION(0x0300, DataStorage) | ||
263 | mtspr SPRN_SPRG0, r10 /* Save some working registers */ | ||
264 | mtspr SPRN_SPRG1, r11 | ||
265 | #ifdef CONFIG_403GCX | ||
266 | stw r12, 0(r0) | ||
267 | stw r9, 4(r0) | ||
268 | mfcr r11 | ||
269 | mfspr r12, SPRN_PID | ||
270 | stw r11, 8(r0) | ||
271 | stw r12, 12(r0) | ||
272 | #else | ||
273 | mtspr SPRN_SPRG4, r12 | ||
274 | mtspr SPRN_SPRG5, r9 | ||
275 | mfcr r11 | ||
276 | mfspr r12, SPRN_PID | ||
277 | mtspr SPRN_SPRG7, r11 | ||
278 | mtspr SPRN_SPRG6, r12 | ||
279 | #endif | ||
280 | |||
281 | /* First, check if it was a zone fault (which means a user | ||
282 | * tried to access a kernel or read-protected page - always | ||
283 | * a SEGV). All other faults here must be stores, so no | ||
284 | * need to check ESR_DST as well. */ | ||
285 | mfspr r10, SPRN_ESR | ||
286 | andis. r10, r10, ESR_DIZ@h | ||
287 | bne 2f | ||
288 | |||
289 | mfspr r10, SPRN_DEAR /* Get faulting address */ | ||
290 | |||
291 | /* If we are faulting a kernel address, we have to use the | ||
292 | * kernel page tables. | ||
293 | */ | ||
294 | lis r11, TASK_SIZE@h | ||
295 | cmplw r10, r11 | ||
296 | blt+ 3f | ||
297 | lis r11, swapper_pg_dir@h | ||
298 | ori r11, r11, swapper_pg_dir@l | ||
299 | li r9, 0 | ||
300 | mtspr SPRN_PID, r9 /* TLB will have 0 TID */ | ||
301 | b 4f | ||
302 | |||
303 | /* Get the PGD for the current thread. | ||
304 | */ | ||
305 | 3: | ||
306 | mfspr r11,SPRN_SPRG3 | ||
307 | lwz r11,PGDIR(r11) | ||
308 | 4: | ||
309 | tophys(r11, r11) | ||
310 | rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ | ||
311 | lwz r11, 0(r11) /* Get L1 entry */ | ||
312 | rlwinm. r12, r11, 0, 0, 19 /* Extract L2 (pte) base address */ | ||
313 | beq 2f /* Bail if no table */ | ||
314 | |||
315 | rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */ | ||
316 | lwz r11, 0(r12) /* Get Linux PTE */ | ||
317 | |||
318 | andi. r9, r11, _PAGE_RW /* Is it writeable? */ | ||
319 | beq 2f /* Bail if not */ | ||
320 | |||
321 | /* Update 'changed'. | ||
322 | */ | ||
323 | ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE | ||
324 | stw r11, 0(r12) /* Update Linux page table */ | ||
325 | |||
326 | /* Most of the Linux PTE is ready to load into the TLB LO. | ||
327 | * We set ZSEL, where only the LS-bit determines user access. | ||
328 | * We set execute, because we don't have the granularity to | ||
329 | * properly set this at the page level (Linux problem). | ||
330 | * If shared is set, we cause a zero PID->TID load. | ||
331 | * Many of these bits are software only. Bits we don't set | ||
332 | * here we assume (as we properly should) already have the appropriate value. | ||
333 | */ | ||
334 | li r12, 0x0ce2 | ||
335 | andc r11, r11, r12 /* Make sure 20, 21 are zero */ | ||
336 | |||
337 | /* find the TLB index that caused the fault. It has to be here. | ||
338 | */ | ||
339 | tlbsx r9, 0, r10 | ||
340 | |||
341 | tlbwe r11, r9, TLB_DATA /* Load TLB LO */ | ||
342 | |||
343 | /* Done...restore registers and get out of here. | ||
344 | */ | ||
345 | #ifdef CONFIG_403GCX | ||
346 | lwz r12, 12(r0) | ||
347 | lwz r11, 8(r0) | ||
348 | mtspr SPRN_PID, r12 | ||
349 | mtcr r11 | ||
350 | lwz r9, 4(r0) | ||
351 | lwz r12, 0(r0) | ||
352 | #else | ||
353 | mfspr r12, SPRN_SPRG6 | ||
354 | mfspr r11, SPRN_SPRG7 | ||
355 | mtspr SPRN_PID, r12 | ||
356 | mtcr r11 | ||
357 | mfspr r9, SPRN_SPRG5 | ||
358 | mfspr r12, SPRN_SPRG4 | ||
359 | #endif | ||
360 | mfspr r11, SPRN_SPRG1 | ||
361 | mfspr r10, SPRN_SPRG0 | ||
362 | PPC405_ERR77_SYNC | ||
363 | rfi /* Should sync shadow TLBs */ | ||
364 | b . /* prevent prefetch past rfi */ | ||
365 | |||
366 | 2: | ||
367 | /* The bailout. Restore registers to pre-exception conditions | ||
368 | * and call the heavyweights to help us out. | ||
369 | */ | ||
370 | #ifdef CONFIG_403GCX | ||
371 | lwz r12, 12(r0) | ||
372 | lwz r11, 8(r0) | ||
373 | mtspr SPRN_PID, r12 | ||
374 | mtcr r11 | ||
375 | lwz r9, 4(r0) | ||
376 | lwz r12, 0(r0) | ||
377 | #else | ||
378 | mfspr r12, SPRN_SPRG6 | ||
379 | mfspr r11, SPRN_SPRG7 | ||
380 | mtspr SPRN_PID, r12 | ||
381 | mtcr r11 | ||
382 | mfspr r9, SPRN_SPRG5 | ||
383 | mfspr r12, SPRN_SPRG4 | ||
384 | #endif | ||
385 | mfspr r11, SPRN_SPRG1 | ||
386 | mfspr r10, SPRN_SPRG0 | ||
387 | b DataAccess | ||
388 | |||
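The page-table walk above, with its rlwimi/rlwinm index arithmetic, is easier to follow as C. This is a sketch under the usual 4xx assumptions (32-bit effective addresses, 4 kB pages, 1024-entry pgdir and PTE tables); the function name is illustrative.

	/* Two-level walk: EA[31:22] indexes the pgdir, EA[21:12] the PTE page. */
	static unsigned long *walk_linux_pte(unsigned long *pgdir, unsigned long ea)
	{
	        unsigned long l1 = pgdir[ea >> 22];             /* rlwimi ..,12,20,29 */
	        unsigned long *pte = (unsigned long *)(l1 & ~0xfffUL);

	        if (!pte)
	                return (unsigned long *)0;              /* no table: bail */
	        return &pte[(ea >> 12) & 0x3ff];                /* rlwimi ..,22,20,29 */
	}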
389 | /* | ||
390 | * 0x0400 - Instruction Storage Exception | ||
391 | * This is caused by a fetch from non-execute or guarded pages. | ||
392 | */ | ||
393 | START_EXCEPTION(0x0400, InstructionAccess) | ||
394 | NORMAL_EXCEPTION_PROLOG | ||
395 | mr r4,r12 /* Pass SRR0 as arg2 */ | ||
396 | li r5,0 /* Pass zero as arg3 */ | ||
397 | EXC_XFER_EE_LITE(0x400, handle_page_fault) | ||
398 | |||
399 | /* 0x0500 - External Interrupt Exception */ | ||
400 | EXCEPTION(0x0500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE) | ||
401 | |||
402 | /* 0x0600 - Alignment Exception */ | ||
403 | START_EXCEPTION(0x0600, Alignment) | ||
404 | NORMAL_EXCEPTION_PROLOG | ||
405 | mfspr r4,SPRN_DEAR /* Grab the DEAR and save it */ | ||
406 | stw r4,_DEAR(r11) | ||
407 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
408 | EXC_XFER_EE(0x600, AlignmentException) | ||
409 | |||
410 | /* 0x0700 - Program Exception */ | ||
411 | START_EXCEPTION(0x0700, ProgramCheck) | ||
412 | NORMAL_EXCEPTION_PROLOG | ||
413 | mfspr r4,SPRN_ESR /* Grab the ESR and save it */ | ||
414 | stw r4,_ESR(r11) | ||
415 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
416 | EXC_XFER_STD(0x700, ProgramCheckException) | ||
417 | |||
418 | EXCEPTION(0x0800, Trap_08, UnknownException, EXC_XFER_EE) | ||
419 | EXCEPTION(0x0900, Trap_09, UnknownException, EXC_XFER_EE) | ||
420 | EXCEPTION(0x0A00, Trap_0A, UnknownException, EXC_XFER_EE) | ||
421 | EXCEPTION(0x0B00, Trap_0B, UnknownException, EXC_XFER_EE) | ||
422 | |||
423 | /* 0x0C00 - System Call Exception */ | ||
424 | START_EXCEPTION(0x0C00, SystemCall) | ||
425 | NORMAL_EXCEPTION_PROLOG | ||
426 | EXC_XFER_EE_LITE(0xc00, DoSyscall) | ||
427 | |||
428 | EXCEPTION(0x0D00, Trap_0D, UnknownException, EXC_XFER_EE) | ||
429 | EXCEPTION(0x0E00, Trap_0E, UnknownException, EXC_XFER_EE) | ||
430 | EXCEPTION(0x0F00, Trap_0F, UnknownException, EXC_XFER_EE) | ||
431 | |||
432 | /* 0x1000 - Programmable Interval Timer (PIT) Exception */ | ||
433 | START_EXCEPTION(0x1000, Decrementer) | ||
434 | NORMAL_EXCEPTION_PROLOG | ||
435 | lis r0,TSR_PIS@h | ||
436 | mtspr SPRN_TSR,r0 /* Clear the PIT exception */ | ||
437 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
438 | EXC_XFER_LITE(0x1000, timer_interrupt) | ||
439 | |||
440 | #if 0 | ||
441 | /* NOTE: | ||
442 | * FIT and WDT handlers are not implemented yet. | ||
443 | */ | ||
444 | |||
445 | /* 0x1010 - Fixed Interval Timer (FIT) Exception | ||
446 | */ | ||
447 | STND_EXCEPTION(0x1010, FITException, UnknownException) | ||
448 | |||
449 | /* 0x1020 - Watchdog Timer (WDT) Exception | ||
450 | */ | ||
451 | #ifdef CONFIG_BOOKE_WDT | ||
452 | CRITICAL_EXCEPTION(0x1020, WDTException, WatchdogException) | ||
453 | #else | ||
454 | CRITICAL_EXCEPTION(0x1020, WDTException, UnknownException) | ||
455 | #endif | ||
456 | #endif | ||
457 | |||
458 | /* 0x1100 - Data TLB Miss Exception | ||
459 | * As the name implies, translation is not in the MMU, so search the | ||
460 | * page tables and fix it. The only purpose of this function is to | ||
461 | * load TLB entries from the page table if they exist. | ||
462 | */ | ||
463 | START_EXCEPTION(0x1100, DTLBMiss) | ||
464 | mtspr SPRN_SPRG0, r10 /* Save some working registers */ | ||
465 | mtspr SPRN_SPRG1, r11 | ||
466 | #ifdef CONFIG_403GCX | ||
467 | stw r12, 0(r0) | ||
468 | stw r9, 4(r0) | ||
469 | mfcr r11 | ||
470 | mfspr r12, SPRN_PID | ||
471 | stw r11, 8(r0) | ||
472 | stw r12, 12(r0) | ||
473 | #else | ||
474 | mtspr SPRN_SPRG4, r12 | ||
475 | mtspr SPRN_SPRG5, r9 | ||
476 | mfcr r11 | ||
477 | mfspr r12, SPRN_PID | ||
478 | mtspr SPRN_SPRG7, r11 | ||
479 | mtspr SPRN_SPRG6, r12 | ||
480 | #endif | ||
481 | mfspr r10, SPRN_DEAR /* Get faulting address */ | ||
482 | |||
483 | /* If we are faulting a kernel address, we have to use the | ||
484 | * kernel page tables. | ||
485 | */ | ||
486 | lis r11, TASK_SIZE@h | ||
487 | cmplw r10, r11 | ||
488 | blt+ 3f | ||
489 | lis r11, swapper_pg_dir@h | ||
490 | ori r11, r11, swapper_pg_dir@l | ||
491 | li r9, 0 | ||
492 | mtspr SPRN_PID, r9 /* TLB will have 0 TID */ | ||
493 | b 4f | ||
494 | |||
495 | /* Get the PGD for the current thread. | ||
496 | */ | ||
497 | 3: | ||
498 | mfspr r11,SPRN_SPRG3 | ||
499 | lwz r11,PGDIR(r11) | ||
500 | 4: | ||
501 | tophys(r11, r11) | ||
502 | rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ | ||
503 | lwz r12, 0(r11) /* Get L1 entry */ | ||
504 | andi. r9, r12, _PMD_PRESENT /* Check if it points to a PTE page */ | ||
505 | beq 2f /* Bail if no table */ | ||
506 | |||
507 | rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */ | ||
508 | lwz r11, 0(r12) /* Get Linux PTE */ | ||
509 | andi. r9, r11, _PAGE_PRESENT | ||
510 | beq 5f | ||
511 | |||
512 | ori r11, r11, _PAGE_ACCESSED | ||
513 | stw r11, 0(r12) | ||
514 | |||
515 | /* Create TLB tag. This is the faulting address plus a static | ||
516 | * set of bits. These are size, valid, E, U0. | ||
517 | */ | ||
518 | li r12, 0x00c0 | ||
519 | rlwimi r10, r12, 0, 20, 31 | ||
520 | |||
521 | b finish_tlb_load | ||
522 | |||
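The tag built just above (li r12,0x00c0; rlwimi) is simply the page-aligned faulting address with a fixed set of low-order attribute bits OR'd in; as a C sketch:

	/* Sketch: TLB tag = EPN | static bits (size, valid, ...). */
	static unsigned long make_tlb_tag(unsigned long ea)
	{
	        return (ea & ~0xfffUL) | 0x0c0;   /* rlwimi r10,r12,0,20,31 */
	}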
523 | 2: /* Check for possible large-page pmd entry */ | ||
524 | rlwinm. r9, r12, 2, 22, 24 | ||
525 | beq 5f | ||
526 | |||
527 | /* Create TLB tag. This is the faulting address, plus a static | ||
528 | * set of bits (valid, E, U0) plus the size from the PMD. | ||
529 | */ | ||
530 | ori r9, r9, 0x40 | ||
531 | rlwimi r10, r9, 0, 20, 31 | ||
532 | mr r11, r12 | ||
533 | |||
534 | b finish_tlb_load | ||
535 | |||
536 | 5: | ||
537 | /* The bailout. Restore registers to pre-exception conditions | ||
538 | * and call the heavyweights to help us out. | ||
539 | */ | ||
540 | #ifdef CONFIG_403GCX | ||
541 | lwz r12, 12(r0) | ||
542 | lwz r11, 8(r0) | ||
543 | mtspr SPRN_PID, r12 | ||
544 | mtcr r11 | ||
545 | lwz r9, 4(r0) | ||
546 | lwz r12, 0(r0) | ||
547 | #else | ||
548 | mfspr r12, SPRN_SPRG6 | ||
549 | mfspr r11, SPRN_SPRG7 | ||
550 | mtspr SPRN_PID, r12 | ||
551 | mtcr r11 | ||
552 | mfspr r9, SPRN_SPRG5 | ||
553 | mfspr r12, SPRN_SPRG4 | ||
554 | #endif | ||
555 | mfspr r11, SPRN_SPRG1 | ||
556 | mfspr r10, SPRN_SPRG0 | ||
557 | b DataAccess | ||
558 | |||
559 | /* 0x1200 - Instruction TLB Miss Exception | ||
560 | * Nearly the same as above, except we get our information from different | ||
561 | * registers and bailout to a different point. | ||
562 | */ | ||
563 | START_EXCEPTION(0x1200, ITLBMiss) | ||
564 | mtspr SPRN_SPRG0, r10 /* Save some working registers */ | ||
565 | mtspr SPRN_SPRG1, r11 | ||
566 | #ifdef CONFIG_403GCX | ||
567 | stw r12, 0(r0) | ||
568 | stw r9, 4(r0) | ||
569 | mfcr r11 | ||
570 | mfspr r12, SPRN_PID | ||
571 | stw r11, 8(r0) | ||
572 | stw r12, 12(r0) | ||
573 | #else | ||
574 | mtspr SPRN_SPRG4, r12 | ||
575 | mtspr SPRN_SPRG5, r9 | ||
576 | mfcr r11 | ||
577 | mfspr r12, SPRN_PID | ||
578 | mtspr SPRN_SPRG7, r11 | ||
579 | mtspr SPRN_SPRG6, r12 | ||
580 | #endif | ||
581 | mfspr r10, SPRN_SRR0 /* Get faulting address */ | ||
582 | |||
583 | /* If we are faulting a kernel address, we have to use the | ||
584 | * kernel page tables. | ||
585 | */ | ||
586 | lis r11, TASK_SIZE@h | ||
587 | cmplw r10, r11 | ||
588 | blt+ 3f | ||
589 | lis r11, swapper_pg_dir@h | ||
590 | ori r11, r11, swapper_pg_dir@l | ||
591 | li r9, 0 | ||
592 | mtspr SPRN_PID, r9 /* TLB will have 0 TID */ | ||
593 | b 4f | ||
594 | |||
595 | /* Get the PGD for the current thread. | ||
596 | */ | ||
597 | 3: | ||
598 | mfspr r11,SPRN_SPRG3 | ||
599 | lwz r11,PGDIR(r11) | ||
600 | 4: | ||
601 | tophys(r11, r11) | ||
602 | rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ | ||
603 | lwz r12, 0(r11) /* Get L1 entry */ | ||
604 | andi. r9, r12, _PMD_PRESENT /* Check if it points to a PTE page */ | ||
605 | beq 2f /* Bail if no table */ | ||
606 | |||
607 | rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */ | ||
608 | lwz r11, 0(r12) /* Get Linux PTE */ | ||
609 | andi. r9, r11, _PAGE_PRESENT | ||
610 | beq 5f | ||
611 | |||
612 | ori r11, r11, _PAGE_ACCESSED | ||
613 | stw r11, 0(r12) | ||
614 | |||
615 | /* Create TLB tag. This is the faulting address plus a static | ||
616 | * set of bits. These are size, valid, E, U0. | ||
617 | */ | ||
618 | li r12, 0x00c0 | ||
619 | rlwimi r10, r12, 0, 20, 31 | ||
620 | |||
621 | b finish_tlb_load | ||
622 | |||
623 | 2: /* Check for possible large-page pmd entry */ | ||
624 | rlwinm. r9, r12, 2, 22, 24 | ||
625 | beq 5f | ||
626 | |||
627 | /* Create TLB tag. This is the faulting address, plus a static | ||
628 | * set of bits (valid, E, U0) plus the size from the PMD. | ||
629 | */ | ||
630 | ori r9, r9, 0x40 | ||
631 | rlwimi r10, r9, 0, 20, 31 | ||
632 | mr r11, r12 | ||
633 | |||
634 | b finish_tlb_load | ||
635 | |||
636 | 5: | ||
637 | /* The bailout. Restore registers to pre-exception conditions | ||
638 | * and call the heavyweights to help us out. | ||
639 | */ | ||
640 | #ifdef CONFIG_403GCX | ||
641 | lwz r12, 12(r0) | ||
642 | lwz r11, 8(r0) | ||
643 | mtspr SPRN_PID, r12 | ||
644 | mtcr r11 | ||
645 | lwz r9, 4(r0) | ||
646 | lwz r12, 0(r0) | ||
647 | #else | ||
648 | mfspr r12, SPRN_SPRG6 | ||
649 | mfspr r11, SPRN_SPRG7 | ||
650 | mtspr SPRN_PID, r12 | ||
651 | mtcr r11 | ||
652 | mfspr r9, SPRN_SPRG5 | ||
653 | mfspr r12, SPRN_SPRG4 | ||
654 | #endif | ||
655 | mfspr r11, SPRN_SPRG1 | ||
656 | mfspr r10, SPRN_SPRG0 | ||
657 | b InstructionAccess | ||
658 | |||
659 | EXCEPTION(0x1300, Trap_13, UnknownException, EXC_XFER_EE) | ||
660 | EXCEPTION(0x1400, Trap_14, UnknownException, EXC_XFER_EE) | ||
661 | EXCEPTION(0x1500, Trap_15, UnknownException, EXC_XFER_EE) | ||
662 | EXCEPTION(0x1600, Trap_16, UnknownException, EXC_XFER_EE) | ||
663 | #ifdef CONFIG_IBM405_ERR51 | ||
664 | /* 405GP errata 51 */ | ||
665 | START_EXCEPTION(0x1700, Trap_17) | ||
666 | b DTLBMiss | ||
667 | #else | ||
668 | EXCEPTION(0x1700, Trap_17, UnknownException, EXC_XFER_EE) | ||
669 | #endif | ||
670 | EXCEPTION(0x1800, Trap_18, UnknownException, EXC_XFER_EE) | ||
671 | EXCEPTION(0x1900, Trap_19, UnknownException, EXC_XFER_EE) | ||
672 | EXCEPTION(0x1A00, Trap_1A, UnknownException, EXC_XFER_EE) | ||
673 | EXCEPTION(0x1B00, Trap_1B, UnknownException, EXC_XFER_EE) | ||
674 | EXCEPTION(0x1C00, Trap_1C, UnknownException, EXC_XFER_EE) | ||
675 | EXCEPTION(0x1D00, Trap_1D, UnknownException, EXC_XFER_EE) | ||
676 | EXCEPTION(0x1E00, Trap_1E, UnknownException, EXC_XFER_EE) | ||
677 | EXCEPTION(0x1F00, Trap_1F, UnknownException, EXC_XFER_EE) | ||
678 | |||
679 | /* Check for a single step debug exception while in an exception | ||
680 | * handler before state has been saved. This is to catch the case | ||
681 | * where an instruction that we are trying to single step causes | ||
682 | * an exception (eg ITLB/DTLB miss) and thus the first instruction of | ||
683 | * the exception handler generates a single step debug exception. | ||
684 | * | ||
685 | * If we get a debug trap on the first instruction of an exception handler, | ||
686 | * we reset the MSR_DE in the _exception handler's_ MSR (the debug trap is | ||
687 | * a critical exception, so we are using SPRN_CSRR1 to manipulate the MSR). | ||
688 | * The exception handler was handling a non-critical interrupt, so it will | ||
689 | * save (and later restore) the MSR via SPRN_SRR1, which will still have | ||
690 | * the MSR_DE bit set. | ||
691 | */ | ||
692 | /* 0x2000 - Debug Exception */ | ||
693 | START_EXCEPTION(0x2000, DebugTrap) | ||
694 | CRITICAL_EXCEPTION_PROLOG | ||
695 | |||
696 | /* | ||
697 | * If this is a single step or branch-taken exception in an | ||
698 | * exception entry sequence, it was probably meant to apply to | ||
699 | * the code where the exception occurred (since exception entry | ||
700 | * doesn't turn off DE automatically). We simulate the effect | ||
701 | * of turning off DE on entry to an exception handler by turning | ||
702 | * off DE in the SRR3 value and clearing the debug status. | ||
703 | */ | ||
704 | mfspr r10,SPRN_DBSR /* check single-step/branch taken */ | ||
705 | andis. r10,r10,DBSR_IC@h | ||
706 | beq+ 2f | ||
707 | |||
708 | andi. r10,r9,MSR_IR|MSR_PR /* check supervisor + MMU off */ | ||
709 | beq 1f /* branch and fix it up */ | ||
710 | |||
711 | mfspr r10,SPRN_SRR2 /* Faulting instruction address */ | ||
712 | cmplwi r10,0x2100 | ||
713 | bgt+ 2f /* address above exception vectors */ | ||
714 | |||
715 | /* here it looks like we got an inappropriate debug exception. */ | ||
716 | 1: rlwinm r9,r9,0,~MSR_DE /* clear DE in the SRR3 value */ | ||
717 | lis r10,DBSR_IC@h /* clear the IC event */ | ||
718 | mtspr SPRN_DBSR,r10 | ||
719 | /* restore state and get out */ | ||
720 | lwz r10,_CCR(r11) | ||
721 | lwz r0,GPR0(r11) | ||
722 | lwz r1,GPR1(r11) | ||
723 | mtcrf 0x80,r10 | ||
724 | mtspr SPRN_SRR2,r12 | ||
725 | mtspr SPRN_SRR3,r9 | ||
726 | lwz r9,GPR9(r11) | ||
727 | lwz r12,GPR12(r11) | ||
728 | lwz r10,crit_r10@l(0) | ||
729 | lwz r11,crit_r11@l(0) | ||
730 | PPC405_ERR77_SYNC | ||
731 | rfci | ||
732 | b . | ||
733 | |||
734 | /* continue normal handling for a critical exception... */ | ||
735 | 2: mfspr r4,SPRN_DBSR | ||
736 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
737 | EXC_XFER_TEMPLATE(DebugException, 0x2002, \ | ||
738 | (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \ | ||
739 | NOCOPY, crit_transfer_to_handler, ret_from_crit_exc) | ||
740 | |||
741 | /* | ||
742 | * The other Data TLB exceptions bail out to this point | ||
743 | * if they can't resolve the lightweight TLB fault. | ||
744 | */ | ||
745 | DataAccess: | ||
746 | NORMAL_EXCEPTION_PROLOG | ||
747 | mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */ | ||
748 | stw r5,_ESR(r11) | ||
749 | mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */ | ||
750 | EXC_XFER_EE_LITE(0x300, handle_page_fault) | ||
751 | |||
752 | /* Other PowerPC processors, namely those derived from the 6xx-series, | ||
753 | * have vectors from 0x2100 through 0x2F00 defined, but marked as reserved. | ||
754 | * However, for the 4xx-series processors these are neither defined nor | ||
755 | * reserved. | ||
756 | */ | ||
757 | |||
758 | /* Damn, I came up one instruction too many to fit into the | ||
759 | * exception space :-). Both the instruction and data TLB | ||
760 | * miss get to this point to load the TLB. | ||
761 | * r10 - TLB_TAG value | ||
762 | * r11 - Linux PTE | ||
763 | * r12, r9 - available to use | ||
764 | * PID - loaded with proper value when we get here | ||
765 | * Upon exit, we reload everything and RFI. | ||
766 | * Actually, it will fit now, but oh well.....a common place | ||
767 | * to load the TLB. | ||
768 | */ | ||
769 | tlb_4xx_index: | ||
770 | .long 0 | ||
771 | finish_tlb_load: | ||
772 | /* load the next available TLB index. | ||
773 | */ | ||
774 | lwz r9, tlb_4xx_index@l(0) | ||
775 | addi r9, r9, 1 | ||
776 | andi. r9, r9, (PPC4XX_TLB_SIZE-1) | ||
777 | stw r9, tlb_4xx_index@l(0) | ||
778 | |||
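The andi. masking above implements a cheap round-robin victim choice; it only works because the TLB size is a power of two (64 entries on these parts). As a sketch:

	/* next victim = (idx + 1) % PPC4XX_TLB_SIZE, via a power-of-two mask */
	static unsigned int next_tlb_index(unsigned int idx)
	{
	        return (idx + 1) & (64 - 1);      /* 64 == PPC4XX_TLB_SIZE */
	}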
779 | 6: | ||
780 | /* | ||
781 | * Clear out the software-only bits in the PTE to generate the | ||
782 | * TLB_DATA value. These are the bottom 2 bits of the RPN, the | ||
783 | * top 3 bits of the zone field, and M. | ||
784 | */ | ||
785 | li r12, 0x0ce2 | ||
786 | andc r11, r11, r12 | ||
787 | |||
788 | tlbwe r11, r9, TLB_DATA /* Load TLB LO */ | ||
789 | tlbwe r10, r9, TLB_TAG /* Load TLB HI */ | ||
790 | |||
791 | /* Done...restore registers and get out of here. | ||
792 | */ | ||
793 | #ifdef CONFIG_403GCX | ||
794 | lwz r12, 12(r0) | ||
795 | lwz r11, 8(r0) | ||
796 | mtspr SPRN_PID, r12 | ||
797 | mtcr r11 | ||
798 | lwz r9, 4(r0) | ||
799 | lwz r12, 0(r0) | ||
800 | #else | ||
801 | mfspr r12, SPRN_SPRG6 | ||
802 | mfspr r11, SPRN_SPRG7 | ||
803 | mtspr SPRN_PID, r12 | ||
804 | mtcr r11 | ||
805 | mfspr r9, SPRN_SPRG5 | ||
806 | mfspr r12, SPRN_SPRG4 | ||
807 | #endif | ||
808 | mfspr r11, SPRN_SPRG1 | ||
809 | mfspr r10, SPRN_SPRG0 | ||
810 | PPC405_ERR77_SYNC | ||
811 | rfi /* Should sync shadow TLBs */ | ||
812 | b . /* prevent prefetch past rfi */ | ||
813 | |||
814 | /* extern void giveup_fpu(struct task_struct *prev) | ||
815 | * | ||
816 | * The PowerPC 4xx family of processors does not have an FPU, so this just | ||
817 | * returns. | ||
818 | */ | ||
819 | _GLOBAL(giveup_fpu) | ||
820 | blr | ||
821 | |||
822 | /* This is where the main kernel code starts. | ||
823 | */ | ||
824 | start_here: | ||
825 | |||
826 | /* ptr to current */ | ||
827 | lis r2,init_task@h | ||
828 | ori r2,r2,init_task@l | ||
829 | |||
830 | /* ptr to phys current thread */ | ||
831 | tophys(r4,r2) | ||
832 | addi r4,r4,THREAD /* init task's THREAD */ | ||
833 | mtspr SPRN_SPRG3,r4 | ||
834 | |||
835 | /* stack */ | ||
836 | lis r1,init_thread_union@ha | ||
837 | addi r1,r1,init_thread_union@l | ||
838 | li r0,0 | ||
839 | stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) | ||
840 | |||
841 | bl early_init /* We have to do this with MMU on */ | ||
842 | |||
843 | /* | ||
844 | * Decide what sort of machine this is and initialize the MMU. | ||
845 | */ | ||
846 | mr r3,r31 | ||
847 | mr r4,r30 | ||
848 | mr r5,r29 | ||
849 | mr r6,r28 | ||
850 | mr r7,r27 | ||
851 | bl machine_init | ||
852 | bl MMU_init | ||
853 | |||
854 | /* Go back to running unmapped so we can load up new values | ||
855 | * and change to using our exception vectors. | ||
856 | * On the 4xx, all we have to do is invalidate the TLB to clear | ||
857 | * the old 16M byte TLB mappings. | ||
858 | */ | ||
859 | lis r4,2f@h | ||
860 | ori r4,r4,2f@l | ||
861 | tophys(r4,r4) | ||
862 | lis r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@h | ||
863 | ori r3,r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@l | ||
864 | mtspr SPRN_SRR0,r4 | ||
865 | mtspr SPRN_SRR1,r3 | ||
866 | rfi | ||
867 | b . /* prevent prefetch past rfi */ | ||
868 | |||
869 | /* Load up the kernel context */ | ||
870 | 2: | ||
871 | sync /* Flush to memory before changing TLB */ | ||
872 | tlbia | ||
873 | isync /* Flush shadow TLBs */ | ||
874 | |||
875 | /* set up the PTE pointers for the Abatron bdiGDB. | ||
876 | */ | ||
877 | lis r6, swapper_pg_dir@h | ||
878 | ori r6, r6, swapper_pg_dir@l | ||
879 | lis r5, abatron_pteptrs@h | ||
880 | ori r5, r5, abatron_pteptrs@l | ||
881 | stw r5, 0xf0(r0) /* Must match your Abatron config file */ | ||
882 | tophys(r5,r5) | ||
883 | stw r6, 0(r5) | ||
884 | |||
885 | /* Now turn on the MMU for real! */ | ||
886 | lis r4,MSR_KERNEL@h | ||
887 | ori r4,r4,MSR_KERNEL@l | ||
888 | lis r3,start_kernel@h | ||
889 | ori r3,r3,start_kernel@l | ||
890 | mtspr SPRN_SRR0,r3 | ||
891 | mtspr SPRN_SRR1,r4 | ||
892 | rfi /* enable MMU and jump to start_kernel */ | ||
893 | b . /* prevent prefetch past rfi */ | ||
894 | |||
895 | /* Set up the initial MMU state so we can do the first level of | ||
896 | * kernel initialization. This maps the first 16 MBytes of memory 1:1 | ||
897 | * virtual to physical and more importantly sets the cache mode. | ||
898 | */ | ||
899 | initial_mmu: | ||
900 | tlbia /* Invalidate all TLB entries */ | ||
901 | isync | ||
902 | |||
903 | /* We should still be executing code at physical address 0x0000xxxx | ||
904 | * at this point. However, start_here is at virtual address | ||
905 | * 0xC000xxxx. So, set up a TLB mapping to cover this once | ||
906 | * translation is enabled. | ||
907 | */ | ||
908 | |||
909 | lis r3,KERNELBASE@h /* Load the kernel virtual address */ | ||
910 | ori r3,r3,KERNELBASE@l | ||
911 | tophys(r4,r3) /* Load the kernel physical address */ | ||
912 | |||
913 | iccci r0,r3 /* Invalidate the i-cache before use */ | ||
914 | |||
915 | /* Load the kernel PID. | ||
916 | */ | ||
917 | li r0,0 | ||
918 | mtspr SPRN_PID,r0 | ||
919 | sync | ||
920 | |||
921 | /* Configure and load one entry into TLB slot 63. | ||
922 | * In case we are pinning TLBs, this slot is reserved by the | ||
923 | * other TLB functions. If not reserving, then it doesn't | ||
924 | * matter where it is loaded. | ||
925 | */ | ||
926 | clrrwi r4,r4,10 /* Mask off the real page number */ | ||
927 | ori r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */ | ||
928 | |||
929 | clrrwi r3,r3,10 /* Mask off the effective page number */ | ||
930 | ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_16M)) | ||
931 | |||
932 | li r0,63 /* TLB slot 63 */ | ||
933 | |||
934 | tlbwe r4,r0,TLB_DATA /* Load the data portion of the entry */ | ||
935 | tlbwe r3,r0,TLB_TAG /* Load the tag portion of the entry */ | ||
936 | |||
937 | #if defined(CONFIG_SERIAL_TEXT_DEBUG) && defined(SERIAL_DEBUG_IO_BASE) | ||
938 | |||
939 | /* Load a TLB entry for the UART, so that ppc4xx_progress() can use | ||
940 | * the UARTs nice and early. We use a 4k real==virtual mapping. */ | ||
941 | |||
942 | lis r3,SERIAL_DEBUG_IO_BASE@h | ||
943 | ori r3,r3,SERIAL_DEBUG_IO_BASE@l | ||
944 | mr r4,r3 | ||
945 | clrrwi r4,r4,12 | ||
946 | ori r4,r4,(TLB_WR|TLB_I|TLB_M|TLB_G) | ||
947 | |||
948 | clrrwi r3,r3,12 | ||
949 | ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K)) | ||
950 | |||
951 | li r0,0 /* TLB slot 0 */ | ||
952 | tlbwe r4,r0,TLB_DATA | ||
953 | tlbwe r3,r0,TLB_TAG | ||
954 | #endif /* CONFIG_SERIAL_TEXT_DEBUG && SERIAL_DEBUG_IO_BASE */ | ||
955 | |||
956 | isync | ||
957 | |||
958 | /* Establish the exception vector base | ||
959 | */ | ||
960 | lis r4,KERNELBASE@h /* EVPR only uses the high 16-bits */ | ||
961 | tophys(r0,r4) /* Use the physical address */ | ||
962 | mtspr SPRN_EVPR,r0 | ||
963 | |||
964 | blr | ||
965 | |||
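For reference, the pinned 16 MB boot mapping that initial_mmu loads into slot 63 decomposes as below. The TLB_* names are assumed to be the <asm/mmu.h> encodings used in the assembly above, and the struct is purely illustrative.

	/* Sketch of the slot-63 entry (clrrwi ..,10 == clear the low 10 bits) */
	struct tlb41x_entry { unsigned long tag, data; };

	static struct tlb41x_entry boot_16m_entry(unsigned long virt,
	                                          unsigned long phys)
	{
	        struct tlb41x_entry e;

	        e.data = (phys & ~0x3ffUL) | TLB_WR | TLB_EX;   /* writable + exec */
	        e.tag  = (virt & ~0x3ffUL) | TLB_VALID | TLB_PAGESZ(PAGESZ_16M);
	        return e;
	}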
966 | _GLOBAL(abort) | ||
967 | mfspr r13,SPRN_DBCR0 | ||
968 | oris r13,r13,DBCR0_RST_SYSTEM@h | ||
969 | mtspr SPRN_DBCR0,r13 | ||
970 | |||
971 | _GLOBAL(set_context) | ||
972 | |||
973 | #ifdef CONFIG_BDI_SWITCH | ||
974 | /* Context switch the PTE pointer for the Abatron BDI2000. | ||
975 | * The PGDIR is the second parameter. | ||
976 | */ | ||
977 | lis r5, KERNELBASE@h | ||
978 | lwz r5, 0xf0(r5) | ||
979 | stw r4, 0x4(r5) | ||
980 | #endif | ||
981 | sync | ||
982 | mtspr SPRN_PID,r3 | ||
983 | isync /* Need an isync to flush shadow */ | ||
984 | /* TLBs after changing PID */ | ||
985 | blr | ||
986 | |||
987 | /* We put a few things here that have to be page-aligned. This stuff | ||
988 | * goes at the beginning of the data segment, which is page-aligned. | ||
989 | */ | ||
990 | .data | ||
991 | _GLOBAL(sdata) | ||
992 | _GLOBAL(empty_zero_page) | ||
993 | .space 4096 | ||
994 | _GLOBAL(swapper_pg_dir) | ||
995 | .space 4096 | ||
996 | |||
997 | |||
998 | /* Stack for handling critical exceptions from kernel mode */ | ||
999 | .section .bss | ||
1000 | .align 12 | ||
1001 | exception_stack_bottom: | ||
1002 | .space 4096 | ||
1003 | critical_stack_top: | ||
1004 | _GLOBAL(exception_stack_top) | ||
1005 | |||
1006 | /* This space gets a copy of optional info passed to us by the bootstrap | ||
1007 | * which is used to pass parameters into the kernel like root=/dev/sda1, etc. | ||
1008 | */ | ||
1009 | _GLOBAL(cmd_line) | ||
1010 | .space 512 | ||
1011 | |||
1012 | /* Room for two PTE pointers, usually the kernel and current user pointers | ||
1013 | * to their respective root page table. | ||
1014 | */ | ||
1015 | abatron_pteptrs: | ||
1016 | .space 8 | ||
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S new file mode 100644 index 000000000000..22a5ee07e1ea --- /dev/null +++ b/arch/powerpc/kernel/head_64.S | |||
@@ -0,0 +1,2011 @@ | |||
1 | /* | ||
2 | * arch/ppc64/kernel/head.S | ||
3 | * | ||
4 | * PowerPC version | ||
5 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
6 | * | ||
7 | * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP | ||
8 | * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> | ||
9 | * Adapted for Power Macintosh by Paul Mackerras. | ||
10 | * Low-level exception handlers and MMU support | ||
11 | * rewritten by Paul Mackerras. | ||
12 | * Copyright (C) 1996 Paul Mackerras. | ||
13 | * | ||
14 | * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and | ||
15 | * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com | ||
16 | * | ||
17 | * This file contains the low-level support and setup for the | ||
18 | * PowerPC-64 platform, including trap and interrupt dispatch. | ||
19 | * | ||
20 | * This program is free software; you can redistribute it and/or | ||
21 | * modify it under the terms of the GNU General Public License | ||
22 | * as published by the Free Software Foundation; either version | ||
23 | * 2 of the License, or (at your option) any later version. | ||
24 | */ | ||
25 | |||
26 | #include <linux/config.h> | ||
27 | #include <linux/threads.h> | ||
28 | #include <asm/processor.h> | ||
29 | #include <asm/page.h> | ||
30 | #include <asm/mmu.h> | ||
31 | #include <asm/systemcfg.h> | ||
32 | #include <asm/ppc_asm.h> | ||
33 | #include <asm/asm-offsets.h> | ||
34 | #include <asm/bug.h> | ||
35 | #include <asm/cputable.h> | ||
36 | #include <asm/setup.h> | ||
37 | #include <asm/hvcall.h> | ||
38 | #include <asm/iSeries/LparMap.h> | ||
39 | |||
40 | #ifdef CONFIG_PPC_ISERIES | ||
41 | #define DO_SOFT_DISABLE | ||
42 | #endif | ||
43 | |||
44 | /* | ||
45 | * We layout physical memory as follows: | ||
46 | * 0x0000 - 0x00ff : Secondary processor spin code | ||
47 | * 0x0100 - 0x2fff : pSeries Interrupt prologs | ||
48 | * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs | ||
49 | * 0x6000 - 0x6fff : Initial (CPU0) segment table | ||
50 | * 0x7000 - 0x7fff : FWNMI data area | ||
51 | * 0x8000 - : Early init and support code | ||
52 | */ | ||
53 | |||
54 | /* | ||
55 | * SPRG Usage | ||
56 | * | ||
57 | * Register Definition | ||
58 | * | ||
59 | * SPRG0 reserved for hypervisor | ||
60 | * SPRG1 temp - used to save gpr | ||
61 | * SPRG2 temp - used to save gpr | ||
62 | * SPRG3 virt addr of paca | ||
63 | */ | ||
64 | |||
65 | /* | ||
66 | * Entering into this code we make the following assumptions: | ||
67 | * For pSeries: | ||
68 | * 1. The MMU is off & open firmware is running in real mode. | ||
69 | * 2. The kernel is entered at __start | ||
70 | * | ||
71 | * For iSeries: | ||
72 | * 1. The MMU is on (as it always is for iSeries) | ||
73 | * 2. The kernel is entered at system_reset_iSeries | ||
74 | */ | ||
75 | |||
76 | .text | ||
77 | .globl _stext | ||
78 | _stext: | ||
79 | #ifdef CONFIG_PPC_MULTIPLATFORM | ||
80 | _GLOBAL(__start) | ||
81 | /* NOP this out unconditionally */ | ||
82 | BEGIN_FTR_SECTION | ||
83 | b .__start_initialization_multiplatform | ||
84 | END_FTR_SECTION(0, 1) | ||
85 | #endif /* CONFIG_PPC_MULTIPLATFORM */ | ||
86 | |||
87 | /* Catch branch to 0 in real mode */ | ||
88 | trap | ||
89 | |||
90 | #ifdef CONFIG_PPC_ISERIES | ||
91 | /* | ||
92 | * At offset 0x20, there is a pointer to iSeries LPAR data. | ||
93 | * This is required by the hypervisor | ||
94 | */ | ||
95 | . = 0x20 | ||
96 | .llong hvReleaseData-KERNELBASE | ||
97 | |||
98 | /* | ||
99 | * At offset 0x28 and 0x30 are offsets to the mschunks_map | ||
100 | * array (used by the iSeries LPAR debugger to do translation | ||
101 | * between physical addresses and absolute addresses) and | ||
102 | * to the pidhash table (also used by the debugger) | ||
103 | */ | ||
104 | .llong mschunks_map-KERNELBASE | ||
105 | .llong 0 /* pidhash-KERNELBASE SFRXXX */ | ||
106 | |||
107 | /* Offset 0x38 - Pointer to start of embedded System.map */ | ||
108 | .globl embedded_sysmap_start | ||
109 | embedded_sysmap_start: | ||
110 | .llong 0 | ||
111 | /* Offset 0x40 - Pointer to end of embedded System.map */ | ||
112 | .globl embedded_sysmap_end | ||
113 | embedded_sysmap_end: | ||
114 | .llong 0 | ||
115 | |||
116 | #endif /* CONFIG_PPC_ISERIES */ | ||
117 | |||
118 | /* Secondary processors spin on this value until it goes to 1. */ | ||
119 | .globl __secondary_hold_spinloop | ||
120 | __secondary_hold_spinloop: | ||
121 | .llong 0x0 | ||
122 | |||
123 | /* Secondary processors write this value with their cpu # */ | ||
124 | /* after they enter the spin loop immediately below. */ | ||
125 | .globl __secondary_hold_acknowledge | ||
126 | __secondary_hold_acknowledge: | ||
127 | .llong 0x0 | ||
128 | |||
129 | . = 0x60 | ||
130 | /* | ||
131 | * The following code is used on pSeries to hold secondary processors | ||
132 | * in a spin loop after they have been freed from OpenFirmware, but | ||
133 | * before the bulk of the kernel has been relocated. This code | ||
134 | * is relocated to physical address 0x60 before prom_init is run. | ||
135 | * All of it must fit below the first exception vector at 0x100. | ||
136 | */ | ||
137 | _GLOBAL(__secondary_hold) | ||
138 | mfmsr r24 | ||
139 | ori r24,r24,MSR_RI | ||
140 | mtmsrd r24 /* RI on */ | ||
141 | |||
142 | /* Grab our linux cpu number */ | ||
143 | mr r24,r3 | ||
144 | |||
145 | /* Tell the master cpu we're here */ | ||
146 | /* Relocation is off & we are located at an address less */ | ||
147 | /* than 0x100, so only need to grab low order offset. */ | ||
148 | std r24,__secondary_hold_acknowledge@l(0) | ||
149 | sync | ||
150 | |||
151 | /* All secondary cpus wait here until told to start. */ | ||
152 | 100: ld r4,__secondary_hold_spinloop@l(0) | ||
153 | cmpdi 0,r4,1 | ||
154 | bne 100b | ||
155 | |||
156 | #ifdef CONFIG_HMT | ||
157 | b .hmt_init | ||
158 | #else | ||
159 | #ifdef CONFIG_SMP | ||
160 | mr r3,r24 | ||
161 | b .pSeries_secondary_smp_init | ||
162 | #else | ||
163 | BUG_OPCODE | ||
164 | #endif | ||
165 | #endif | ||
166 | |||
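Seen from a secondary CPU, the hold protocol above is a simple handshake on the two .llong cells defined earlier in the file; roughly, in C:

	/* Sketch of __secondary_hold's handshake (cells defined above). */
	extern volatile unsigned long __secondary_hold_spinloop;
	extern volatile unsigned long __secondary_hold_acknowledge;

	static void secondary_hold(unsigned long cpu)
	{
	        __secondary_hold_acknowledge = cpu;  /* tell the master we're here */
	        while (__secondary_hold_spinloop != 1)
	                ;                            /* wait to be released */
	        /* ... then fall into the SMP (or HMT) startup path, cpu in r3 */
	}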
167 | /* This value is used to mark exception frames on the stack. */ | ||
168 | .section ".toc","aw" | ||
169 | exception_marker: | ||
170 | .tc ID_72656773_68657265[TC],0x7265677368657265 | ||
171 | .text | ||
172 | |||
173 | /* | ||
174 | * The following macros define the code that appears as | ||
175 | * the prologue to each of the exception handlers. They | ||
176 | * are split into two parts to allow a single kernel binary | ||
177 | * to be used for pSeries and iSeries. | ||
178 | * LOL. One day... - paulus | ||
179 | */ | ||
180 | |||
181 | /* | ||
182 | * We make as much of the exception code common between native | ||
183 | * exception handlers (including pSeries LPAR) and iSeries LPAR | ||
184 | * implementations as possible. | ||
185 | */ | ||
186 | |||
187 | /* | ||
188 | * This is the start of the interrupt handlers for pSeries. | ||
189 | * This code runs with relocation off. | ||
190 | */ | ||
191 | #define EX_R9 0 | ||
192 | #define EX_R10 8 | ||
193 | #define EX_R11 16 | ||
194 | #define EX_R12 24 | ||
195 | #define EX_R13 32 | ||
196 | #define EX_SRR0 40 | ||
197 | #define EX_R3 40 /* SLB miss saves R3, but not SRR0 */ | ||
198 | #define EX_DAR 48 | ||
199 | #define EX_LR 48 /* SLB miss saves LR, but not DAR */ | ||
200 | #define EX_DSISR 56 | ||
201 | #define EX_CCR 60 | ||
202 | |||
203 | #define EXCEPTION_PROLOG_PSERIES(area, label) \ | ||
204 | mfspr r13,SPRG3; /* get paca address into r13 */ \ | ||
205 | std r9,area+EX_R9(r13); /* save r9 - r12 */ \ | ||
206 | std r10,area+EX_R10(r13); \ | ||
207 | std r11,area+EX_R11(r13); \ | ||
208 | std r12,area+EX_R12(r13); \ | ||
209 | mfspr r9,SPRG1; \ | ||
210 | std r9,area+EX_R13(r13); \ | ||
211 | mfcr r9; \ | ||
212 | clrrdi r12,r13,32; /* get high part of &label */ \ | ||
213 | mfmsr r10; \ | ||
214 | mfspr r11,SRR0; /* save SRR0 */ \ | ||
215 | ori r12,r12,(label)@l; /* virt addr of handler */ \ | ||
216 | ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \ | ||
217 | mtspr SRR0,r12; \ | ||
218 | mfspr r12,SRR1; /* and SRR1 */ \ | ||
219 | mtspr SRR1,r10; \ | ||
220 | rfid; \ | ||
221 | b . /* prevent speculative execution */ | ||
222 | |||
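One subtlety in EXCEPTION_PROLOG_PSERIES: the handler's virtual address is synthesized from r13 rather than loaded. The paca pointer is a kernel virtual address, so clearing its low 32 bits leaves the kernel's high address half, and the label's low half is then OR'd in. A sketch (the function name is illustrative, and it assumes the label fits in the low 16 bits, as these common handlers do):

	/* Sketch: clrrdi r12,r13,32 followed by ori r12,r12,label@l */
	static unsigned long handler_vaddr(unsigned long paca,
	                                   unsigned long label_lo16)
	{
	        return (paca & ~0xffffffffUL) | label_lo16;
	}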
223 | /* | ||
224 | * This is the start of the interrupt handlers for iSeries. | ||
225 | * This code runs with relocation on. | ||
226 | */ | ||
227 | #define EXCEPTION_PROLOG_ISERIES_1(area) \ | ||
228 | mfspr r13,SPRG3; /* get paca address into r13 */ \ | ||
229 | std r9,area+EX_R9(r13); /* save r9 - r12 */ \ | ||
230 | std r10,area+EX_R10(r13); \ | ||
231 | std r11,area+EX_R11(r13); \ | ||
232 | std r12,area+EX_R12(r13); \ | ||
233 | mfspr r9,SPRG1; \ | ||
234 | std r9,area+EX_R13(r13); \ | ||
235 | mfcr r9 | ||
236 | |||
237 | #define EXCEPTION_PROLOG_ISERIES_2 \ | ||
238 | mfmsr r10; \ | ||
239 | ld r11,PACALPPACA+LPPACASRR0(r13); \ | ||
240 | ld r12,PACALPPACA+LPPACASRR1(r13); \ | ||
241 | ori r10,r10,MSR_RI; \ | ||
242 | mtmsrd r10,1 | ||
243 | |||
244 | /* | ||
245 | * The common exception prolog is used for all except a few exceptions | ||
246 | * such as a segment miss on a kernel address. We have to be prepared | ||
247 | * to take another exception from the point where we first touch the | ||
248 | * kernel stack onwards. | ||
249 | * | ||
250 | * On entry r13 points to the paca, r9-r13 are saved in the paca, | ||
251 | * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and | ||
252 | * SRR1, and relocation is on. | ||
253 | */ | ||
254 | #define EXCEPTION_PROLOG_COMMON(n, area) \ | ||
255 | andi. r10,r12,MSR_PR; /* See if coming from user */ \ | ||
256 | mr r10,r1; /* Save r1 */ \ | ||
257 | subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \ | ||
258 | beq- 1f; \ | ||
259 | ld r1,PACAKSAVE(r13); /* kernel stack to use */ \ | ||
260 | 1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \ | ||
261 | bge- cr1,bad_stack; /* abort if it is */ \ | ||
262 | std r9,_CCR(r1); /* save CR in stackframe */ \ | ||
263 | std r11,_NIP(r1); /* save SRR0 in stackframe */ \ | ||
264 | std r12,_MSR(r1); /* save SRR1 in stackframe */ \ | ||
265 | std r10,0(r1); /* make stack chain pointer */ \ | ||
266 | std r0,GPR0(r1); /* save r0 in stackframe */ \ | ||
267 | std r10,GPR1(r1); /* save r1 in stackframe */ \ | ||
268 | std r2,GPR2(r1); /* save r2 in stackframe */ \ | ||
269 | SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \ | ||
270 | SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \ | ||
271 | ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \ | ||
272 | ld r10,area+EX_R10(r13); \ | ||
273 | std r9,GPR9(r1); \ | ||
274 | std r10,GPR10(r1); \ | ||
275 | ld r9,area+EX_R11(r13); /* move r11 - r13 to stackframe */ \ | ||
276 | ld r10,area+EX_R12(r13); \ | ||
277 | ld r11,area+EX_R13(r13); \ | ||
278 | std r9,GPR11(r1); \ | ||
279 | std r10,GPR12(r1); \ | ||
280 | std r11,GPR13(r1); \ | ||
281 | ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \ | ||
282 | mflr r9; /* save LR in stackframe */ \ | ||
283 | std r9,_LINK(r1); \ | ||
284 | mfctr r10; /* save CTR in stackframe */ \ | ||
285 | std r10,_CTR(r1); \ | ||
286 | mfspr r11,XER; /* save XER in stackframe */ \ | ||
287 | std r11,_XER(r1); \ | ||
288 | li r9,(n)+1; \ | ||
289 | std r9,_TRAP(r1); /* set trap number */ \ | ||
290 | li r10,0; \ | ||
291 | ld r11,exception_marker@toc(r2); \ | ||
292 | std r10,RESULT(r1); /* clear regs->result */ \ | ||
293 | std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */ | ||
294 | |||
295 | /* | ||
296 | * Exception vectors. | ||
297 | */ | ||
298 | #define STD_EXCEPTION_PSERIES(n, label) \ | ||
299 | . = n; \ | ||
300 | .globl label##_pSeries; \ | ||
301 | label##_pSeries: \ | ||
302 | HMT_MEDIUM; \ | ||
303 | mtspr SPRG1,r13; /* save r13 */ \ | ||
304 | RUNLATCH_ON(r13); \ | ||
305 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common) | ||
306 | |||
307 | #define STD_EXCEPTION_ISERIES(n, label, area) \ | ||
308 | .globl label##_iSeries; \ | ||
309 | label##_iSeries: \ | ||
310 | HMT_MEDIUM; \ | ||
311 | mtspr SPRG1,r13; /* save r13 */ \ | ||
312 | RUNLATCH_ON(r13); \ | ||
313 | EXCEPTION_PROLOG_ISERIES_1(area); \ | ||
314 | EXCEPTION_PROLOG_ISERIES_2; \ | ||
315 | b label##_common | ||
316 | |||
317 | #define MASKABLE_EXCEPTION_ISERIES(n, label) \ | ||
318 | .globl label##_iSeries; \ | ||
319 | label##_iSeries: \ | ||
320 | HMT_MEDIUM; \ | ||
321 | mtspr SPRG1,r13; /* save r13 */ \ | ||
322 | RUNLATCH_ON(r13); \ | ||
323 | EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \ | ||
324 | lbz r10,PACAPROCENABLED(r13); \ | ||
325 | cmpwi 0,r10,0; \ | ||
326 | beq- label##_iSeries_masked; \ | ||
327 | EXCEPTION_PROLOG_ISERIES_2; \ | ||
328 | b label##_common; \ | ||
329 | |||
330 | #ifdef DO_SOFT_DISABLE | ||
331 | #define DISABLE_INTS \ | ||
332 | lbz r10,PACAPROCENABLED(r13); \ | ||
333 | li r11,0; \ | ||
334 | std r10,SOFTE(r1); \ | ||
335 | mfmsr r10; \ | ||
336 | stb r11,PACAPROCENABLED(r13); \ | ||
337 | ori r10,r10,MSR_EE; \ | ||
338 | mtmsrd r10,1 | ||
339 | |||
340 | #define ENABLE_INTS \ | ||
341 | lbz r10,PACAPROCENABLED(r13); \ | ||
342 | mfmsr r11; \ | ||
343 | std r10,SOFTE(r1); \ | ||
344 | ori r11,r11,MSR_EE; \ | ||
345 | mtmsrd r11,1 | ||
346 | |||
347 | #else /* hard enable/disable interrupts */ | ||
348 | #define DISABLE_INTS | ||
349 | |||
350 | #define ENABLE_INTS \ | ||
351 | ld r12,_MSR(r1); \ | ||
352 | mfmsr r11; \ | ||
353 | rlwimi r11,r12,0,MSR_EE; \ | ||
354 | mtmsrd r11,1 | ||
355 | |||
356 | #endif | ||
357 | |||
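It is worth noting what DISABLE_INTS above actually does on iSeries: hardware EE is left (indeed forced) on, and PACAPROCENABLED is the real mask; the maskable-exception prologs test that byte and defer the interrupt when it is clear (a masked decrementer is flagged in the lppaca and replayed later; see decrementer_iSeries_masked further down). A rough C picture, with an illustrative struct standing in for the real paca layout:

	/* Sketch of the soft-disable model; the field name is illustrative. */
	struct paca_sketch { unsigned char proc_enabled; };

	static void soft_disable(struct paca_sketch *paca)
	{
	        paca->proc_enabled = 0;   /* handlers check this and defer */
	        /* MSR[EE] stays set: masked interrupts are noted and
	         * delivered once interrupts are soft-enabled again. */
	}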
358 | #define STD_EXCEPTION_COMMON(trap, label, hdlr) \ | ||
359 | .align 7; \ | ||
360 | .globl label##_common; \ | ||
361 | label##_common: \ | ||
362 | EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \ | ||
363 | DISABLE_INTS; \ | ||
364 | bl .save_nvgprs; \ | ||
365 | addi r3,r1,STACK_FRAME_OVERHEAD; \ | ||
366 | bl hdlr; \ | ||
367 | b .ret_from_except | ||
368 | |||
369 | #define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr) \ | ||
370 | .align 7; \ | ||
371 | .globl label##_common; \ | ||
372 | label##_common: \ | ||
373 | EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \ | ||
374 | DISABLE_INTS; \ | ||
375 | addi r3,r1,STACK_FRAME_OVERHEAD; \ | ||
376 | bl hdlr; \ | ||
377 | b .ret_from_except_lite | ||
378 | |||
379 | /* | ||
380 | * Start of pSeries system interrupt routines | ||
381 | */ | ||
382 | . = 0x100 | ||
383 | .globl __start_interrupts | ||
384 | __start_interrupts: | ||
385 | |||
386 | STD_EXCEPTION_PSERIES(0x100, system_reset) | ||
387 | |||
388 | . = 0x200 | ||
389 | _machine_check_pSeries: | ||
390 | HMT_MEDIUM | ||
391 | mtspr SPRG1,r13 /* save r13 */ | ||
392 | RUNLATCH_ON(r13) | ||
393 | EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) | ||
394 | |||
395 | . = 0x300 | ||
396 | .globl data_access_pSeries | ||
397 | data_access_pSeries: | ||
398 | HMT_MEDIUM | ||
399 | mtspr SPRG1,r13 | ||
400 | BEGIN_FTR_SECTION | ||
401 | mtspr SPRG2,r12 | ||
402 | mfspr r13,DAR | ||
403 | mfspr r12,DSISR | ||
404 | srdi r13,r13,60 | ||
405 | rlwimi r13,r12,16,0x20 | ||
406 | mfcr r12 | ||
407 | cmpwi r13,0x2c | ||
408 | beq .do_stab_bolted_pSeries | ||
409 | mtcrf 0x80,r12 | ||
410 | mfspr r12,SPRG2 | ||
411 | END_FTR_SECTION_IFCLR(CPU_FTR_SLB) | ||
412 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common) | ||
413 | |||
414 | . = 0x380 | ||
415 | .globl data_access_slb_pSeries | ||
416 | data_access_slb_pSeries: | ||
417 | HMT_MEDIUM | ||
418 | mtspr SPRG1,r13 | ||
419 | RUNLATCH_ON(r13) | ||
420 | mfspr r13,SPRG3 /* get paca address into r13 */ | ||
421 | std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ | ||
422 | std r10,PACA_EXSLB+EX_R10(r13) | ||
423 | std r11,PACA_EXSLB+EX_R11(r13) | ||
424 | std r12,PACA_EXSLB+EX_R12(r13) | ||
425 | std r3,PACA_EXSLB+EX_R3(r13) | ||
426 | mfspr r9,SPRG1 | ||
427 | std r9,PACA_EXSLB+EX_R13(r13) | ||
428 | mfcr r9 | ||
429 | mfspr r12,SRR1 /* and SRR1 */ | ||
430 | mfspr r3,DAR | ||
431 | b .do_slb_miss /* Rel. branch works in real mode */ | ||
432 | |||
433 | STD_EXCEPTION_PSERIES(0x400, instruction_access) | ||
434 | |||
435 | . = 0x480 | ||
436 | .globl instruction_access_slb_pSeries | ||
437 | instruction_access_slb_pSeries: | ||
438 | HMT_MEDIUM | ||
439 | mtspr SPRG1,r13 | ||
440 | RUNLATCH_ON(r13) | ||
441 | mfspr r13,SPRG3 /* get paca address into r13 */ | ||
442 | std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ | ||
443 | std r10,PACA_EXSLB+EX_R10(r13) | ||
444 | std r11,PACA_EXSLB+EX_R11(r13) | ||
445 | std r12,PACA_EXSLB+EX_R12(r13) | ||
446 | std r3,PACA_EXSLB+EX_R3(r13) | ||
447 | mfspr r9,SPRG1 | ||
448 | std r9,PACA_EXSLB+EX_R13(r13) | ||
449 | mfcr r9 | ||
450 | mfspr r12,SRR1 /* and SRR1 */ | ||
451 | mfspr r3,SRR0 /* SRR0 is faulting address */ | ||
452 | b .do_slb_miss /* Rel. branch works in real mode */ | ||
453 | |||
454 | STD_EXCEPTION_PSERIES(0x500, hardware_interrupt) | ||
455 | STD_EXCEPTION_PSERIES(0x600, alignment) | ||
456 | STD_EXCEPTION_PSERIES(0x700, program_check) | ||
457 | STD_EXCEPTION_PSERIES(0x800, fp_unavailable) | ||
458 | STD_EXCEPTION_PSERIES(0x900, decrementer) | ||
459 | STD_EXCEPTION_PSERIES(0xa00, trap_0a) | ||
460 | STD_EXCEPTION_PSERIES(0xb00, trap_0b) | ||
461 | |||
462 | . = 0xc00 | ||
463 | .globl system_call_pSeries | ||
464 | system_call_pSeries: | ||
465 | HMT_MEDIUM | ||
466 | RUNLATCH_ON(r9) | ||
467 | mr r9,r13 | ||
468 | mfmsr r10 | ||
469 | mfspr r13,SPRG3 | ||
470 | mfspr r11,SRR0 | ||
471 | clrrdi r12,r13,32 | ||
472 | oris r12,r12,system_call_common@h | ||
473 | ori r12,r12,system_call_common@l | ||
474 | mtspr SRR0,r12 | ||
475 | ori r10,r10,MSR_IR|MSR_DR|MSR_RI | ||
476 | mfspr r12,SRR1 | ||
477 | mtspr SRR1,r10 | ||
478 | rfid | ||
479 | b . /* prevent speculative execution */ | ||
480 | |||
481 | STD_EXCEPTION_PSERIES(0xd00, single_step) | ||
482 | STD_EXCEPTION_PSERIES(0xe00, trap_0e) | ||
483 | |||
484 | /* We need to deal with the Altivec unavailable exception here, | ||
485 | * which is at 0xf20 and thus in the middle of the prolog code | ||
486 | * of the PerformanceMonitor one. A little trickery is | ||
487 | * therefore necessary. | ||
488 | */ | ||
489 | . = 0xf00 | ||
490 | b performance_monitor_pSeries | ||
491 | |||
492 | STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable) | ||
493 | |||
494 | STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint) | ||
495 | STD_EXCEPTION_PSERIES(0x1700, altivec_assist) | ||
496 | |||
497 | . = 0x3000 | ||
498 | |||
499 | /*** pSeries interrupt support ***/ | ||
500 | |||
501 | /* moved from 0xf00 */ | ||
502 | STD_EXCEPTION_PSERIES(., performance_monitor) | ||
503 | |||
504 | .align 7 | ||
505 | _GLOBAL(do_stab_bolted_pSeries) | ||
506 | mtcrf 0x80,r12 | ||
507 | mfspr r12,SPRG2 | ||
508 | EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted) | ||
509 | |||
510 | /* | ||
511 | * Vectors for the FWNMI option. Share common code. | ||
512 | */ | ||
513 | .globl system_reset_fwnmi | ||
514 | system_reset_fwnmi: | ||
515 | HMT_MEDIUM | ||
516 | mtspr SPRG1,r13 /* save r13 */ | ||
517 | RUNLATCH_ON(r13) | ||
518 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common) | ||
519 | |||
520 | .globl machine_check_fwnmi | ||
521 | machine_check_fwnmi: | ||
522 | HMT_MEDIUM | ||
523 | mtspr SPRG1,r13 /* save r13 */ | ||
524 | RUNLATCH_ON(r13) | ||
525 | EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) | ||
526 | |||
527 | #ifdef CONFIG_PPC_ISERIES | ||
528 | /*** ISeries-LPAR interrupt handlers ***/ | ||
529 | |||
530 | STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC) | ||
531 | |||
532 | .globl data_access_iSeries | ||
533 | data_access_iSeries: | ||
534 | mtspr SPRG1,r13 | ||
535 | BEGIN_FTR_SECTION | ||
536 | mtspr SPRG2,r12 | ||
537 | mfspr r13,DAR | ||
538 | mfspr r12,DSISR | ||
539 | srdi r13,r13,60 | ||
540 | rlwimi r13,r12,16,0x20 | ||
541 | mfcr r12 | ||
542 | cmpwi r13,0x2c | ||
543 | beq .do_stab_bolted_iSeries | ||
544 | mtcrf 0x80,r12 | ||
545 | mfspr r12,SPRG2 | ||
546 | END_FTR_SECTION_IFCLR(CPU_FTR_SLB) | ||
547 | EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN) | ||
548 | EXCEPTION_PROLOG_ISERIES_2 | ||
549 | b data_access_common | ||
550 | |||
551 | .do_stab_bolted_iSeries: | ||
552 | mtcrf 0x80,r12 | ||
553 | mfspr r12,SPRG2 | ||
554 | EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB) | ||
555 | EXCEPTION_PROLOG_ISERIES_2 | ||
556 | b .do_stab_bolted | ||
557 | |||
558 | .globl data_access_slb_iSeries | ||
559 | data_access_slb_iSeries: | ||
560 | mtspr SPRG1,r13 /* save r13 */ | ||
561 | EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB) | ||
562 | std r3,PACA_EXSLB+EX_R3(r13) | ||
563 | ld r12,PACALPPACA+LPPACASRR1(r13) | ||
564 | mfspr r3,DAR | ||
565 | b .do_slb_miss | ||
566 | |||
567 | STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN) | ||
568 | |||
569 | .globl instruction_access_slb_iSeries | ||
570 | instruction_access_slb_iSeries: | ||
571 | mtspr SPRG1,r13 /* save r13 */ | ||
572 | EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB) | ||
573 | std r3,PACA_EXSLB+EX_R3(r13) | ||
574 | ld r12,PACALPPACA+LPPACASRR1(r13) | ||
575 | ld r3,PACALPPACA+LPPACASRR0(r13) | ||
576 | b .do_slb_miss | ||
577 | |||
578 | MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt) | ||
579 | STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN) | ||
580 | STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN) | ||
581 | STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN) | ||
582 | MASKABLE_EXCEPTION_ISERIES(0x900, decrementer) | ||
583 | STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN) | ||
584 | STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN) | ||
585 | |||
586 | .globl system_call_iSeries | ||
587 | system_call_iSeries: | ||
588 | mr r9,r13 | ||
589 | mfspr r13,SPRG3 | ||
590 | EXCEPTION_PROLOG_ISERIES_2 | ||
591 | b system_call_common | ||
592 | |||
593 | STD_EXCEPTION_ISERIES( 0xd00, single_step, PACA_EXGEN) | ||
594 | STD_EXCEPTION_ISERIES( 0xe00, trap_0e, PACA_EXGEN) | ||
595 | STD_EXCEPTION_ISERIES( 0xf00, performance_monitor, PACA_EXGEN) | ||
596 | |||
597 | .globl system_reset_iSeries | ||
598 | system_reset_iSeries: | ||
599 | mfspr r13,SPRG3 /* Get paca address */ | ||
600 | mfmsr r24 | ||
601 | ori r24,r24,MSR_RI | ||
602 | mtmsrd r24 /* RI on */ | ||
603 | lhz r24,PACAPACAINDEX(r13) /* Get processor # */ | ||
604 | cmpwi 0,r24,0 /* Are we processor 0? */ | ||
605 | beq .__start_initialization_iSeries /* Start up the first processor */ | ||
606 | mfspr r4,SPRN_CTRLF | ||
607 | li r5,CTRL_RUNLATCH /* Turn off the run light */ | ||
608 | andc r4,r4,r5 | ||
609 | mtspr SPRN_CTRLT,r4 | ||
610 | |||
611 | 1: | ||
612 | HMT_LOW | ||
613 | #ifdef CONFIG_SMP | ||
614 | lbz r23,PACAPROCSTART(r13) /* Test if this processor | ||
615 | * should start */ | ||
616 | sync | ||
617 | LOADADDR(r3,current_set) | ||
618 | sldi r28,r24,3 /* get current_set[cpu#] */ | ||
619 | ldx r3,r3,r28 | ||
620 | addi r1,r3,THREAD_SIZE | ||
621 | subi r1,r1,STACK_FRAME_OVERHEAD | ||
622 | |||
623 | cmpwi 0,r23,0 | ||
624 | beq iSeries_secondary_smp_loop /* Loop until told to go */ | ||
625 | bne .__secondary_start /* Loop until told to go */ | ||
626 | iSeries_secondary_smp_loop: | ||
627 | /* Let the Hypervisor know we are alive */ | ||
628 | /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */ | ||
629 | lis r3,0x8002 | ||
630 | rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */ | ||
631 | #else /* CONFIG_SMP */ | ||
632 | /* Yield the processor. This is required for non-SMP kernels | ||
633 | which are running on multi-threaded machines. */ | ||
634 | lis r3,0x8000 | ||
635 | rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */ | ||
636 | addi r3,r3,18 /* r3 = 0x8000000000000012 which is "yield" */ | ||
637 | li r4,0 /* "yield timed" */ | ||
638 | li r5,-1 /* "yield forever" */ | ||
639 | #endif /* CONFIG_SMP */ | ||
640 | li r0,-1 /* r0=-1 indicates a Hypervisor call */ | ||
641 | sc /* Invoke the hypervisor via a system call */ | ||
642 | mfspr r13,SPRG3 /* Put r13 back ???? */ | ||
643 | b 1b /* If SMP not configured, secondaries | ||
644 | * loop forever */ | ||
645 | |||
646 | .globl decrementer_iSeries_masked | ||
647 | decrementer_iSeries_masked: | ||
648 | li r11,1 | ||
649 | stb r11,PACALPPACA+LPPACADECRINT(r13) | ||
650 | lwz r12,PACADEFAULTDECR(r13) | ||
651 | mtspr SPRN_DEC,r12 | ||
652 | /* fall through */ | ||
653 | |||
654 | .globl hardware_interrupt_iSeries_masked | ||
655 | hardware_interrupt_iSeries_masked: | ||
656 | mtcrf 0x80,r9 /* Restore regs */ | ||
657 | ld r11,PACALPPACA+LPPACASRR0(r13) | ||
658 | ld r12,PACALPPACA+LPPACASRR1(r13) | ||
659 | mtspr SRR0,r11 | ||
660 | mtspr SRR1,r12 | ||
661 | ld r9,PACA_EXGEN+EX_R9(r13) | ||
662 | ld r10,PACA_EXGEN+EX_R10(r13) | ||
663 | ld r11,PACA_EXGEN+EX_R11(r13) | ||
664 | ld r12,PACA_EXGEN+EX_R12(r13) | ||
665 | ld r13,PACA_EXGEN+EX_R13(r13) | ||
666 | rfid | ||
667 | b . /* prevent speculative execution */ | ||
668 | #endif /* CONFIG_PPC_ISERIES */ | ||
669 | |||
670 | /*** Common interrupt handlers ***/ | ||
671 | |||
672 | STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception) | ||
673 | |||
674 | /* | ||
675 | * Machine check is different because we use a different | ||
676 | * save area: PACA_EXMC instead of PACA_EXGEN. | ||
677 | */ | ||
678 | .align 7 | ||
679 | .globl machine_check_common | ||
680 | machine_check_common: | ||
681 | EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC) | ||
682 | DISABLE_INTS | ||
683 | bl .save_nvgprs | ||
684 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
685 | bl .machine_check_exception | ||
686 | b .ret_from_except | ||
687 | |||
688 | STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt) | ||
689 | STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception) | ||
690 | STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) | ||
691 | STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) | ||
692 | STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception) | ||
693 | STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception) | ||
694 | STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception) | ||
695 | #ifdef CONFIG_ALTIVEC | ||
696 | STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception) | ||
697 | #else | ||
698 | STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception) | ||
699 | #endif | ||
700 | |||
701 | /* | ||
702 | * Here we have detected that the kernel stack pointer is bad. | ||
703 | * R9 contains the saved CR, r13 points to the paca, | ||
704 | * r10 contains the (bad) kernel stack pointer, | ||
705 | * r11 and r12 contain the saved SRR0 and SRR1. | ||
706 | * We switch to using an emergency stack, save the registers there, | ||
707 | * and call kernel_bad_stack(), which panics. | ||
708 | */ | ||
709 | bad_stack: | ||
710 | ld r1,PACAEMERGSP(r13) | ||
711 | subi r1,r1,64+INT_FRAME_SIZE | ||
712 | std r9,_CCR(r1) | ||
713 | std r10,GPR1(r1) | ||
714 | std r11,_NIP(r1) | ||
715 | std r12,_MSR(r1) | ||
716 | mfspr r11,DAR | ||
717 | mfspr r12,DSISR | ||
718 | std r11,_DAR(r1) | ||
719 | std r12,_DSISR(r1) | ||
720 | mflr r10 | ||
721 | mfctr r11 | ||
722 | mfxer r12 | ||
723 | std r10,_LINK(r1) | ||
724 | std r11,_CTR(r1) | ||
725 | std r12,_XER(r1) | ||
726 | SAVE_GPR(0,r1) | ||
727 | SAVE_GPR(2,r1) | ||
728 | SAVE_4GPRS(3,r1) | ||
729 | SAVE_2GPRS(7,r1) | ||
730 | SAVE_10GPRS(12,r1) | ||
731 | SAVE_10GPRS(22,r1) | ||
732 | addi r11,r1,INT_FRAME_SIZE | ||
733 | std r11,0(r1) | ||
734 | li r12,0 | ||
735 | std r12,0(r11) | ||
736 | ld r2,PACATOC(r13) | ||
737 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | ||
738 | bl .kernel_bad_stack | ||
739 | b 1b | ||
740 | |||
741 | /* | ||
742 | * Return from an exception with minimal checks. | ||
743 | * The caller is assumed to have done EXCEPTION_PROLOG_COMMON. | ||
744 | * If interrupts have been enabled, or anything has been | ||
745 | * done that might have changed the scheduling status of | ||
746 | * any task or sent any task a signal, you should use | ||
747 | * ret_from_except or ret_from_except_lite instead of this. | ||
748 | */ | ||
749 | fast_exception_return: | ||
750 | ld r12,_MSR(r1) | ||
751 | ld r11,_NIP(r1) | ||
752 | andi. r3,r12,MSR_RI /* check if RI is set */ | ||
753 | beq- unrecov_fer | ||
754 | ld r3,_CCR(r1) | ||
755 | ld r4,_LINK(r1) | ||
756 | ld r5,_CTR(r1) | ||
757 | ld r6,_XER(r1) | ||
758 | mtcr r3 | ||
759 | mtlr r4 | ||
760 | mtctr r5 | ||
761 | mtxer r6 | ||
762 | REST_GPR(0, r1) | ||
763 | REST_8GPRS(2, r1) | ||
764 | |||
765 | mfmsr r10 | ||
766 | clrrdi r10,r10,2 /* clear RI (LE is 0 already) */ | ||
767 | mtmsrd r10,1 | ||
768 | |||
769 | mtspr SRR1,r12 | ||
770 | mtspr SRR0,r11 | ||
771 | REST_4GPRS(10, r1) | ||
772 | ld r1,GPR1(r1) | ||
773 | rfid | ||
774 | b . /* prevent speculative execution */ | ||
775 | |||
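The fast path above restores only what EXCEPTION_PROLOG_COMMON saved and refuses to return when MSR_RI is clear. A minimal C sketch of that recoverability test (the frame layout and the MSR_RI value are illustrative, not the kernel's definitions):

        /* Sketch only: models the recoverability check in
         * fast_exception_return. Layout and constants illustrative. */
        struct exc_frame {
                unsigned long msr, nip, ccr, link, ctr, xer;
        };

        int fast_return_ok(const struct exc_frame *f)
        {
                /* If MSR_RI is clear, SRR0/SRR1 may already have been
                 * clobbered by a nested exception: go to unrecov_fer. */
                return (f->msr & 0x2 /* MSR_RI, illustrative */) != 0;
        }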
776 | unrecov_fer: | ||
777 | bl .save_nvgprs | ||
778 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | ||
779 | bl .unrecoverable_exception | ||
780 | b 1b | ||
781 | |||
782 | /* | ||
783 | * Here r13 points to the paca, r9 contains the saved CR, | ||
784 | * SRR0 and SRR1 are saved in r11 and r12, | ||
785 | * r9 - r13 are saved in paca->exgen. | ||
786 | */ | ||
787 | .align 7 | ||
788 | .globl data_access_common | ||
789 | data_access_common: | ||
790 | 	RUNLATCH_ON(r10)		/* It won't fit in the 0x300 handler */ | ||
791 | mfspr r10,DAR | ||
792 | std r10,PACA_EXGEN+EX_DAR(r13) | ||
793 | mfspr r10,DSISR | ||
794 | stw r10,PACA_EXGEN+EX_DSISR(r13) | ||
795 | EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN) | ||
796 | ld r3,PACA_EXGEN+EX_DAR(r13) | ||
797 | lwz r4,PACA_EXGEN+EX_DSISR(r13) | ||
798 | li r5,0x300 | ||
799 | b .do_hash_page /* Try to handle as hpte fault */ | ||
800 | |||
801 | .align 7 | ||
802 | .globl instruction_access_common | ||
803 | instruction_access_common: | ||
804 | EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN) | ||
805 | ld r3,_NIP(r1) | ||
806 | andis. r4,r12,0x5820 | ||
807 | li r5,0x400 | ||
808 | b .do_hash_page /* Try to handle as hpte fault */ | ||
809 | |||
810 | .align 7 | ||
811 | .globl hardware_interrupt_common | ||
812 | .globl hardware_interrupt_entry | ||
813 | hardware_interrupt_common: | ||
814 | EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN) | ||
815 | hardware_interrupt_entry: | ||
816 | DISABLE_INTS | ||
817 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
818 | bl .do_IRQ | ||
819 | b .ret_from_except_lite | ||
820 | |||
821 | .align 7 | ||
822 | .globl alignment_common | ||
823 | alignment_common: | ||
824 | mfspr r10,DAR | ||
825 | std r10,PACA_EXGEN+EX_DAR(r13) | ||
826 | mfspr r10,DSISR | ||
827 | stw r10,PACA_EXGEN+EX_DSISR(r13) | ||
828 | EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN) | ||
829 | ld r3,PACA_EXGEN+EX_DAR(r13) | ||
830 | lwz r4,PACA_EXGEN+EX_DSISR(r13) | ||
831 | std r3,_DAR(r1) | ||
832 | std r4,_DSISR(r1) | ||
833 | bl .save_nvgprs | ||
834 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
835 | ENABLE_INTS | ||
836 | bl .alignment_exception | ||
837 | b .ret_from_except | ||
838 | |||
839 | .align 7 | ||
840 | .globl program_check_common | ||
841 | program_check_common: | ||
842 | EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN) | ||
843 | bl .save_nvgprs | ||
844 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
845 | ENABLE_INTS | ||
846 | bl .program_check_exception | ||
847 | b .ret_from_except | ||
848 | |||
849 | .align 7 | ||
850 | .globl fp_unavailable_common | ||
851 | fp_unavailable_common: | ||
852 | EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN) | ||
853 | bne .load_up_fpu /* if from user, just load it up */ | ||
854 | bl .save_nvgprs | ||
855 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
856 | ENABLE_INTS | ||
857 | bl .kernel_fp_unavailable_exception | ||
858 | BUG_OPCODE | ||
859 | |||
860 | /* | ||
861 | * load_up_fpu(unused, unused, tsk) | ||
862 | * Disable FP for the task which had the FPU previously, | ||
863 | * and save its floating-point registers in its thread_struct. | ||
864 | * Enables the FPU for use in the kernel on return. | ||
865 | * On SMP we know the fpu is free, since we give it up every | ||
866 | * switch (ie, no lazy save of the FP registers). | ||
867 | * On entry: r13 == 'current' && last_task_used_math != 'current' | ||
868 | */ | ||
869 | _STATIC(load_up_fpu) | ||
870 | mfmsr r5 /* grab the current MSR */ | ||
871 | ori r5,r5,MSR_FP | ||
872 | mtmsrd r5 /* enable use of fpu now */ | ||
873 | isync | ||
874 | /* | ||
875 | * For SMP, we don't do lazy FPU switching because it just gets too | ||
876 | * horrendously complex, especially when a task switches from one CPU | ||
877 | * to another. Instead we call giveup_fpu in switch_to. | ||
878 | * | ||
879 | */ | ||
880 | #ifndef CONFIG_SMP | ||
881 | ld r3,last_task_used_math@got(r2) | ||
882 | ld r4,0(r3) | ||
883 | cmpdi 0,r4,0 | ||
884 | beq 1f | ||
885 | /* Save FP state to last_task_used_math's THREAD struct */ | ||
886 | addi r4,r4,THREAD | ||
887 | SAVE_32FPRS(0, r4) | ||
888 | mffs fr0 | ||
889 | stfd fr0,THREAD_FPSCR(r4) | ||
890 | /* Disable FP for last_task_used_math */ | ||
891 | ld r5,PT_REGS(r4) | ||
892 | ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
893 | li r6,MSR_FP|MSR_FE0|MSR_FE1 | ||
894 | andc r4,r4,r6 | ||
895 | std r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
896 | 1: | ||
897 | #endif /* CONFIG_SMP */ | ||
898 | /* enable use of FP after return */ | ||
899 | ld r4,PACACURRENT(r13) | ||
900 | addi r5,r4,THREAD /* Get THREAD */ | ||
901 | ld r4,THREAD_FPEXC_MODE(r5) | ||
902 | ori r12,r12,MSR_FP | ||
903 | or r12,r12,r4 | ||
904 | std r12,_MSR(r1) | ||
905 | lfd fr0,THREAD_FPSCR(r5) | ||
906 | mtfsf 0xff,fr0 | ||
907 | REST_32FPRS(0, r5) | ||
908 | #ifndef CONFIG_SMP | ||
909 | /* Update last_task_used_math to 'current' */ | ||
910 | subi r4,r5,THREAD /* Back to 'current' */ | ||
911 | std r4,0(r3) | ||
912 | #endif /* CONFIG_SMP */ | ||
913 | /* restore registers and return */ | ||
914 | b fast_exception_return | ||
915 | |||
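The UP-only path above is classic lazy FPU switching: steal the unit from last_task_used_math, save that task's FP state, and clear MSR_FP in its saved MSR so its next FP use faults again. A rough C sketch of the policy (the struct and field names are illustrative stand-ins, not the real thread_struct layout):

        /* Illustrative sketch of the lazy-FPU policy above (UP only). */
        struct task_sketch {
                unsigned long msr;      /* saved MSR in the task's pt_regs */
                /* FP registers and FPSCR would live here too */
        };

        static struct task_sketch *last_task_used_math;

        void load_up_fpu_sketch(struct task_sketch *cur)
        {
                struct task_sketch *prev = last_task_used_math;

                if (prev) {
                        /* save prev's FPRs/FPSCR here (elided), then take
                         * MSR_FP away so its next FP use re-faults. */
                        prev->msr &= ~0x2000UL;   /* ~MSR_FP, illustrative */
                }
                cur->msr |= 0x2000UL;             /* give current MSR_FP */
                /* restore cur's FPRs/FPSCR here (elided) */
                last_task_used_math = cur;
        }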
916 | .align 7 | ||
917 | .globl altivec_unavailable_common | ||
918 | altivec_unavailable_common: | ||
919 | EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN) | ||
920 | #ifdef CONFIG_ALTIVEC | ||
921 | BEGIN_FTR_SECTION | ||
922 | bne .load_up_altivec /* if from user, just load it up */ | ||
923 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | ||
924 | #endif | ||
925 | bl .save_nvgprs | ||
926 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
927 | ENABLE_INTS | ||
928 | bl .altivec_unavailable_exception | ||
929 | b .ret_from_except | ||
930 | |||
931 | #ifdef CONFIG_ALTIVEC | ||
932 | /* | ||
933 | * load_up_altivec(unused, unused, tsk) | ||
934 | * Disable VMX for the task which had it previously, | ||
935 | * and save its vector registers in its thread_struct. | ||
936 | * Enables the VMX for use in the kernel on return. | ||
937 | * On SMP we know the VMX is free, since we give it up every | ||
938 | * switch (ie, no lazy save of the vector registers). | ||
939 | * On entry: r13 == 'current' && last_task_used_altivec != 'current' | ||
940 | */ | ||
941 | _STATIC(load_up_altivec) | ||
942 | mfmsr r5 /* grab the current MSR */ | ||
943 | oris r5,r5,MSR_VEC@h | ||
944 | mtmsrd r5 /* enable use of VMX now */ | ||
945 | isync | ||
946 | |||
947 | /* | ||
948 | * For SMP, we don't do lazy VMX switching because it just gets too | ||
949 | * horrendously complex, especially when a task switches from one CPU | ||
950 | 	 * to another. Instead we call giveup_altivec in switch_to. | ||
951 | * VRSAVE isn't dealt with here, that is done in the normal context | ||
952 | * switch code. Note that we could rely on vrsave value to eventually | ||
953 | * avoid saving all of the VREGs here... | ||
954 | */ | ||
955 | #ifndef CONFIG_SMP | ||
956 | ld r3,last_task_used_altivec@got(r2) | ||
957 | ld r4,0(r3) | ||
958 | cmpdi 0,r4,0 | ||
959 | beq 1f | ||
960 | /* Save VMX state to last_task_used_altivec's THREAD struct */ | ||
961 | addi r4,r4,THREAD | ||
962 | SAVE_32VRS(0,r5,r4) | ||
963 | mfvscr vr0 | ||
964 | li r10,THREAD_VSCR | ||
965 | stvx vr0,r10,r4 | ||
966 | /* Disable VMX for last_task_used_altivec */ | ||
967 | ld r5,PT_REGS(r4) | ||
968 | ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
969 | lis r6,MSR_VEC@h | ||
970 | andc r4,r4,r6 | ||
971 | std r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
972 | 1: | ||
973 | #endif /* CONFIG_SMP */ | ||
974 | /* Hack: if we get an altivec unavailable trap with VRSAVE | ||
975 | * set to all zeros, we assume this is a broken application | ||
976 | * that fails to set it properly, and thus we switch it to | ||
977 | * all 1's | ||
978 | */ | ||
979 | mfspr r4,SPRN_VRSAVE | ||
980 | cmpdi 0,r4,0 | ||
981 | bne+ 1f | ||
982 | li r4,-1 | ||
983 | mtspr SPRN_VRSAVE,r4 | ||
984 | 1: | ||
985 | /* enable use of VMX after return */ | ||
986 | ld r4,PACACURRENT(r13) | ||
987 | addi r5,r4,THREAD /* Get THREAD */ | ||
988 | oris r12,r12,MSR_VEC@h | ||
989 | std r12,_MSR(r1) | ||
990 | li r4,1 | ||
991 | li r10,THREAD_VSCR | ||
992 | stw r4,THREAD_USED_VR(r5) | ||
993 | lvx vr0,r10,r5 | ||
994 | mtvscr vr0 | ||
995 | REST_32VRS(0,r4,r5) | ||
996 | #ifndef CONFIG_SMP | ||
997 | 	/* Update last_task_used_altivec to 'current' */ | ||
998 | subi r4,r5,THREAD /* Back to 'current' */ | ||
999 | std r4,0(r3) | ||
1000 | #endif /* CONFIG_SMP */ | ||
1001 | /* restore registers and return */ | ||
1002 | b fast_exception_return | ||
1003 | #endif /* CONFIG_ALTIVEC */ | ||
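The VRSAVE hack above reduces to a one-line fixup; a sketch, assuming VRSAVE is read as a 32-bit register-use mask:

        /* Sketch of the VRSAVE fixup above: a zero mask from a broken
         * application is replaced by "all vector registers live". */
        unsigned int fixup_vrsave(unsigned int vrsave)
        {
                return vrsave ? vrsave : 0xffffffffu;
        }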
1004 | |||
1005 | /* | ||
1006 | * Hash table stuff | ||
1007 | */ | ||
1008 | .align 7 | ||
1009 | _GLOBAL(do_hash_page) | ||
1010 | std r3,_DAR(r1) | ||
1011 | std r4,_DSISR(r1) | ||
1012 | |||
1013 | andis. r0,r4,0xa450 /* weird error? */ | ||
1014 | bne- .handle_page_fault /* if not, try to insert a HPTE */ | ||
1015 | BEGIN_FTR_SECTION | ||
1016 | andis. r0,r4,0x0020 /* Is it a segment table fault? */ | ||
1017 | bne- .do_ste_alloc /* If so handle it */ | ||
1018 | END_FTR_SECTION_IFCLR(CPU_FTR_SLB) | ||
1019 | |||
1020 | /* | ||
1021 | * We need to set the _PAGE_USER bit if MSR_PR is set or if we are | ||
1022 | * accessing a userspace segment (even from the kernel). We assume | ||
1023 | * kernel addresses always have the high bit set. | ||
1024 | */ | ||
1025 | rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */ | ||
1026 | rotldi r0,r3,15 /* Move high bit into MSR_PR posn */ | ||
1027 | orc r0,r12,r0 /* MSR_PR | ~high_bit */ | ||
1028 | rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */ | ||
1029 | ori r4,r4,1 /* add _PAGE_PRESENT */ | ||
1030 | rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */ | ||
1031 | |||
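The rlwinm/rlwimi sequence above assembles hash_page's access-permission argument from the DSISR store bit, MSR_PR, the high bit of the faulting address, and the trap number. The same logic in C (the _PAGE_* and DSISR bit values here are illustrative, not taken from the kernel headers):

        /* Sketch of the access-flag computation above. Constants are
         * illustrative placeholders for the real _PAGE_*/DSISR bits. */
        #define PG_PRESENT 0x001
        #define PG_USER    0x002
        #define PG_EXEC    0x004
        #define PG_RW      0x200

        unsigned long build_access(unsigned long dsisr, int msr_pr,
                                   unsigned long long addr,
                                   unsigned long trap)
        {
                unsigned long access = PG_PRESENT;

                if (dsisr & 0x02000000)   /* store fault, illustrative bit */
                        access |= PG_RW;
                /* Kernel addresses have the high bit set; anything else is
                 * a user segment, needing _PAGE_USER even from the kernel. */
                if (msr_pr || !(addr >> 63))
                        access |= PG_USER;
                if (trap == 0x400)        /* instruction fault */
                        access |= PG_EXEC;
                return access;
        }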
1032 | /* | ||
1033 | * On iSeries, we soft-disable interrupts here, then | ||
1034 | * hard-enable interrupts so that the hash_page code can spin on | ||
1035 | * the hash_table_lock without problems on a shared processor. | ||
1036 | */ | ||
1037 | DISABLE_INTS | ||
1038 | |||
1039 | /* | ||
1040 | * r3 contains the faulting address | ||
1041 | * r4 contains the required access permissions | ||
1042 | * r5 contains the trap number | ||
1043 | * | ||
1044 | * at return r3 = 0 for success | ||
1045 | */ | ||
1046 | bl .hash_page /* build HPTE if possible */ | ||
1047 | cmpdi r3,0 /* see if hash_page succeeded */ | ||
1048 | |||
1049 | #ifdef DO_SOFT_DISABLE | ||
1050 | /* | ||
1051 | * If we had interrupts soft-enabled at the point where the | ||
1052 | * DSI/ISI occurred, and an interrupt came in during hash_page, | ||
1053 | * handle it now. | ||
1054 | * We jump to ret_from_except_lite rather than fast_exception_return | ||
1055 | * because ret_from_except_lite will check for and handle pending | ||
1056 | * interrupts if necessary. | ||
1057 | */ | ||
1058 | beq .ret_from_except_lite | ||
1059 | /* For a hash failure, we don't bother re-enabling interrupts */ | ||
1060 | ble- 12f | ||
1061 | |||
1062 | /* | ||
1063 | * hash_page couldn't handle it, set soft interrupt enable back | ||
1064 | * to what it was before the trap. Note that .local_irq_restore | ||
1065 | * handles any interrupts pending at this point. | ||
1066 | */ | ||
1067 | ld r3,SOFTE(r1) | ||
1068 | bl .local_irq_restore | ||
1069 | b 11f | ||
1070 | #else | ||
1071 | beq fast_exception_return /* Return from exception on success */ | ||
1072 | ble- 12f /* Failure return from hash_page */ | ||
1073 | |||
1074 | /* fall through */ | ||
1075 | #endif | ||
1076 | |||
1077 | /* Here we have a page fault that hash_page can't handle. */ | ||
1078 | _GLOBAL(handle_page_fault) | ||
1079 | ENABLE_INTS | ||
1080 | 11: ld r4,_DAR(r1) | ||
1081 | ld r5,_DSISR(r1) | ||
1082 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
1083 | bl .do_page_fault | ||
1084 | cmpdi r3,0 | ||
1085 | beq+ .ret_from_except_lite | ||
1086 | bl .save_nvgprs | ||
1087 | mr r5,r3 | ||
1088 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
1089 | lwz r4,_DAR(r1) | ||
1090 | bl .bad_page_fault | ||
1091 | b .ret_from_except | ||
1092 | |||
1093 | /* We have a page fault that hash_page could handle but HV refused | ||
1094 | * the PTE insertion | ||
1095 | */ | ||
1096 | 12: bl .save_nvgprs | ||
1097 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
1098 | lwz r4,_DAR(r1) | ||
1099 | bl .low_hash_fault | ||
1100 | b .ret_from_except | ||
1101 | |||
1102 | /* here we have a segment miss */ | ||
1103 | _GLOBAL(do_ste_alloc) | ||
1104 | bl .ste_allocate /* try to insert stab entry */ | ||
1105 | cmpdi r3,0 | ||
1106 | beq+ fast_exception_return | ||
1107 | b .handle_page_fault | ||
1108 | |||
1109 | /* | ||
1110 | * r13 points to the PACA, r9 contains the saved CR, | ||
1111 | * r11 and r12 contain the saved SRR0 and SRR1. | ||
1112 | * r9 - r13 are saved in paca->exslb. | ||
1113 | * We assume we aren't going to take any exceptions during this procedure. | ||
1114 | * We assume (DAR >> 60) == 0xc. | ||
1115 | */ | ||
1116 | .align 7 | ||
1117 | _GLOBAL(do_stab_bolted) | ||
1118 | stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ | ||
1119 | std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */ | ||
1120 | |||
1121 | /* Hash to the primary group */ | ||
1122 | ld r10,PACASTABVIRT(r13) | ||
1123 | mfspr r11,DAR | ||
1124 | srdi r11,r11,28 | ||
1125 | rldimi r10,r11,7,52 /* r10 = first ste of the group */ | ||
1126 | |||
1127 | /* Calculate VSID */ | ||
1128 | /* This is a kernel address, so protovsid = ESID */ | ||
1129 | ASM_VSID_SCRAMBLE(r11, r9) | ||
1130 | rldic r9,r11,12,16 /* r9 = vsid << 12 */ | ||
1131 | |||
1132 | /* Search the primary group for a free entry */ | ||
1133 | 1: ld r11,0(r10) /* Test valid bit of the current ste */ | ||
1134 | andi. r11,r11,0x80 | ||
1135 | beq 2f | ||
1136 | addi r10,r10,16 | ||
1137 | andi. r11,r10,0x70 | ||
1138 | bne 1b | ||
1139 | |||
1140 | 	/* Stick to searching only the primary group for now. */ | ||
1141 | /* At least for now, we use a very simple random castout scheme */ | ||
1142 | /* Use the TB as a random number ; OR in 1 to avoid entry 0 */ | ||
1143 | mftb r11 | ||
1144 | rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */ | ||
1145 | ori r11,r11,0x10 | ||
1146 | |||
1147 | /* r10 currently points to an ste one past the group of interest */ | ||
1148 | /* make it point to the randomly selected entry */ | ||
1149 | subi r10,r10,128 | ||
1150 | or r10,r10,r11 /* r10 is the entry to invalidate */ | ||
1151 | |||
1152 | isync /* mark the entry invalid */ | ||
1153 | ld r11,0(r10) | ||
1154 | rldicl r11,r11,56,1 /* clear the valid bit */ | ||
1155 | rotldi r11,r11,8 | ||
1156 | std r11,0(r10) | ||
1157 | sync | ||
1158 | |||
1159 | clrrdi r11,r11,28 /* Get the esid part of the ste */ | ||
1160 | slbie r11 | ||
1161 | |||
1162 | 2: std r9,8(r10) /* Store the vsid part of the ste */ | ||
1163 | eieio | ||
1164 | |||
1165 | mfspr r11,DAR /* Get the new esid */ | ||
1166 | clrrdi r11,r11,28 /* Permits a full 32b of ESID */ | ||
1167 | ori r11,r11,0x90 /* Turn on valid and kp */ | ||
1168 | std r11,0(r10) /* Put new entry back into the stab */ | ||
1169 | |||
1170 | sync | ||
1171 | |||
1172 | /* All done -- return from exception. */ | ||
1173 | lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ | ||
1174 | ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */ | ||
1175 | |||
1176 | andi. r10,r12,MSR_RI | ||
1177 | beq- unrecov_slb | ||
1178 | |||
1179 | mtcrf 0x80,r9 /* restore CR */ | ||
1180 | |||
1181 | mfmsr r10 | ||
1182 | clrrdi r10,r10,2 | ||
1183 | mtmsrd r10,1 | ||
1184 | |||
1185 | mtspr SRR0,r11 | ||
1186 | mtspr SRR1,r12 | ||
1187 | ld r9,PACA_EXSLB+EX_R9(r13) | ||
1188 | ld r10,PACA_EXSLB+EX_R10(r13) | ||
1189 | ld r11,PACA_EXSLB+EX_R11(r13) | ||
1190 | ld r12,PACA_EXSLB+EX_R12(r13) | ||
1191 | ld r13,PACA_EXSLB+EX_R13(r13) | ||
1192 | rfid | ||
1193 | b . /* prevent speculative execution */ | ||
1194 | |||
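do_stab_bolted's placement policy above is: first free slot in the primary group wins, otherwise cast out a pseudo-random victim seeded from the timebase, never entry 0. A sketch with an illustrative 16-byte STE layout (valid bit 0x80 in the first doubleword, eight entries per group):

        /* Sketch of the STE placement policy in do_stab_bolted above.
         * Entry layout is illustrative. */
        struct ste_sketch { unsigned long esid_v; unsigned long vsid; };

        struct ste_sketch *pick_ste(struct ste_sketch group[8],
                                    unsigned long timebase)
        {
                int i;

                for (i = 0; i < 8; i++)         /* first free slot wins */
                        if (!(group[i].esid_v & 0x80))
                                return &group[i];
                /* No free slot: random castout, never entry 0 (it may
                 * hold the bolted kernel entry). */
                return &group[(timebase & 0x7) | 0x1];
        }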
1195 | /* | ||
1196 | * r13 points to the PACA, r9 contains the saved CR, | ||
1197 | * r11 and r12 contain the saved SRR0 and SRR1. | ||
1198 | * r3 has the faulting address | ||
1199 | * r9 - r13 are saved in paca->exslb. | ||
1200 | * r3 is saved in paca->slb_r3 | ||
1201 | * We assume we aren't going to take any exceptions during this procedure. | ||
1202 | */ | ||
1203 | _GLOBAL(do_slb_miss) | ||
1204 | mflr r10 | ||
1205 | |||
1206 | stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ | ||
1207 | std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ | ||
1208 | |||
1209 | bl .slb_allocate /* handle it */ | ||
1210 | |||
1211 | /* All done -- return from exception. */ | ||
1212 | |||
1213 | ld r10,PACA_EXSLB+EX_LR(r13) | ||
1214 | ld r3,PACA_EXSLB+EX_R3(r13) | ||
1215 | lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ | ||
1216 | #ifdef CONFIG_PPC_ISERIES | ||
1217 | ld r11,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */ | ||
1218 | #endif /* CONFIG_PPC_ISERIES */ | ||
1219 | |||
1220 | mtlr r10 | ||
1221 | |||
1222 | andi. r10,r12,MSR_RI /* check for unrecoverable exception */ | ||
1223 | beq- unrecov_slb | ||
1224 | |||
1225 | .machine push | ||
1226 | .machine "power4" | ||
1227 | mtcrf 0x80,r9 | ||
1228 | mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ | ||
1229 | .machine pop | ||
1230 | |||
1231 | #ifdef CONFIG_PPC_ISERIES | ||
1232 | mtspr SRR0,r11 | ||
1233 | mtspr SRR1,r12 | ||
1234 | #endif /* CONFIG_PPC_ISERIES */ | ||
1235 | ld r9,PACA_EXSLB+EX_R9(r13) | ||
1236 | ld r10,PACA_EXSLB+EX_R10(r13) | ||
1237 | ld r11,PACA_EXSLB+EX_R11(r13) | ||
1238 | ld r12,PACA_EXSLB+EX_R12(r13) | ||
1239 | ld r13,PACA_EXSLB+EX_R13(r13) | ||
1240 | rfid | ||
1241 | b . /* prevent speculative execution */ | ||
1242 | |||
1243 | unrecov_slb: | ||
1244 | EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB) | ||
1245 | DISABLE_INTS | ||
1246 | bl .save_nvgprs | ||
1247 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | ||
1248 | bl .unrecoverable_exception | ||
1249 | b 1b | ||
1250 | |||
1251 | /* | ||
1252 | * Space for CPU0's segment table. | ||
1253 | * | ||
1254 | * On iSeries, the hypervisor must fill in at least one entry before | ||
1255 | * we get control (with relocate on). The address is given to the hv | ||
1256 | * as a page number (see xLparMap in LparData.c), so this must be at a | ||
1257 | * fixed address (the linker can't compute (u64)&initial_stab >> | ||
1258 | * PAGE_SHIFT). | ||
1259 | */ | ||
1260 | . = STAB0_PHYS_ADDR /* 0x6000 */ | ||
1261 | .globl initial_stab | ||
1262 | initial_stab: | ||
1263 | .space 4096 | ||
1264 | |||
1265 | /* | ||
1266 | * Data area reserved for FWNMI option. | ||
1267 | * This address (0x7000) is fixed by the RPA. | ||
1268 | */ | ||
1269 | 	. = 0x7000 | ||
1270 | .globl fwnmi_data_area | ||
1271 | fwnmi_data_area: | ||
1272 | |||
1273 | /* iSeries does not use the FWNMI stuff, so it is safe to put | ||
1274 | * this here, even if we later allow kernels that will boot on | ||
1275 | * both pSeries and iSeries */ | ||
1276 | #ifdef CONFIG_PPC_ISERIES | ||
1277 | . = LPARMAP_PHYS | ||
1278 | #include "lparmap.s" | ||
1279 | /* | ||
1280 | * This ".text" is here for old compilers that generate a trailing | ||
1281 | * .note section when compiling .c files to .s | ||
1282 | */ | ||
1283 | .text | ||
1284 | #endif /* CONFIG_PPC_ISERIES */ | ||
1285 | |||
1286 | . = 0x8000 | ||
1287 | |||
1288 | /* | ||
1289 | * On pSeries, secondary processors spin in the following code. | ||
1290 | * At entry, r3 = this processor's number (physical cpu id) | ||
1291 | */ | ||
1292 | _GLOBAL(pSeries_secondary_smp_init) | ||
1293 | mr r24,r3 | ||
1294 | |||
1295 | /* turn on 64-bit mode */ | ||
1296 | bl .enable_64b_mode | ||
1297 | isync | ||
1298 | |||
1299 | /* Copy some CPU settings from CPU 0 */ | ||
1300 | bl .__restore_cpu_setup | ||
1301 | |||
1302 | /* Set up a paca value for this processor. Since we have the | ||
1303 | * physical cpu id in r24, we need to search the pacas to find | ||
1304 | * which logical id maps to our physical one. | ||
1305 | */ | ||
1306 | LOADADDR(r13, paca) /* Get base vaddr of paca array */ | ||
1307 | li r5,0 /* logical cpu id */ | ||
1308 | 1: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */ | ||
1309 | cmpw r6,r24 /* Compare to our id */ | ||
1310 | beq 2f | ||
1311 | addi r13,r13,PACA_SIZE /* Loop to next PACA on miss */ | ||
1312 | addi r5,r5,1 | ||
1313 | cmpwi r5,NR_CPUS | ||
1314 | blt 1b | ||
1315 | |||
1316 | mr r3,r24 /* not found, copy phys to r3 */ | ||
1317 | b .kexec_wait /* next kernel might do better */ | ||
1318 | |||
1319 | 2: mtspr SPRG3,r13 /* Save vaddr of paca in SPRG3 */ | ||
1320 | /* From now on, r24 is expected to be logical cpuid */ | ||
1321 | mr r24,r5 | ||
1322 | 3: HMT_LOW | ||
1323 | lbz r23,PACAPROCSTART(r13) /* Test if this processor should */ | ||
1324 | /* start. */ | ||
1325 | sync | ||
1326 | |||
1327 | /* Create a temp kernel stack for use before relocation is on. */ | ||
1328 | ld r1,PACAEMERGSP(r13) | ||
1329 | subi r1,r1,STACK_FRAME_OVERHEAD | ||
1330 | |||
1331 | cmpwi 0,r23,0 | ||
1332 | #ifdef CONFIG_SMP | ||
1333 | bne .__secondary_start | ||
1334 | #endif | ||
1335 | b 3b /* Loop until told to go */ | ||
1336 | |||
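The search loop above maps a physical cpu id to a logical one by linearly scanning the paca array on its PACAHWCPUID field; in C it is simply (struct shape and NR_CPUS value illustrative):

        /* Sketch of the paca search in pSeries_secondary_smp_init above. */
        #define NR_CPUS_SK 128                    /* illustrative */

        struct paca_sketch { unsigned short hw_cpu_id; /* ... */ };

        int phys_to_logical(struct paca_sketch *paca, int phys_id)
        {
                int cpu;

                for (cpu = 0; cpu < NR_CPUS_SK; cpu++)
                        if (paca[cpu].hw_cpu_id == phys_id)
                                return cpu;       /* logical id */
                return -1;                        /* not found: kexec_wait */
        }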
1337 | #ifdef CONFIG_PPC_ISERIES | ||
1338 | _STATIC(__start_initialization_iSeries) | ||
1339 | /* Clear out the BSS */ | ||
1340 | LOADADDR(r11,__bss_stop) | ||
1341 | LOADADDR(r8,__bss_start) | ||
1342 | sub r11,r11,r8 /* bss size */ | ||
1343 | 	addi	r11,r11,7		/* round up to a multiple of 8 */ | ||
1344 | rldicl. r11,r11,61,3 /* shift right by 3 */ | ||
1345 | beq 4f | ||
1346 | addi r8,r8,-8 | ||
1347 | li r0,0 | ||
1348 | mtctr r11 /* zero this many doublewords */ | ||
1349 | 3: stdu r0,8(r8) | ||
1350 | bdnz 3b | ||
1351 | 4: | ||
1352 | LOADADDR(r1,init_thread_union) | ||
1353 | addi r1,r1,THREAD_SIZE | ||
1354 | li r0,0 | ||
1355 | stdu r0,-STACK_FRAME_OVERHEAD(r1) | ||
1356 | |||
1357 | LOADADDR(r3,cpu_specs) | ||
1358 | LOADADDR(r4,cur_cpu_spec) | ||
1359 | li r5,0 | ||
1360 | bl .identify_cpu | ||
1361 | |||
1362 | LOADADDR(r2,__toc_start) | ||
1363 | addi r2,r2,0x4000 | ||
1364 | addi r2,r2,0x4000 | ||
1365 | |||
1366 | bl .iSeries_early_setup | ||
1367 | |||
1368 | /* relocation is on at this point */ | ||
1369 | |||
1370 | b .start_here_common | ||
1371 | #endif /* CONFIG_PPC_ISERIES */ | ||
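The doubleword-zeroing loop in __start_initialization_iSeries above (repeated later in start_here_multiplatform) is equivalent to this C, including the round-up to whole doublewords:

        /* Sketch of the BSS-clearing loops in this file. */
        void clear_bss(unsigned long *bss_start, unsigned long *bss_stop)
        {
                /* Round the byte count up to whole doublewords, as the
                 * addi/rldicl pair does. */
                unsigned long n = ((unsigned long)bss_stop -
                                   (unsigned long)bss_start + 7) >> 3;

                while (n--)
                        *bss_start++ = 0;
        }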
1372 | |||
1373 | #ifdef CONFIG_PPC_MULTIPLATFORM | ||
1374 | |||
1375 | _STATIC(__mmu_off) | ||
1376 | mfmsr r3 | ||
1377 | andi. r0,r3,MSR_IR|MSR_DR | ||
1378 | beqlr | ||
1379 | andc r3,r3,r0 | ||
1380 | mtspr SPRN_SRR0,r4 | ||
1381 | mtspr SPRN_SRR1,r3 | ||
1382 | sync | ||
1383 | rfid | ||
1384 | b . /* prevent speculative execution */ | ||
1385 | |||
1386 | |||
1387 | /* | ||
1388 | * Here is our main kernel entry point. We currently support two kinds of | ||
1389 | * entries, depending on the value of r5. | ||
1390 | * | ||
1391 | * r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content | ||
1392 | * in r3...r7 | ||
1393 | * | ||
1394 | * r5 == NULL -> kexec style entry. r3 is a physical pointer to the | ||
1395 | * DT block, r4 is a physical pointer to the kernel itself | ||
1396 | * | ||
1397 | */ | ||
1398 | _GLOBAL(__start_initialization_multiplatform) | ||
1399 | /* | ||
1400 | 	 * Are we booted from a PROM OF-type client interface? | ||
1401 | */ | ||
1402 | cmpldi cr0,r5,0 | ||
1403 | bne .__boot_from_prom /* yes -> prom */ | ||
1404 | |||
1405 | /* Save parameters */ | ||
1406 | mr r31,r3 | ||
1407 | mr r30,r4 | ||
1408 | |||
1409 | 	/* Make sure we are running in 64-bit mode */ | ||
1410 | bl .enable_64b_mode | ||
1411 | |||
1412 | /* Setup some critical 970 SPRs before switching MMU off */ | ||
1413 | bl .__970_cpu_preinit | ||
1414 | |||
1415 | /* cpu # */ | ||
1416 | li r24,0 | ||
1417 | |||
1418 | /* Switch off MMU if not already */ | ||
1419 | LOADADDR(r4, .__after_prom_start - KERNELBASE) | ||
1420 | add r4,r4,r30 | ||
1421 | bl .__mmu_off | ||
1422 | b .__after_prom_start | ||
1423 | |||
1424 | _STATIC(__boot_from_prom) | ||
1425 | /* Save parameters */ | ||
1426 | mr r31,r3 | ||
1427 | mr r30,r4 | ||
1428 | mr r29,r5 | ||
1429 | mr r28,r6 | ||
1430 | mr r27,r7 | ||
1431 | |||
1432 | 	/* Make sure we are running in 64-bit mode */ | ||
1433 | bl .enable_64b_mode | ||
1434 | |||
1435 | /* put a relocation offset into r3 */ | ||
1436 | bl .reloc_offset | ||
1437 | |||
1438 | LOADADDR(r2,__toc_start) | ||
1439 | addi r2,r2,0x4000 | ||
1440 | addi r2,r2,0x4000 | ||
1441 | |||
1442 | /* Relocate the TOC from a virt addr to a real addr */ | ||
1443 | sub r2,r2,r3 | ||
1444 | |||
1445 | /* Restore parameters */ | ||
1446 | mr r3,r31 | ||
1447 | mr r4,r30 | ||
1448 | mr r5,r29 | ||
1449 | mr r6,r28 | ||
1450 | mr r7,r27 | ||
1451 | |||
1452 | /* Do all of the interaction with OF client interface */ | ||
1453 | bl .prom_init | ||
1454 | /* We never return */ | ||
1455 | trap | ||
1456 | |||
1457 | /* | ||
1458 | * At this point, r3 contains the physical address we are running at, | ||
1459 | * returned by prom_init() | ||
1460 | */ | ||
1461 | _STATIC(__after_prom_start) | ||
1462 | |||
1463 | /* | ||
1464 | * We need to run with __start at physical address 0. | ||
1465 | * This will leave some code in the first 256B of | ||
1466 | 	 * real memory, which is reserved for software use. | ||
1467 | * The remainder of the first page is loaded with the fixed | ||
1468 | * interrupt vectors. The next two pages are filled with | ||
1469 | * unknown exception placeholders. | ||
1470 | * | ||
1471 | * Note: This process overwrites the OF exception vectors. | ||
1472 | * r26 == relocation offset | ||
1473 | * r27 == KERNELBASE | ||
1474 | */ | ||
1475 | bl .reloc_offset | ||
1476 | mr r26,r3 | ||
1477 | SET_REG_TO_CONST(r27,KERNELBASE) | ||
1478 | |||
1479 | li r3,0 /* target addr */ | ||
1480 | |||
1481 | // XXX FIXME: Use phys returned by OF (r30) | ||
1482 | sub r4,r27,r26 /* source addr */ | ||
1483 | /* current address of _start */ | ||
1484 | /* i.e. where we are running */ | ||
1485 | /* the source addr */ | ||
1486 | |||
1487 | LOADADDR(r5,copy_to_here) /* # bytes of memory to copy */ | ||
1488 | sub r5,r5,r27 | ||
1489 | |||
1490 | li r6,0x100 /* Start offset, the first 0x100 */ | ||
1491 | /* bytes were copied earlier. */ | ||
1492 | |||
1493 | bl .copy_and_flush /* copy the first n bytes */ | ||
1494 | /* this includes the code being */ | ||
1495 | /* executed here. */ | ||
1496 | |||
1497 | LOADADDR(r0, 4f) /* Jump to the copy of this code */ | ||
1498 | mtctr r0 /* that we just made/relocated */ | ||
1499 | bctr | ||
1500 | |||
1501 | 4: LOADADDR(r5,klimit) | ||
1502 | sub r5,r5,r26 | ||
1503 | ld r5,0(r5) /* get the value of klimit */ | ||
1504 | sub r5,r5,r27 | ||
1505 | bl .copy_and_flush /* copy the rest */ | ||
1506 | b .start_here_multiplatform | ||
1507 | |||
1508 | #endif /* CONFIG_PPC_MULTIPLATFORM */ | ||
1509 | |||
1510 | /* | ||
1511 | * Copy routine used to copy the kernel to start at physical address 0 | ||
1512 | * and flush and invalidate the caches as needed. | ||
1513 | * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset | ||
1514 | * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5. | ||
1515 | * | ||
1516 | * Note: this routine *only* clobbers r0, r6 and lr | ||
1517 | */ | ||
1518 | _GLOBAL(copy_and_flush) | ||
1519 | addi r5,r5,-8 | ||
1520 | addi r6,r6,-8 | ||
1521 | 4: li r0,16 /* Use the least common */ | ||
1522 | /* denominator cache line */ | ||
1523 | /* size. This results in */ | ||
1524 | /* extra cache line flushes */ | ||
1525 | /* but operation is correct. */ | ||
1526 | /* Can't get cache line size */ | ||
1527 | /* from NACA as it is being */ | ||
1528 | /* moved too. */ | ||
1529 | |||
1530 | mtctr r0 /* put # words/line in ctr */ | ||
1531 | 3: addi r6,r6,8 /* copy a cache line */ | ||
1532 | ldx r0,r6,r4 | ||
1533 | stdx r0,r6,r3 | ||
1534 | bdnz 3b | ||
1535 | dcbst r6,r3 /* write it to memory */ | ||
1536 | sync | ||
1537 | icbi r6,r3 /* flush the icache line */ | ||
1538 | cmpld 0,r6,r5 | ||
1539 | blt 4b | ||
1540 | sync | ||
1541 | addi r5,r5,8 | ||
1542 | addi r6,r6,8 | ||
1543 | blr | ||
1544 | |||
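A C restatement of copy_and_flush's contract may help; flush_line() below stands in for the dcbst/sync/icbi sequence, and the 128-byte chunk matches the conservative 16-doubleword line size the comments above justify:

        /* Sketch of copy_and_flush above: copy [src+off, src+limit) to
         * dest+off in 128-byte chunks, flushing each chunk so the
         * relocated kernel is coherent in the icache. */
        extern void flush_line(void *p);          /* illustrative stand-in */

        unsigned long copy_and_flush_sketch(char *dest, const char *src,
                                            unsigned long limit,
                                            unsigned long off)
        {
                while (off < limit) {
                        unsigned long i;
                        for (i = 0; i < 128; i += 8, off += 8)
                                *(unsigned long *)(dest + off) =
                                        *(const unsigned long *)(src + off);
                        flush_line(dest + off - 128);
                }
                return off;       /* updated to be >= limit, as documented */
        }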
1545 | .align 8 | ||
1546 | copy_to_here: | ||
1547 | |||
1548 | #ifdef CONFIG_SMP | ||
1549 | #ifdef CONFIG_PPC_PMAC | ||
1550 | /* | ||
1551 | * On PowerMac, secondary processors start from the reset vector, which | ||
1552 | * is temporarily turned into a call to one of the functions below. | ||
1553 | */ | ||
1554 | .section ".text"; | ||
1555 | .align 2 ; | ||
1556 | |||
1557 | .globl pmac_secondary_start_1 | ||
1558 | pmac_secondary_start_1: | ||
1559 | li r24, 1 | ||
1560 | b .pmac_secondary_start | ||
1561 | |||
1562 | .globl pmac_secondary_start_2 | ||
1563 | pmac_secondary_start_2: | ||
1564 | li r24, 2 | ||
1565 | b .pmac_secondary_start | ||
1566 | |||
1567 | .globl pmac_secondary_start_3 | ||
1568 | pmac_secondary_start_3: | ||
1569 | li r24, 3 | ||
1570 | b .pmac_secondary_start | ||
1571 | |||
1572 | _GLOBAL(pmac_secondary_start) | ||
1573 | /* turn on 64-bit mode */ | ||
1574 | bl .enable_64b_mode | ||
1575 | isync | ||
1576 | |||
1577 | /* Copy some CPU settings from CPU 0 */ | ||
1578 | bl .__restore_cpu_setup | ||
1579 | |||
1580 | 	/* pSeries does this early, though I don't think we really need it */ | ||
1581 | mfmsr r3 | ||
1582 | ori r3,r3,MSR_RI | ||
1583 | mtmsrd r3 /* RI on */ | ||
1584 | |||
1585 | /* Set up a paca value for this processor. */ | ||
1586 | LOADADDR(r4, paca) /* Get base vaddr of paca array */ | ||
1587 | mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */ | ||
1588 | add r13,r13,r4 /* for this processor. */ | ||
1589 | mtspr SPRG3,r13 /* Save vaddr of paca in SPRG3 */ | ||
1590 | |||
1591 | /* Create a temp kernel stack for use before relocation is on. */ | ||
1592 | ld r1,PACAEMERGSP(r13) | ||
1593 | subi r1,r1,STACK_FRAME_OVERHEAD | ||
1594 | |||
1595 | b .__secondary_start | ||
1596 | |||
1597 | #endif /* CONFIG_PPC_PMAC */ | ||
1598 | |||
1599 | /* | ||
1600 | * This function is called after the master CPU has released the | ||
1601 | * secondary processors. Execution here runs with relocation off. | ||
1602 | * The paca for this processor has the following fields initialized at | ||
1603 | * this point: | ||
1604 | * 1. Processor number | ||
1605 | * 2. Segment table pointer (virtual address) | ||
1606 | * On entry the following are set: | ||
1607 | * r1 = stack pointer. vaddr for iSeries, raddr (temp stack) for pSeries | ||
1608 | * r24 = cpu# (in Linux terms) | ||
1609 | * r13 = paca virtual address | ||
1610 | * SPRG3 = paca virtual address | ||
1611 | */ | ||
1612 | _GLOBAL(__secondary_start) | ||
1613 | |||
1614 | HMT_MEDIUM /* Set thread priority to MEDIUM */ | ||
1615 | |||
1616 | ld r2,PACATOC(r13) | ||
1617 | li r6,0 | ||
1618 | stb r6,PACAPROCENABLED(r13) | ||
1619 | |||
1620 | #ifndef CONFIG_PPC_ISERIES | ||
1621 | /* Initialize the page table pointer register. */ | ||
1622 | LOADADDR(r6,_SDR1) | ||
1623 | ld r6,0(r6) /* get the value of _SDR1 */ | ||
1624 | mtspr SDR1,r6 /* set the htab location */ | ||
1625 | #endif | ||
1626 | /* Initialize the first segment table (or SLB) entry */ | ||
1627 | ld r3,PACASTABVIRT(r13) /* get addr of segment table */ | ||
1628 | bl .stab_initialize | ||
1629 | |||
1630 | /* Initialize the kernel stack. Just a repeat for iSeries. */ | ||
1631 | LOADADDR(r3,current_set) | ||
1632 | sldi r28,r24,3 /* get current_set[cpu#] */ | ||
1633 | ldx r1,r3,r28 | ||
1634 | addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD | ||
1635 | std r1,PACAKSAVE(r13) | ||
1636 | |||
1637 | ld r3,PACASTABREAL(r13) /* get raddr of segment table */ | ||
1638 | ori r4,r3,1 /* turn on valid bit */ | ||
1639 | |||
1640 | #ifdef CONFIG_PPC_ISERIES | ||
1641 | li r0,-1 /* hypervisor call */ | ||
1642 | li r3,1 | ||
1643 | sldi r3,r3,63 /* 0x8000000000000000 */ | ||
1644 | ori r3,r3,4 /* 0x8000000000000004 */ | ||
1645 | sc /* HvCall_setASR */ | ||
1646 | #else | ||
1647 | /* set the ASR */ | ||
1648 | ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */ | ||
1649 | ld r3,0(r3) | ||
1650 | lwz r3,PLATFORM(r3) /* r3 = platform flags */ | ||
1651 | andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */ | ||
1652 | beq 98f /* branch if result is 0 */ | ||
1653 | mfspr r3,PVR | ||
1654 | srwi r3,r3,16 | ||
1655 | cmpwi r3,0x37 /* SStar */ | ||
1656 | beq 97f | ||
1657 | cmpwi r3,0x36 /* IStar */ | ||
1658 | beq 97f | ||
1659 | cmpwi r3,0x34 /* Pulsar */ | ||
1660 | bne 98f | ||
1661 | 97: li r3,H_SET_ASR /* hcall = H_SET_ASR */ | ||
1662 | HVSC /* Invoking hcall */ | ||
1663 | b 99f | ||
1664 | 98: /* !(rpa hypervisor) || !(star) */ | ||
1665 | mtasr r4 /* set the stab location */ | ||
1666 | 99: | ||
1667 | #endif | ||
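The LPAR test plus the SStar/IStar/Pulsar PVR checks above pick between the H_SET_ASR hcall and a direct mtasr. The decision logic, restated as a C sketch (the register writes are left as comments since they have no C equivalent):

        /* Sketch of the set-ASR policy above: on (I/S)Star and Pulsar
         * LPARs the hypervisor owns the ASR, so use the hcall; everywhere
         * else write the register directly. */
        void set_asr_sketch(unsigned int pvr_ver, int is_lpar,
                            unsigned long stab_real)
        {
                unsigned long asr = stab_real | 1;   /* valid bit */
                int star = (pvr_ver == 0x37 ||       /* SStar  */
                            pvr_ver == 0x36 ||       /* IStar  */
                            pvr_ver == 0x34);        /* Pulsar */

                if (is_lpar && star) {
                        /* H_SET_ASR hcall with asr (sketch) */
                } else {
                        /* mtasr equivalent: direct SPR write (sketch) */
                }
                (void)asr;
        }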
1668 | li r7,0 | ||
1669 | mtlr r7 | ||
1670 | |||
1671 | /* enable MMU and jump to start_secondary */ | ||
1672 | LOADADDR(r3,.start_secondary_prolog) | ||
1673 | SET_REG_TO_CONST(r4, MSR_KERNEL) | ||
1674 | #ifdef DO_SOFT_DISABLE | ||
1675 | ori r4,r4,MSR_EE | ||
1676 | #endif | ||
1677 | mtspr SRR0,r3 | ||
1678 | mtspr SRR1,r4 | ||
1679 | rfid | ||
1680 | b . /* prevent speculative execution */ | ||
1681 | |||
1682 | /* | ||
1683 | * Running with relocation on at this point. All we want to do is | ||
1684 | * zero the stack back-chain pointer before going into C code. | ||
1685 | */ | ||
1686 | _GLOBAL(start_secondary_prolog) | ||
1687 | li r3,0 | ||
1688 | std r3,0(r1) /* Zero the stack frame pointer */ | ||
1689 | bl .start_secondary | ||
1690 | #endif | ||
1691 | |||
1692 | /* | ||
1693 | * This subroutine clobbers r11 and r12 | ||
1694 | */ | ||
1695 | _GLOBAL(enable_64b_mode) | ||
1696 | mfmsr r11 /* grab the current MSR */ | ||
1697 | li r12,1 | ||
1698 | rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG) | ||
1699 | or r11,r11,r12 | ||
1700 | li r12,1 | ||
1701 | rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG) | ||
1702 | or r11,r11,r12 | ||
1703 | mtmsrd r11 | ||
1704 | isync | ||
1705 | blr | ||
1706 | |||
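enable_64b_mode just ORs two bits into the MSR; in C, assuming the usual MSR_SF_LG == 63 and MSR_ISF_LG == 61 bit positions:

        /* Sketch of enable_64b_mode above: set MSR_SF (64-bit mode)
         * and MSR_ISF (64-bit interrupt mode). Bit positions assumed. */
        unsigned long long enable_64b_bits(unsigned long long msr)
        {
                return msr | (1ULL << 63) | (1ULL << 61);
        }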
1707 | #ifdef CONFIG_PPC_MULTIPLATFORM | ||
1708 | /* | ||
1709 | * This is where the main kernel code starts. | ||
1710 | */ | ||
1711 | _STATIC(start_here_multiplatform) | ||
1712 | /* get a new offset, now that the kernel has moved. */ | ||
1713 | bl .reloc_offset | ||
1714 | mr r26,r3 | ||
1715 | |||
1716 | 	/* Clear out the BSS. It may have been done in prom_init | ||
1717 | 	 * already, but that's irrelevant since prom_init will soon | ||
1718 | * be detached from the kernel completely. Besides, we need | ||
1719 | * to clear it now for kexec-style entry. | ||
1720 | */ | ||
1721 | LOADADDR(r11,__bss_stop) | ||
1722 | LOADADDR(r8,__bss_start) | ||
1723 | sub r11,r11,r8 /* bss size */ | ||
1724 | 	addi	r11,r11,7		/* round up to a multiple of 8 */ | ||
1725 | rldicl. r11,r11,61,3 /* shift right by 3 */ | ||
1726 | beq 4f | ||
1727 | addi r8,r8,-8 | ||
1728 | li r0,0 | ||
1729 | mtctr r11 /* zero this many doublewords */ | ||
1730 | 3: stdu r0,8(r8) | ||
1731 | bdnz 3b | ||
1732 | 4: | ||
1733 | |||
1734 | mfmsr r6 | ||
1735 | ori r6,r6,MSR_RI | ||
1736 | mtmsrd r6 /* RI on */ | ||
1737 | |||
1738 | #ifdef CONFIG_HMT | ||
1739 | /* Start up the second thread on cpu 0 */ | ||
1740 | mfspr r3,PVR | ||
1741 | srwi r3,r3,16 | ||
1742 | cmpwi r3,0x34 /* Pulsar */ | ||
1743 | beq 90f | ||
1744 | cmpwi r3,0x36 /* Icestar */ | ||
1745 | beq 90f | ||
1746 | cmpwi r3,0x37 /* SStar */ | ||
1747 | beq 90f | ||
1748 | b 91f /* HMT not supported */ | ||
1749 | 90: li r3,0 | ||
1750 | bl .hmt_start_secondary | ||
1751 | 91: | ||
1752 | #endif | ||
1753 | |||
1754 | /* The following gets the stack and TOC set up with the regs */ | ||
1755 | /* pointing to the real addr of the kernel stack. This is */ | ||
1756 | /* all done to support the C function call below which sets */ | ||
1757 | /* up the htab. This is done because we have relocated the */ | ||
1758 | /* kernel but are still running in real mode. */ | ||
1759 | |||
1760 | LOADADDR(r3,init_thread_union) | ||
1761 | sub r3,r3,r26 | ||
1762 | |||
1763 | /* set up a stack pointer (physical address) */ | ||
1764 | addi r1,r3,THREAD_SIZE | ||
1765 | li r0,0 | ||
1766 | stdu r0,-STACK_FRAME_OVERHEAD(r1) | ||
1767 | |||
1768 | /* set up the TOC (physical address) */ | ||
1769 | LOADADDR(r2,__toc_start) | ||
1770 | addi r2,r2,0x4000 | ||
1771 | addi r2,r2,0x4000 | ||
1772 | sub r2,r2,r26 | ||
1773 | |||
1774 | LOADADDR(r3,cpu_specs) | ||
1775 | sub r3,r3,r26 | ||
1776 | LOADADDR(r4,cur_cpu_spec) | ||
1777 | sub r4,r4,r26 | ||
1778 | mr r5,r26 | ||
1779 | bl .identify_cpu | ||
1780 | |||
1781 | /* Save some low level config HIDs of CPU0 to be copied to | ||
1782 | * other CPUs later on, or used for suspend/resume | ||
1783 | */ | ||
1784 | bl .__save_cpu_setup | ||
1785 | sync | ||
1786 | |||
1787 | 	/* Setup a valid physical PACA pointer in SPRG3 for early_setup. | ||
1788 | 	 * Note that boot_cpuid can always be 0 nowadays since there is | ||
1789 | * nowhere it can be initialized differently before we reach this | ||
1790 | * code | ||
1791 | */ | ||
1792 | LOADADDR(r27, boot_cpuid) | ||
1793 | sub r27,r27,r26 | ||
1794 | lwz r27,0(r27) | ||
1795 | |||
1796 | LOADADDR(r24, paca) /* Get base vaddr of paca array */ | ||
1797 | mulli r13,r27,PACA_SIZE /* Calculate vaddr of right paca */ | ||
1798 | add r13,r13,r24 /* for this processor. */ | ||
1799 | sub r13,r13,r26 /* convert to physical addr */ | ||
1800 | mtspr SPRG3,r13 /* PPPBBB: Temp... -Peter */ | ||
1801 | |||
1802 | /* Do very early kernel initializations, including initial hash table, | ||
1803 | * stab and slb setup before we turn on relocation. */ | ||
1804 | |||
1805 | /* Restore parameters passed from prom_init/kexec */ | ||
1806 | mr r3,r31 | ||
1807 | bl .early_setup | ||
1808 | |||
1809 | /* set the ASR */ | ||
1810 | ld r3,PACASTABREAL(r13) | ||
1811 | ori r4,r3,1 /* turn on valid bit */ | ||
1812 | ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */ | ||
1813 | ld r3,0(r3) | ||
1814 | lwz r3,PLATFORM(r3) /* r3 = platform flags */ | ||
1815 | andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */ | ||
1816 | beq 98f /* branch if result is 0 */ | ||
1817 | mfspr r3,PVR | ||
1818 | srwi r3,r3,16 | ||
1819 | cmpwi r3,0x37 /* SStar */ | ||
1820 | beq 97f | ||
1821 | cmpwi r3,0x36 /* IStar */ | ||
1822 | beq 97f | ||
1823 | cmpwi r3,0x34 /* Pulsar */ | ||
1824 | bne 98f | ||
1825 | 97: li r3,H_SET_ASR /* hcall = H_SET_ASR */ | ||
1826 | HVSC /* Invoking hcall */ | ||
1827 | b 99f | ||
1828 | 98: /* !(rpa hypervisor) || !(star) */ | ||
1829 | mtasr r4 /* set the stab location */ | ||
1830 | 99: | ||
1831 | /* Set SDR1 (hash table pointer) */ | ||
1832 | ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */ | ||
1833 | ld r3,0(r3) | ||
1834 | lwz r3,PLATFORM(r3) /* r3 = platform flags */ | ||
1835 | /* Test if bit 0 is set (LPAR bit) */ | ||
1836 | andi. r3,r3,PLATFORM_LPAR | ||
1837 | bne 98f /* branch if result is !0 */ | ||
1838 | LOADADDR(r6,_SDR1) /* Only if NOT LPAR */ | ||
1839 | sub r6,r6,r26 | ||
1840 | ld r6,0(r6) /* get the value of _SDR1 */ | ||
1841 | mtspr SDR1,r6 /* set the htab location */ | ||
1842 | 98: | ||
1843 | LOADADDR(r3,.start_here_common) | ||
1844 | SET_REG_TO_CONST(r4, MSR_KERNEL) | ||
1845 | mtspr SRR0,r3 | ||
1846 | mtspr SRR1,r4 | ||
1847 | rfid | ||
1848 | b . /* prevent speculative execution */ | ||
1849 | #endif /* CONFIG_PPC_MULTIPLATFORM */ | ||
1850 | |||
1851 | /* This is where all platforms converge execution */ | ||
1852 | _STATIC(start_here_common) | ||
1853 | /* relocation is on at this point */ | ||
1854 | |||
1855 | /* The following code sets up the SP and TOC now that we are */ | ||
1856 | /* running with translation enabled. */ | ||
1857 | |||
1858 | LOADADDR(r3,init_thread_union) | ||
1859 | |||
1860 | /* set up the stack */ | ||
1861 | addi r1,r3,THREAD_SIZE | ||
1862 | li r0,0 | ||
1863 | stdu r0,-STACK_FRAME_OVERHEAD(r1) | ||
1864 | |||
1865 | 	/* Apply the CPU-specific fixups (nop out sections not relevant | ||
1866 | 	 * to this CPU). | ||
1867 | */ | ||
1868 | li r3,0 | ||
1869 | bl .do_cpu_ftr_fixups | ||
1870 | |||
1871 | LOADADDR(r26, boot_cpuid) | ||
1872 | lwz r26,0(r26) | ||
1873 | |||
1874 | LOADADDR(r24, paca) /* Get base vaddr of paca array */ | ||
1875 | mulli r13,r26,PACA_SIZE /* Calculate vaddr of right paca */ | ||
1876 | add r13,r13,r24 /* for this processor. */ | ||
1877 | mtspr SPRG3,r13 | ||
1878 | |||
1879 | /* ptr to current */ | ||
1880 | LOADADDR(r4,init_task) | ||
1881 | std r4,PACACURRENT(r13) | ||
1882 | |||
1883 | /* Load the TOC */ | ||
1884 | ld r2,PACATOC(r13) | ||
1885 | std r1,PACAKSAVE(r13) | ||
1886 | |||
1887 | bl .setup_system | ||
1888 | |||
1889 | /* Load up the kernel context */ | ||
1890 | 5: | ||
1891 | #ifdef DO_SOFT_DISABLE | ||
1892 | li r5,0 | ||
1893 | stb r5,PACAPROCENABLED(r13) /* Soft Disabled */ | ||
1894 | mfmsr r5 | ||
1895 | ori r5,r5,MSR_EE /* Hard Enabled */ | ||
1896 | mtmsrd r5 | ||
1897 | #endif | ||
1898 | |||
1899 | bl .start_kernel | ||
1900 | |||
1901 | _GLOBAL(hmt_init) | ||
1902 | #ifdef CONFIG_HMT | ||
1903 | LOADADDR(r5, hmt_thread_data) | ||
1904 | mfspr r7,PVR | ||
1905 | srwi r7,r7,16 | ||
1906 | cmpwi r7,0x34 /* Pulsar */ | ||
1907 | beq 90f | ||
1908 | cmpwi r7,0x36 /* Icestar */ | ||
1909 | beq 91f | ||
1910 | cmpwi r7,0x37 /* SStar */ | ||
1911 | beq 91f | ||
1912 | b 101f | ||
1913 | 90: mfspr r6,PIR | ||
1914 | andi. r6,r6,0x1f | ||
1915 | b 92f | ||
1916 | 91: mfspr r6,PIR | ||
1917 | andi. r6,r6,0x3ff | ||
1918 | 92: sldi r4,r24,3 | ||
1919 | stwx r6,r5,r4 | ||
1920 | bl .hmt_start_secondary | ||
1921 | b 101f | ||
1922 | |||
1923 | __hmt_secondary_hold: | ||
1924 | LOADADDR(r5, hmt_thread_data) | ||
1925 | clrldi r5,r5,4 | ||
1926 | li r7,0 | ||
1927 | mfspr r6,PIR | ||
1928 | mfspr r8,PVR | ||
1929 | srwi r8,r8,16 | ||
1930 | cmpwi r8,0x34 | ||
1931 | bne 93f | ||
1932 | andi. r6,r6,0x1f | ||
1933 | b 103f | ||
1934 | 93: andi. r6,r6,0x3f | ||
1935 | |||
1936 | 103: lwzx r8,r5,r7 | ||
1937 | cmpw r8,r6 | ||
1938 | beq 104f | ||
1939 | addi r7,r7,8 | ||
1940 | b 103b | ||
1941 | |||
1942 | 104: addi r7,r7,4 | ||
1943 | lwzx r9,r5,r7 | ||
1944 | mr r24,r9 | ||
1945 | 101: | ||
1946 | #endif | ||
1947 | mr r3,r24 | ||
1948 | b .pSeries_secondary_smp_init | ||
1949 | |||
1950 | #ifdef CONFIG_HMT | ||
1951 | _GLOBAL(hmt_start_secondary) | ||
1952 | LOADADDR(r4,__hmt_secondary_hold) | ||
1953 | clrldi r4,r4,4 | ||
1954 | mtspr NIADORM, r4 | ||
1955 | mfspr r4, MSRDORM | ||
1956 | li r5, -65 | ||
1957 | and r4, r4, r5 | ||
1958 | mtspr MSRDORM, r4 | ||
1959 | lis r4,0xffef | ||
1960 | ori r4,r4,0x7403 | ||
1961 | mtspr TSC, r4 | ||
1962 | li r4,0x1f4 | ||
1963 | mtspr TST, r4 | ||
1964 | mfspr r4, HID0 | ||
1965 | ori r4, r4, 0x1 | ||
1966 | mtspr HID0, r4 | ||
1967 | mfspr r4, SPRN_CTRLF | ||
1968 | oris r4, r4, 0x40 | ||
1969 | mtspr SPRN_CTRLT, r4 | ||
1970 | blr | ||
1971 | #endif | ||
1972 | |||
1973 | #if defined(CONFIG_KEXEC) || (defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES)) | ||
1974 | _GLOBAL(smp_release_cpus) | ||
1975 | /* All secondary cpus are spinning on a common | ||
1976 | * spinloop, release them all now so they can start | ||
1977 | * to spin on their individual paca spinloops. | ||
1978 | 	 * For non-SMP kernels, the secondary cpus never | ||
1979 | * get out of the common spinloop. | ||
1980 | */ | ||
1981 | li r3,1 | ||
1982 | LOADADDR(r5,__secondary_hold_spinloop) | ||
1983 | std r3,0(r5) | ||
1984 | sync | ||
1985 | blr | ||
1986 | #endif /* CONFIG_KEXEC || (CONFIG_SMP && !CONFIG_PPC_ISERIES) */ | ||
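Releasing the secondaries above is one ordered store; a sketch, with the spinloop variable declared as a stand-in for the real __secondary_hold_spinloop symbol:

        /* Sketch of smp_release_cpus above: secondaries poll the
         * spinloop word; one ordered store lets them all go. */
        extern volatile unsigned long secondary_hold_spinloop; /* stand-in */

        void smp_release_cpus_sketch(void)
        {
                secondary_hold_spinloop = 1;
                __sync_synchronize();    /* models the sync instruction */
        }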
1987 | |||
1988 | |||
1989 | /* | ||
1990 | * We put a few things here that have to be page-aligned. | ||
1991 | * This stuff goes at the beginning of the bss, which is page-aligned. | ||
1992 | */ | ||
1993 | .section ".bss" | ||
1994 | |||
1995 | .align PAGE_SHIFT | ||
1996 | |||
1997 | .globl empty_zero_page | ||
1998 | empty_zero_page: | ||
1999 | .space PAGE_SIZE | ||
2000 | |||
2001 | .globl swapper_pg_dir | ||
2002 | swapper_pg_dir: | ||
2003 | .space PAGE_SIZE | ||
2004 | |||
2005 | /* | ||
2006 | * This space gets a copy of optional info passed to us by the bootstrap. | ||
2007 | * It is used to pass parameters into the kernel like root=/dev/sda1, etc. | ||
2008 | */ | ||
2009 | .globl cmd_line | ||
2010 | cmd_line: | ||
2011 | .space COMMAND_LINE_SIZE | ||
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S new file mode 100644 index 000000000000..cb1a3a54a026 --- /dev/null +++ b/arch/powerpc/kernel/head_8xx.S | |||
@@ -0,0 +1,860 @@ | |||
1 | /* | ||
2 | * arch/ppc/kernel/except_8xx.S | ||
3 | * | ||
4 | * PowerPC version | ||
5 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
6 | * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP | ||
7 | * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> | ||
8 | * Low-level exception handlers and MMU support | ||
9 | * rewritten by Paul Mackerras. | ||
10 | * Copyright (C) 1996 Paul Mackerras. | ||
11 | * MPC8xx modifications by Dan Malek | ||
12 | * Copyright (C) 1997 Dan Malek (dmalek@jlc.net). | ||
13 | * | ||
14 | * This file contains low-level support and setup for PowerPC 8xx | ||
15 | * embedded processors, including trap and interrupt dispatch. | ||
16 | * | ||
17 | * This program is free software; you can redistribute it and/or | ||
18 | * modify it under the terms of the GNU General Public License | ||
19 | * as published by the Free Software Foundation; either version | ||
20 | * 2 of the License, or (at your option) any later version. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #include <linux/config.h> | ||
25 | #include <asm/processor.h> | ||
26 | #include <asm/page.h> | ||
27 | #include <asm/mmu.h> | ||
28 | #include <asm/cache.h> | ||
29 | #include <asm/pgtable.h> | ||
30 | #include <asm/cputable.h> | ||
31 | #include <asm/thread_info.h> | ||
32 | #include <asm/ppc_asm.h> | ||
33 | #include <asm/asm-offsets.h> | ||
34 | |||
35 | /* Macro to make the code more readable. */ | ||
36 | #ifdef CONFIG_8xx_CPU6 | ||
37 | #define DO_8xx_CPU6(val, reg) \ | ||
38 | li reg, val; \ | ||
39 | stw reg, 12(r0); \ | ||
40 | lwz reg, 12(r0); | ||
41 | #else | ||
42 | #define DO_8xx_CPU6(val, reg) | ||
43 | #endif | ||
44 | .text | ||
45 | .globl _stext | ||
46 | _stext: | ||
47 | .text | ||
48 | .globl _start | ||
49 | _start: | ||
50 | |||
51 | /* MPC8xx | ||
52 | * This port was done on an MBX board with an 860. Right now I only | ||
53 | * support an ELF compressed (zImage) boot from EPPC-Bug because the | ||
54 | * code there loads up some registers before calling us: | ||
55 | * r3: ptr to board info data | ||
56 | * r4: initrd_start or if no initrd then 0 | ||
57 | * r5: initrd_end - unused if r4 is 0 | ||
58 | * r6: Start of command line string | ||
59 | * r7: End of command line string | ||
60 | * | ||
61 | * I decided to use conditional compilation instead of checking PVR and | ||
62 | * adding more processor specific branches around code I don't need. | ||
63 | * Since this is an embedded processor, I also appreciate any memory | ||
64 | * savings I can get. | ||
65 | * | ||
66 | * The MPC8xx does not have any BATs, but it supports large page sizes. | ||
67 | * We first initialize the MMU to support 8M byte pages, then load one | ||
68 | * entry into each of the instruction and data TLBs to map the first | ||
69 | * 8M 1:1. I also mapped an additional I/O space 1:1 so we can get to | ||
70 | * the "internal" processor registers before MMU_init is called. | ||
71 | * | ||
72 | * The TLB code currently contains a major hack. Since I use the condition | ||
73 | * code register, I have to save and restore it. I am out of registers, so | ||
74 | * I just store it in memory location 0 (the TLB handlers are not reentrant). | ||
75 | * To avoid making any decisions, I need to use the "segment" valid bit | ||
76 | * in the first level table, but that would require many changes to the | ||
77 | * Linux page directory/table functions that I don't want to do right now. | ||
78 | * | ||
79 | * I used to use SPRG2 for a temporary register in the TLB handler, but it | ||
80 | * has since been put to other uses. I now use a hack to save a register | ||
81 | * and the CCR at memory location 0.....Someday I'll fix this..... | ||
82 | * -- Dan | ||
83 | */ | ||
84 | .globl __start | ||
85 | __start: | ||
86 | mr r31,r3 /* save parameters */ | ||
87 | mr r30,r4 | ||
88 | mr r29,r5 | ||
89 | mr r28,r6 | ||
90 | mr r27,r7 | ||
91 | |||
92 | /* We have to turn on the MMU right away so we get cache modes | ||
93 | * set correctly. | ||
94 | */ | ||
95 | bl initial_mmu | ||
96 | |||
97 | /* We now have the lower 8 Meg mapped into TLB entries, and the caches | ||
98 | * ready to work. | ||
99 | */ | ||
100 | |||
101 | turn_on_mmu: | ||
102 | mfmsr r0 | ||
103 | ori r0,r0,MSR_DR|MSR_IR | ||
104 | mtspr SPRN_SRR1,r0 | ||
105 | lis r0,start_here@h | ||
106 | ori r0,r0,start_here@l | ||
107 | mtspr SPRN_SRR0,r0 | ||
108 | SYNC | ||
109 | rfi /* enables MMU */ | ||
110 | |||
111 | /* | ||
112 | * Exception entry code. This code runs with address translation | ||
113 | * turned off, i.e. using physical addresses. | ||
114 | * We assume sprg3 has the physical address of the current | ||
115 | * task's thread_struct. | ||
116 | */ | ||
117 | #define EXCEPTION_PROLOG \ | ||
118 | mtspr SPRN_SPRG0,r10; \ | ||
119 | mtspr SPRN_SPRG1,r11; \ | ||
120 | mfcr r10; \ | ||
121 | EXCEPTION_PROLOG_1; \ | ||
122 | EXCEPTION_PROLOG_2 | ||
123 | |||
124 | #define EXCEPTION_PROLOG_1 \ | ||
125 | mfspr r11,SPRN_SRR1; /* check whether user or kernel */ \ | ||
126 | andi. r11,r11,MSR_PR; \ | ||
127 | tophys(r11,r1); /* use tophys(r1) if kernel */ \ | ||
128 | beq 1f; \ | ||
129 | mfspr r11,SPRN_SPRG3; \ | ||
130 | lwz r11,THREAD_INFO-THREAD(r11); \ | ||
131 | addi r11,r11,THREAD_SIZE; \ | ||
132 | tophys(r11,r11); \ | ||
133 | 1: subi r11,r11,INT_FRAME_SIZE /* alloc exc. frame */ | ||
134 | |||
135 | |||
136 | #define EXCEPTION_PROLOG_2 \ | ||
137 | CLR_TOP32(r11); \ | ||
138 | stw r10,_CCR(r11); /* save registers */ \ | ||
139 | stw r12,GPR12(r11); \ | ||
140 | stw r9,GPR9(r11); \ | ||
141 | mfspr r10,SPRN_SPRG0; \ | ||
142 | stw r10,GPR10(r11); \ | ||
143 | mfspr r12,SPRN_SPRG1; \ | ||
144 | stw r12,GPR11(r11); \ | ||
145 | mflr r10; \ | ||
146 | stw r10,_LINK(r11); \ | ||
147 | mfspr r12,SPRN_SRR0; \ | ||
148 | mfspr r9,SPRN_SRR1; \ | ||
149 | stw r1,GPR1(r11); \ | ||
150 | stw r1,0(r11); \ | ||
151 | tovirt(r1,r11); /* set new kernel sp */ \ | ||
152 | li r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \ | ||
153 | MTMSRD(r10); /* (except for mach check in rtas) */ \ | ||
154 | stw r0,GPR0(r11); \ | ||
155 | SAVE_4GPRS(3, r11); \ | ||
156 | SAVE_2GPRS(7, r11) | ||
157 | |||
158 | /* | ||
159 | * Note: code which follows this uses cr0.eq (set if from kernel), | ||
160 | * r11, r12 (SRR0), and r9 (SRR1). | ||
161 | * | ||
162 | * Note2: once we have set r1 we are in a position to take exceptions | ||
163 | * again, and we could thus set MSR:RI at that point. | ||
164 | */ | ||
165 | |||
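EXCEPTION_PROLOG_1 above boils down to a stack choice: an exception from user mode gets a fresh frame at the top of the task's kernel stack, while an exception from kernel mode carves a frame below the interrupted r1. A sketch of that choice (frame and stack sizes are illustrative):

        /* Sketch of the stack selection in EXCEPTION_PROLOG_1 above. */
        #define INT_FRAME_SIZE_SK 192             /* illustrative */
        #define THREAD_SIZE_SK    8192            /* illustrative */

        unsigned long pick_exc_stack(int from_user, unsigned long kernel_r1,
                                     unsigned long thread_info_phys)
        {
                unsigned long top = from_user
                        ? thread_info_phys + THREAD_SIZE_SK /* fresh stack */
                        : kernel_r1;              /* continue current one */

                return top - INT_FRAME_SIZE_SK;   /* alloc exc. frame */
        }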
166 | /* | ||
167 | * Exception vectors. | ||
168 | */ | ||
169 | #define EXCEPTION(n, label, hdlr, xfer) \ | ||
170 | . = n; \ | ||
171 | label: \ | ||
172 | EXCEPTION_PROLOG; \ | ||
173 | addi r3,r1,STACK_FRAME_OVERHEAD; \ | ||
174 | xfer(n, hdlr) | ||
175 | |||
176 | #define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret) \ | ||
177 | li r10,trap; \ | ||
178 | stw r10,TRAP(r11); \ | ||
179 | li r10,MSR_KERNEL; \ | ||
180 | copyee(r10, r9); \ | ||
181 | bl tfer; \ | ||
182 | i##n: \ | ||
183 | .long hdlr; \ | ||
184 | .long ret | ||
185 | |||
186 | #define COPY_EE(d, s) rlwimi d,s,0,16,16 | ||
187 | #define NOCOPY(d, s) | ||
188 | |||
189 | #define EXC_XFER_STD(n, hdlr) \ | ||
190 | EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full, \ | ||
191 | ret_from_except_full) | ||
192 | |||
193 | #define EXC_XFER_LITE(n, hdlr) \ | ||
194 | EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \ | ||
195 | ret_from_except) | ||
196 | |||
197 | #define EXC_XFER_EE(n, hdlr) \ | ||
198 | EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \ | ||
199 | ret_from_except_full) | ||
200 | |||
201 | #define EXC_XFER_EE_LITE(n, hdlr) \ | ||
202 | EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \ | ||
203 | ret_from_except) | ||
204 | |||
205 | /* System reset */ | ||
206 | EXCEPTION(0x100, Reset, UnknownException, EXC_XFER_STD) | ||
207 | |||
208 | /* Machine check */ | ||
209 | . = 0x200 | ||
210 | MachineCheck: | ||
211 | EXCEPTION_PROLOG | ||
212 | mfspr r4,SPRN_DAR | ||
213 | stw r4,_DAR(r11) | ||
214 | mfspr r5,SPRN_DSISR | ||
215 | stw r5,_DSISR(r11) | ||
216 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
217 | EXC_XFER_STD(0x200, MachineCheckException) | ||
218 | |||
219 | /* Data access exception. | ||
220 | * This is "never generated" by the MPC8xx. We jump to it for other | ||
221 | * translation errors. | ||
222 | */ | ||
223 | . = 0x300 | ||
224 | DataAccess: | ||
225 | EXCEPTION_PROLOG | ||
226 | mfspr r10,SPRN_DSISR | ||
227 | stw r10,_DSISR(r11) | ||
228 | mr r5,r10 | ||
229 | mfspr r4,SPRN_DAR | ||
230 | EXC_XFER_EE_LITE(0x300, handle_page_fault) | ||
231 | |||
232 | /* Instruction access exception. | ||
233 | * This is "never generated" by the MPC8xx. We jump to it for other | ||
234 | * translation errors. | ||
235 | */ | ||
236 | . = 0x400 | ||
237 | InstructionAccess: | ||
238 | EXCEPTION_PROLOG | ||
239 | mr r4,r12 | ||
240 | mr r5,r9 | ||
241 | EXC_XFER_EE_LITE(0x400, handle_page_fault) | ||
242 | |||
243 | /* External interrupt */ | ||
244 | EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE) | ||
245 | |||
246 | /* Alignment exception */ | ||
247 | . = 0x600 | ||
248 | Alignment: | ||
249 | EXCEPTION_PROLOG | ||
250 | mfspr r4,SPRN_DAR | ||
251 | stw r4,_DAR(r11) | ||
252 | mfspr r5,SPRN_DSISR | ||
253 | stw r5,_DSISR(r11) | ||
254 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
255 | EXC_XFER_EE(0x600, AlignmentException) | ||
256 | |||
257 | /* Program check exception */ | ||
258 | EXCEPTION(0x700, ProgramCheck, ProgramCheckException, EXC_XFER_STD) | ||
259 | |||
260 | /* No FPU on MPC8xx. This exception is not supposed to happen. | ||
261 | */ | ||
262 | EXCEPTION(0x800, FPUnavailable, UnknownException, EXC_XFER_STD) | ||
263 | |||
264 | /* Decrementer */ | ||
265 | EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE) | ||
266 | |||
267 | EXCEPTION(0xa00, Trap_0a, UnknownException, EXC_XFER_EE) | ||
268 | EXCEPTION(0xb00, Trap_0b, UnknownException, EXC_XFER_EE) | ||
269 | |||
270 | /* System call */ | ||
271 | . = 0xc00 | ||
272 | SystemCall: | ||
273 | EXCEPTION_PROLOG | ||
274 | EXC_XFER_EE_LITE(0xc00, DoSyscall) | ||
275 | |||
276 | /* Single step - not used on 601 */ | ||
277 | EXCEPTION(0xd00, SingleStep, SingleStepException, EXC_XFER_STD) | ||
278 | EXCEPTION(0xe00, Trap_0e, UnknownException, EXC_XFER_EE) | ||
279 | EXCEPTION(0xf00, Trap_0f, UnknownException, EXC_XFER_EE) | ||
280 | |||
281 | /* On the MPC8xx, this is a software emulation interrupt. It occurs | ||
282 | * for all unimplemented and illegal instructions. | ||
283 | */ | ||
284 | EXCEPTION(0x1000, SoftEmu, SoftwareEmulation, EXC_XFER_STD) | ||
285 | |||
286 | . = 0x1100 | ||
287 | /* | ||
288 | * For the MPC8xx, this is a software tablewalk to load the instruction | ||
289 | * TLB. It is modelled after the example in the Motorola manual. The task | ||
290 | * switch loads the M_TWB register with the pointer to the first level table. | ||
291 | * If we discover there is no second level table (value is zero) or if there | ||
292 | * is an invalid pte, we load that into the TLB, which causes another fault | ||
293 | * into the TLB Error interrupt where we can handle such problems. | ||
294 | * We have to use the MD_xxx registers for the tablewalk because the | ||
295 | * equivalent MI_xxx registers only perform the attribute functions. | ||
296 | */ | ||
297 | InstructionTLBMiss: | ||
298 | #ifdef CONFIG_8xx_CPU6 | ||
299 | stw r3, 8(r0) | ||
300 | #endif | ||
301 | DO_8xx_CPU6(0x3f80, r3) | ||
302 | mtspr SPRN_M_TW, r10 /* Save a couple of working registers */ | ||
303 | mfcr r10 | ||
304 | stw r10, 0(r0) | ||
305 | stw r11, 4(r0) | ||
306 | mfspr r10, SPRN_SRR0 /* Get effective address of fault */ | ||
307 | DO_8xx_CPU6(0x3780, r3) | ||
308 | mtspr SPRN_MD_EPN, r10 /* Have to use MD_EPN for walk, MI_EPN can't */ | ||
309 | mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */ | ||
310 | |||
311 | /* If we are faulting a kernel address, we have to use the | ||
312 | * kernel page tables. | ||
313 | */ | ||
314 | andi. r11, r10, 0x0800 /* Address >= 0x80000000 */ | ||
315 | beq 3f | ||
316 | lis r11, swapper_pg_dir@h | ||
317 | ori r11, r11, swapper_pg_dir@l | ||
318 | rlwimi r10, r11, 0, 2, 19 | ||
319 | 3: | ||
320 | lwz r11, 0(r10) /* Get the level 1 entry */ | ||
321 | rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */ | ||
322 | beq 2f /* If zero, don't try to find a pte */ | ||
323 | |||
324 | /* We have a pte table, so load the MI_TWC with the attributes | ||
325 | * for this "segment." | ||
326 | */ | ||
327 | ori r11,r11,1 /* Set valid bit */ | ||
328 | DO_8xx_CPU6(0x2b80, r3) | ||
329 | mtspr SPRN_MI_TWC, r11 /* Set segment attributes */ | ||
330 | DO_8xx_CPU6(0x3b80, r3) | ||
331 | mtspr SPRN_MD_TWC, r11 /* Load pte table base address */ | ||
332 | mfspr r11, SPRN_MD_TWC /* ....and get the pte address */ | ||
333 | lwz r10, 0(r11) /* Get the pte */ | ||
334 | |||
335 | ori r10, r10, _PAGE_ACCESSED | ||
336 | stw r10, 0(r11) | ||
337 | |||
338 | /* The Linux PTE won't go exactly into the MMU TLB. | ||
339 | * Software indicator bits 21, 22 and 28 must be clear. | ||
340 | * Software indicator bits 24, 25, 26, and 27 must be | ||
341 | * set. All other Linux PTE bits control the behavior | ||
342 | * of the MMU. | ||
343 | */ | ||
344 | 2: li r11, 0x00f0 | ||
345 | rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */ | ||
346 | DO_8xx_CPU6(0x2d80, r3) | ||
347 | mtspr SPRN_MI_RPN, r10 /* Update TLB entry */ | ||
348 | |||
349 | mfspr r10, SPRN_M_TW /* Restore registers */ | ||
350 | lwz r11, 0(r0) | ||
351 | mtcr r11 | ||
352 | lwz r11, 4(r0) | ||
353 | #ifdef CONFIG_8xx_CPU6 | ||
354 | lwz r3, 8(r0) | ||
355 | #endif | ||
356 | rfi | ||
357 | |||
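As a reading aid, here is a minimal C sketch of the two-level walk InstructionTLBMiss performs above; names are illustrative, not kernel API, and the MD_EPN/M_TWB/MD_TWC registers do in hardware the address formation that the array indexing does here. It assumes 4K pages, 1024-entry tables, and the 2GB kernel/user split tested by the 0x0800 check:

	/* Illustrative sketch of the software tablewalk, not kernel API. */
	extern unsigned long swapper_pg_dir[1024];

	unsigned long *pte_for(unsigned long ea, unsigned long *pgdir)
	{
		unsigned long l1;

		if (ea & 0x80000000)		/* kernel address? */
			pgdir = swapper_pg_dir;	/* use the kernel page tables */
		l1 = pgdir[ea >> 22];		/* level 1 (pgd) entry */
		if (!(l1 & ~0xfffUL))
			return 0;		/* no pte table: take the TLB error path */
		return (unsigned long *)(l1 & ~0xfffUL) + ((ea >> 12) & 0x3ff);
	}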
358 | . = 0x1200 | ||
359 | DataStoreTLBMiss: | ||
360 | #ifdef CONFIG_8xx_CPU6 | ||
361 | stw r3, 8(r0) | ||
362 | #endif | ||
363 | DO_8xx_CPU6(0x3f80, r3) | ||
364 | mtspr SPRN_M_TW, r10 /* Save a couple of working registers */ | ||
365 | mfcr r10 | ||
366 | stw r10, 0(r0) | ||
367 | stw r11, 4(r0) | ||
368 | mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */ | ||
369 | |||
370 | /* If we are faulting a kernel address, we have to use the | ||
371 | * kernel page tables. | ||
372 | */ | ||
373 | andi. r11, r10, 0x0800 | ||
374 | beq 3f | ||
375 | lis r11, swapper_pg_dir@h | ||
376 | ori r11, r11, swapper_pg_dir@l | ||
377 | rlwimi r10, r11, 0, 2, 19 | ||
378 | 3: | ||
379 | lwz r11, 0(r10) /* Get the level 1 entry */ | ||
380 | rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */ | ||
381 | beq 2f /* If zero, don't try to find a pte */ | ||
382 | |||
383 | /* We have a pte table, so fetch the pte from the table. | ||
384 | */ | ||
385 | ori r11, r11, 1 /* Set valid bit in physical L2 page */ | ||
386 | DO_8xx_CPU6(0x3b80, r3) | ||
387 | mtspr SPRN_MD_TWC, r11 /* Load pte table base address */ | ||
388 | mfspr r10, SPRN_MD_TWC /* ....and get the pte address */ | ||
389 | lwz r10, 0(r10) /* Get the pte */ | ||
390 | |||
391 | /* Insert the Guarded flag into the TWC from the Linux PTE. | ||
392 | * It is bit 27 of both the Linux PTE and the TWC (at least | ||
393 | * I got that right :-). It will be better when we can put | ||
394 | * this into the Linux pgd/pmd and load it in the operation | ||
395 | * above. | ||
396 | */ | ||
397 | rlwimi r11, r10, 0, 27, 27 | ||
398 | DO_8xx_CPU6(0x3b80, r3) | ||
399 | mtspr SPRN_MD_TWC, r11 | ||
400 | |||
401 | mfspr r11, SPRN_MD_TWC /* get the pte address again */ | ||
402 | ori r10, r10, _PAGE_ACCESSED | ||
403 | stw r10, 0(r11) | ||
404 | |||
405 | /* The Linux PTE won't go exactly into the MMU TLB. | ||
406 | * Software indicator bits 21, 22 and 28 must be clear. | ||
407 | * Software indicator bits 24, 25, 26, and 27 must be | ||
408 | * set. All other Linux PTE bits control the behavior | ||
409 | * of the MMU. | ||
410 | */ | ||
411 | 2: li r11, 0x00f0 | ||
412 | rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */ | ||
413 | DO_8xx_CPU6(0x3d80, r3) | ||
414 | mtspr SPRN_MD_RPN, r10 /* Update TLB entry */ | ||
415 | |||
416 | mfspr r10, SPRN_M_TW /* Restore registers */ | ||
417 | lwz r11, 0(r0) | ||
418 | mtcr r11 | ||
419 | lwz r11, 4(r0) | ||
420 | #ifdef CONFIG_8xx_CPU6 | ||
421 | lwz r3, 8(r0) | ||
422 | #endif | ||
423 | rfi | ||
424 | |||
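The "won't go exactly into the MMU TLB" fixup applied by both miss handlers before writing the RPN register is a single merge. Stated in C on the 32-bit value (PowerPC big-endian bit 24 is value bit 7, bit 28 is value bit 3):

	/* li r11,0x00f0 followed by rlwimi r10,r11,0,24,28 is equivalent to: */
	pte = (pte & ~0x000000f8UL) | 0x000000f0UL;	/* set bits 24-27, clear bit 28 */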
425 | /* This is an instruction TLB error on the MPC8xx. This could be due | ||
426 | * to many reasons, such as executing guarded memory or illegal instruction | ||
427 | * addresses. There is nothing to do but handle a big time error fault. | ||
428 | */ | ||
429 | . = 0x1300 | ||
430 | InstructionTLBError: | ||
431 | b InstructionAccess | ||
432 | |||
433 | /* This is the data TLB error on the MPC8xx. This could be due to | ||
434 | * many reasons, including a dirty update to a pte. We can catch that | ||
435 | * one here, but anything else is an error. First, we track down the | ||
436 | * Linux pte. If it is valid, write access is allowed, and the | ||
437 | * page dirty bit is not set, we will set it and reload the TLB. For | ||
438 | * any other case, we bail out to a higher level function that can | ||
439 | * handle it. | ||
440 | */ | ||
441 | . = 0x1400 | ||
442 | DataTLBError: | ||
443 | #ifdef CONFIG_8xx_CPU6 | ||
444 | stw r3, 8(r0) | ||
445 | #endif | ||
446 | DO_8xx_CPU6(0x3f80, r3) | ||
447 | mtspr SPRN_M_TW, r10 /* Save a couple of working registers */ | ||
448 | mfcr r10 | ||
449 | stw r10, 0(r0) | ||
450 | stw r11, 4(r0) | ||
451 | |||
452 | /* First, make sure this was a store operation. | ||
453 | */ | ||
454 | mfspr r10, SPRN_DSISR | ||
455 | andis. r11, r10, 0x0200 /* If set, indicates store op */ | ||
456 | beq 2f | ||
457 | |||
458 | /* The EA of a data TLB miss is automatically stored in the MD_EPN | ||
459 | * register. The EA of a data TLB error is automatically stored in | ||
460 | * the DAR, but not the MD_EPN register. We must copy the 20 most | ||
461 | * significant bits of the EA from the DAR to MD_EPN before we | ||
462 | * start walking the page tables. We also need to copy the CASID | ||
463 | * value from the M_CASID register. | ||
464 | * Addendum: The EA of a data TLB error is _supposed_ to be stored | ||
465 | * in DAR, but it seems that this doesn't happen in some cases, such | ||
466 | * as when the error is due to a dcbi instruction to a page with a | ||
467 | * TLB that doesn't have the changed bit set. In such cases, there | ||
468 | * does not appear to be any way to recover the EA of the error | ||
469 | * since it is neither in DAR nor MD_EPN. As a workaround, the | ||
470 | * _PAGE_HWWRITE bit is set for all kernel data pages when the PTEs | ||
471 | * are initialized in mapin_ram(). This will avoid the problem, | ||
472 | * assuming we only use the dcbi instruction on kernel addresses. | ||
473 | */ | ||
474 | mfspr r10, SPRN_DAR | ||
475 | rlwinm r11, r10, 0, 0, 19 | ||
476 | ori r11, r11, MD_EVALID | ||
477 | mfspr r10, SPRN_M_CASID | ||
478 | rlwimi r11, r10, 0, 28, 31 | ||
479 | DO_8xx_CPU6(0x3780, r3) | ||
480 | mtspr SPRN_MD_EPN, r11 | ||
481 | |||
482 | mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */ | ||
483 | |||
484 | /* If we are faulting a kernel address, we have to use the | ||
485 | * kernel page tables. | ||
486 | */ | ||
487 | andi. r11, r10, 0x0800 | ||
488 | beq 3f | ||
489 | lis r11, swapper_pg_dir@h | ||
490 | ori r11, r11, swapper_pg_dir@l | ||
491 | rlwimi r10, r11, 0, 2, 19 | ||
492 | 3: | ||
493 | lwz r11, 0(r10) /* Get the level 1 entry */ | ||
494 | rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */ | ||
495 | beq 2f /* If zero, bail */ | ||
496 | |||
497 | /* We have a pte table, so fetch the pte from the table. | ||
498 | */ | ||
499 | ori r11, r11, 1 /* Set valid bit in physical L2 page */ | ||
500 | DO_8xx_CPU6(0x3b80, r3) | ||
501 | mtspr SPRN_MD_TWC, r11 /* Load pte table base address */ | ||
502 | mfspr r11, SPRN_MD_TWC /* ....and get the pte address */ | ||
503 | lwz r10, 0(r11) /* Get the pte */ | ||
504 | |||
505 | andi. r11, r10, _PAGE_RW /* Is it writeable? */ | ||
506 | beq 2f /* Bail out if not */ | ||
507 | |||
508 | /* Update 'changed', among others. | ||
509 | */ | ||
510 | ori r10, r10, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE | ||
511 | mfspr r11, SPRN_MD_TWC /* Get pte address again */ | ||
512 | stw r10, 0(r11) /* and update pte in table */ | ||
513 | |||
514 | /* The Linux PTE won't go exactly into the MMU TLB. | ||
515 | * Software indicator bits 21, 22 and 28 must be clear. | ||
516 | * Software indicator bits 24, 25, 26, and 27 must be | ||
517 | * set. All other Linux PTE bits control the behavior | ||
518 | * of the MMU. | ||
519 | */ | ||
520 | li r11, 0x00f0 | ||
521 | rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */ | ||
522 | DO_8xx_CPU6(0x3d80, r3) | ||
523 | mtspr SPRN_MD_RPN, r10 /* Update TLB entry */ | ||
524 | |||
525 | mfspr r10, SPRN_M_TW /* Restore registers */ | ||
526 | lwz r11, 0(r0) | ||
527 | mtcr r11 | ||
528 | lwz r11, 4(r0) | ||
529 | #ifdef CONFIG_8xx_CPU6 | ||
530 | lwz r3, 8(r0) | ||
531 | #endif | ||
532 | rfi | ||
533 | 2: | ||
534 | mfspr r10, SPRN_M_TW /* Restore registers */ | ||
535 | lwz r11, 0(r0) | ||
536 | mtcr r11 | ||
537 | lwz r11, 4(r0) | ||
538 | #ifdef CONFIG_8xx_CPU6 | ||
539 | lwz r3, 8(r0) | ||
540 | #endif | ||
541 | b DataAccess | ||
542 | |||
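In outline, the DataTLBError fast path implements the following decision; this is a hedged C sketch where mfspr_(), pte_for() (from the instruction-miss sketch above), reload_dtlb() and data_access_exception() are illustrative stand-ins for the SPR accesses, the MD_TWC/MD_RPN reload, and the branch to DataAccess:

	/* Outline of the DataTLBError fast path; helpers are illustrative. */
	void dtlb_error(unsigned long *pgdir)
	{
		unsigned long *pte;

		if (!(mfspr_(SPRN_DSISR) & 0x02000000))	/* store fault? */
			goto bail;			/* not a store: bail */
		pte = pte_for(mfspr_(SPRN_DAR), pgdir);
		if (!pte || !(*pte & _PAGE_RW))
			goto bail;			/* missing or read-only page */
		*pte |= _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_HWWRITE;
		reload_dtlb(*pte);			/* rewrite MD_RPN, then rfi */
		return;
	bail:
		data_access_exception();		/* the heavyweight path */
	}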
543 | EXCEPTION(0x1500, Trap_15, UnknownException, EXC_XFER_EE) | ||
544 | EXCEPTION(0x1600, Trap_16, UnknownException, EXC_XFER_EE) | ||
545 | EXCEPTION(0x1700, Trap_17, UnknownException, EXC_XFER_EE) | ||
546 | EXCEPTION(0x1800, Trap_18, UnknownException, EXC_XFER_EE) | ||
547 | EXCEPTION(0x1900, Trap_19, UnknownException, EXC_XFER_EE) | ||
548 | EXCEPTION(0x1a00, Trap_1a, UnknownException, EXC_XFER_EE) | ||
549 | EXCEPTION(0x1b00, Trap_1b, UnknownException, EXC_XFER_EE) | ||
550 | |||
551 | /* On the MPC8xx, these next four traps are used for development | ||
552 | * support of breakpoints and such. Someday I will get around to | ||
553 | * using them. | ||
554 | */ | ||
555 | EXCEPTION(0x1c00, Trap_1c, UnknownException, EXC_XFER_EE) | ||
556 | EXCEPTION(0x1d00, Trap_1d, UnknownException, EXC_XFER_EE) | ||
557 | EXCEPTION(0x1e00, Trap_1e, UnknownException, EXC_XFER_EE) | ||
558 | EXCEPTION(0x1f00, Trap_1f, UnknownException, EXC_XFER_EE) | ||
559 | |||
560 | . = 0x2000 | ||
561 | |||
562 | .globl giveup_fpu | ||
563 | giveup_fpu: | ||
564 | blr | ||
565 | |||
566 | /* | ||
567 | * This is where the main kernel code starts. | ||
568 | */ | ||
569 | start_here: | ||
570 | /* ptr to current */ | ||
571 | lis r2,init_task@h | ||
572 | ori r2,r2,init_task@l | ||
573 | |||
574 | /* ptr to phys current thread */ | ||
575 | tophys(r4,r2) | ||
576 | addi r4,r4,THREAD /* init task's THREAD */ | ||
577 | mtspr SPRN_SPRG3,r4 | ||
578 | li r3,0 | ||
579 | mtspr SPRN_SPRG2,r3 /* 0 => r1 has kernel sp */ | ||
580 | |||
581 | /* stack */ | ||
582 | lis r1,init_thread_union@ha | ||
583 | addi r1,r1,init_thread_union@l | ||
584 | li r0,0 | ||
585 | stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) | ||
586 | |||
587 | bl early_init /* We have to do this with MMU on */ | ||
588 | |||
589 | /* | ||
590 | * Decide what sort of machine this is and initialize the MMU. | ||
591 | */ | ||
592 | mr r3,r31 | ||
593 | mr r4,r30 | ||
594 | mr r5,r29 | ||
595 | mr r6,r28 | ||
596 | mr r7,r27 | ||
597 | bl machine_init | ||
598 | bl MMU_init | ||
599 | |||
600 | /* | ||
601 | * Go back to running unmapped so we can load up new values | ||
602 | * and change to using our exception vectors. | ||
603 | * On the 8xx, all we have to do is invalidate the TLB to clear | ||
604 | * the old 8M byte TLB mappings and load the page table base register. | ||
605 | */ | ||
606 | /* The right way to do this would be to track it down through | ||
607 | * init's THREAD like the context switch code does, but this is | ||
608 | * easier......until someone changes init's static structures. | ||
609 | */ | ||
610 | lis r6, swapper_pg_dir@h | ||
611 | ori r6, r6, swapper_pg_dir@l | ||
612 | tophys(r6,r6) | ||
613 | #ifdef CONFIG_8xx_CPU6 | ||
614 | lis r4, cpu6_errata_word@h | ||
615 | ori r4, r4, cpu6_errata_word@l | ||
616 | li r3, 0x3980 | ||
617 | stw r3, 12(r4) | ||
618 | lwz r3, 12(r4) | ||
619 | #endif | ||
620 | mtspr SPRN_M_TWB, r6 | ||
621 | lis r4,2f@h | ||
622 | ori r4,r4,2f@l | ||
623 | tophys(r4,r4) | ||
624 | li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR) | ||
625 | mtspr SPRN_SRR0,r4 | ||
626 | mtspr SPRN_SRR1,r3 | ||
627 | rfi | ||
628 | /* Load up the kernel context */ | ||
629 | 2: | ||
630 | SYNC /* Force all PTE updates to finish */ | ||
631 | tlbia /* Clear all TLB entries */ | ||
632 | sync /* wait for tlbia/tlbie to finish */ | ||
633 | TLBSYNC /* ... on all CPUs */ | ||
634 | |||
635 | /* set up the PTE pointers for the Abatron bdiGDB. | ||
636 | */ | ||
637 | tovirt(r6,r6) | ||
638 | lis r5, abatron_pteptrs@h | ||
639 | ori r5, r5, abatron_pteptrs@l | ||
640 | stw r5, 0xf0(r0) /* Must match your Abatron config file */ | ||
641 | tophys(r5,r5) | ||
642 | stw r6, 0(r5) | ||
643 | |||
644 | /* Now turn on the MMU for real! */ | ||
645 | li r4,MSR_KERNEL | ||
646 | lis r3,start_kernel@h | ||
647 | ori r3,r3,start_kernel@l | ||
648 | mtspr SPRN_SRR0,r3 | ||
649 | mtspr SPRN_SRR1,r4 | ||
650 | rfi /* enable MMU and jump to start_kernel */ | ||
651 | |||
652 | /* Set up the initial MMU state so we can do the first level of | ||
653 | * kernel initialization. This maps the first 8 MBytes of memory 1:1 | ||
654 | * virtual to physical. Also, set the cache mode since that is defined | ||
655 | * by TLB entries and perform any additional mapping (like of the IMMR). | ||
656 | * If configured to pin some TLBs, we pin the first 8 Mbytes of kernel, | ||
657 | * 24 Mbytes of data, and the 8M IMMR space. Anything not covered by | ||
658 | * these mappings is mapped by page tables. | ||
659 | */ | ||
660 | initial_mmu: | ||
661 | tlbia /* Invalidate all TLB entries */ | ||
662 | #ifdef CONFIG_PIN_TLB | ||
663 | lis r8, MI_RSV4I@h | ||
664 | ori r8, r8, 0x1c00 | ||
665 | #else | ||
666 | li r8, 0 | ||
667 | #endif | ||
668 | mtspr SPRN_MI_CTR, r8 /* Set instruction MMU control */ | ||
669 | |||
670 | #ifdef CONFIG_PIN_TLB | ||
671 | lis r10, (MD_RSV4I | MD_RESETVAL)@h | ||
672 | ori r10, r10, 0x1c00 | ||
673 | mr r8, r10 | ||
674 | #else | ||
675 | lis r10, MD_RESETVAL@h | ||
676 | #endif | ||
677 | #ifndef CONFIG_8xx_COPYBACK | ||
678 | oris r10, r10, MD_WTDEF@h | ||
679 | #endif | ||
680 | mtspr SPRN_MD_CTR, r10 /* Set data TLB control */ | ||
681 | |||
682 | /* Now map the lower 8 Meg into the TLBs. For this quick hack, | ||
683 | * we can load the instruction and data TLB registers with the | ||
684 | * same values. | ||
685 | */ | ||
686 | lis r8, KERNELBASE@h /* Create vaddr for TLB */ | ||
687 | ori r8, r8, MI_EVALID /* Mark it valid */ | ||
688 | mtspr SPRN_MI_EPN, r8 | ||
689 | mtspr SPRN_MD_EPN, r8 | ||
690 | li r8, MI_PS8MEG /* Set 8M byte page */ | ||
691 | ori r8, r8, MI_SVALID /* Make it valid */ | ||
692 | mtspr SPRN_MI_TWC, r8 | ||
693 | mtspr SPRN_MD_TWC, r8 | ||
694 | li r8, MI_BOOTINIT /* Create RPN for address 0 */ | ||
695 | mtspr SPRN_MI_RPN, r8 /* Store TLB entry */ | ||
696 | mtspr SPRN_MD_RPN, r8 | ||
697 | lis r8, MI_Kp@h /* Set the protection mode */ | ||
698 | mtspr SPRN_MI_AP, r8 | ||
699 | mtspr SPRN_MD_AP, r8 | ||
700 | |||
701 | /* Map another 8 MByte at the IMMR to get the processor | ||
702 | * internal registers (among other things). | ||
703 | */ | ||
704 | #ifdef CONFIG_PIN_TLB | ||
705 | addi r10, r10, 0x0100 | ||
706 | mtspr SPRN_MD_CTR, r10 | ||
707 | #endif | ||
708 | mfspr r9, 638 /* Get current IMMR */ | ||
709 | andis. r9, r9, 0xff80 /* Get 8Mbyte boundary */ | ||
710 | |||
711 | mr r8, r9 /* Create vaddr for TLB */ | ||
712 | ori r8, r8, MD_EVALID /* Mark it valid */ | ||
713 | mtspr SPRN_MD_EPN, r8 | ||
714 | li r8, MD_PS8MEG /* Set 8M byte page */ | ||
715 | ori r8, r8, MD_SVALID /* Make it valid */ | ||
716 | mtspr SPRN_MD_TWC, r8 | ||
717 | mr r8, r9 /* Create paddr for TLB */ | ||
718 | ori r8, r8, MI_BOOTINIT|0x2 /* Inhibit cache -- Cort */ | ||
719 | mtspr SPRN_MD_RPN, r8 | ||
720 | |||
721 | #ifdef CONFIG_PIN_TLB | ||
722 | /* Map two more 8M kernel data pages. | ||
723 | */ | ||
724 | addi r10, r10, 0x0100 | ||
725 | mtspr SPRN_MD_CTR, r10 | ||
726 | |||
727 | lis r8, KERNELBASE@h /* Create vaddr for TLB */ | ||
728 | addis r8, r8, 0x0080 /* Add 8M */ | ||
729 | ori r8, r8, MI_EVALID /* Mark it valid */ | ||
730 | mtspr SPRN_MD_EPN, r8 | ||
731 | li r9, MI_PS8MEG /* Set 8M byte page */ | ||
732 | ori r9, r9, MI_SVALID /* Make it valid */ | ||
733 | mtspr SPRN_MD_TWC, r9 | ||
734 | li r11, MI_BOOTINIT /* Create RPN for address 0 */ | ||
735 | addis r11, r11, 0x0080 /* Add 8M */ | ||
736 | mtspr SPRN_MD_RPN, r11 /* Store TLB entry (RPN is in r11) */ | ||
737 | |||
738 | addis r8, r8, 0x0080 /* Add 8M */ | ||
739 | mtspr SPRN_MD_EPN, r8 | ||
740 | mtspr SPRN_MD_TWC, r9 | ||
741 | addis r11, r11, 0x0080 /* Add 8M */ | ||
742 | mtspr SPRN_MD_RPN, r11 /* Store TLB entry (RPN is in r11) */ | ||
743 | #endif | ||
744 | |||
745 | /* Since the cache is enabled according to the information we | ||
746 | * just loaded into the TLB, invalidate and enable the caches here. | ||
747 | * We should probably check/set other modes....later. | ||
748 | */ | ||
749 | lis r8, IDC_INVALL@h | ||
750 | mtspr SPRN_IC_CST, r8 | ||
751 | mtspr SPRN_DC_CST, r8 | ||
752 | lis r8, IDC_ENABLE@h | ||
753 | mtspr SPRN_IC_CST, r8 | ||
754 | #ifdef CONFIG_8xx_COPYBACK | ||
755 | mtspr SPRN_DC_CST, r8 | ||
756 | #else | ||
757 | /* For a debug option, I left this here to easily enable | ||
758 | * the write through cache mode | ||
759 | */ | ||
760 | lis r8, DC_SFWT@h | ||
761 | mtspr SPRN_DC_CST, r8 | ||
762 | lis r8, IDC_ENABLE@h | ||
763 | mtspr SPRN_DC_CST, r8 | ||
764 | #endif | ||
765 | blr | ||
766 | |||
767 | |||
768 | /* | ||
769 | * Set up to use a given MMU context. | ||
770 | * r3 is context number, r4 is PGD pointer. | ||
771 | * | ||
772 | * We place the physical address of the new task page directory loaded | ||
773 | * into the MMU base register, and set the ASID compare register with | ||
774 | * the new "context." | ||
775 | */ | ||
776 | _GLOBAL(set_context) | ||
777 | |||
778 | #ifdef CONFIG_BDI_SWITCH | ||
779 | /* Context switch the PTE pointer for the Abatron BDI2000. | ||
780 | * The PGDIR is passed as second argument. | ||
781 | */ | ||
782 | lis r5, KERNELBASE@h | ||
783 | lwz r5, 0xf0(r5) | ||
784 | stw r4, 0x4(r5) | ||
785 | #endif | ||
786 | |||
787 | #ifdef CONFIG_8xx_CPU6 | ||
788 | lis r6, cpu6_errata_word@h | ||
789 | ori r6, r6, cpu6_errata_word@l | ||
790 | tophys (r4, r4) | ||
791 | li r7, 0x3980 | ||
792 | stw r7, 12(r6) | ||
793 | lwz r7, 12(r6) | ||
794 | mtspr SPRN_M_TWB, r4 /* Update MMU base address */ | ||
795 | li r7, 0x3380 | ||
796 | stw r7, 12(r6) | ||
797 | lwz r7, 12(r6) | ||
798 | mtspr SPRN_M_CASID, r3 /* Update context */ | ||
799 | #else | ||
800 | mtspr SPRN_M_CASID,r3 /* Update context */ | ||
801 | tophys (r4, r4) | ||
802 | mtspr SPRN_M_TWB, r4 /* and pgd */ | ||
803 | #endif | ||
804 | SYNC | ||
805 | blr | ||
806 | |||
807 | #ifdef CONFIG_8xx_CPU6 | ||
808 | /* It's here because it is unique to the 8xx. | ||
809 | * It is important we get called with interrupts disabled. I used to | ||
810 | * do that, but it appears that all code that calls this already had | ||
811 | * interrupts disabled. | ||
812 | */ | ||
813 | .globl set_dec_cpu6 | ||
814 | set_dec_cpu6: | ||
815 | lis r7, cpu6_errata_word@h | ||
816 | ori r7, r7, cpu6_errata_word@l | ||
817 | li r4, 0x2c00 | ||
818 | stw r4, 8(r7) | ||
819 | lwz r4, 8(r7) | ||
820 | mtspr 22, r3 /* Update Decrementer */ | ||
821 | SYNC | ||
822 | blr | ||
823 | #endif | ||
824 | |||
825 | /* | ||
826 | * We put a few things here that have to be page-aligned. | ||
827 | * This stuff goes at the beginning of the data segment, | ||
828 | * which is page-aligned. | ||
829 | */ | ||
830 | .data | ||
831 | .globl sdata | ||
832 | sdata: | ||
833 | .globl empty_zero_page | ||
834 | empty_zero_page: | ||
835 | .space 4096 | ||
836 | |||
837 | .globl swapper_pg_dir | ||
838 | swapper_pg_dir: | ||
839 | .space 4096 | ||
840 | |||
841 | /* | ||
842 | * This space gets a copy of optional info passed to us by the bootstrap | ||
843 | * Used to pass parameters into the kernel like root=/dev/sda1, etc. | ||
844 | */ | ||
845 | .globl cmd_line | ||
846 | cmd_line: | ||
847 | .space 512 | ||
848 | |||
849 | /* Room for two PTE table pointers, usually the kernel and current user | ||
850 | * pointers to their respective root page tables (pgdir). | ||
851 | */ | ||
852 | abatron_pteptrs: | ||
853 | .space 8 | ||
854 | |||
855 | #ifdef CONFIG_8xx_CPU6 | ||
856 | .globl cpu6_errata_word | ||
857 | cpu6_errata_word: | ||
858 | .space 16 | ||
859 | #endif | ||
860 | |||
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S new file mode 100644 index 000000000000..eba5a5f8ff08 --- /dev/null +++ b/arch/powerpc/kernel/head_fsl_booke.S | |||
@@ -0,0 +1,1058 @@ | |||
1 | /* | ||
2 | * arch/ppc/kernel/head_fsl_booke.S | ||
3 | * | ||
4 | * Kernel execution entry point code. | ||
5 | * | ||
6 | * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org> | ||
7 | * Initial PowerPC version. | ||
8 | * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu> | ||
9 | * Rewritten for PReP | ||
10 | * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au> | ||
11 | * Low-level exception handlers, MMU support, and rewrite. | ||
12 | * Copyright (c) 1997 Dan Malek <dmalek@jlc.net> | ||
13 | * PowerPC 8xx modifications. | ||
14 | * Copyright (c) 1998-1999 TiVo, Inc. | ||
15 | * PowerPC 403GCX modifications. | ||
16 | * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu> | ||
17 | * PowerPC 403GCX/405GP modifications. | ||
18 | * Copyright 2000 MontaVista Software Inc. | ||
19 | * PPC405 modifications | ||
20 | * PowerPC 403GCX/405GP modifications. | ||
21 | * Author: MontaVista Software, Inc. | ||
22 | * frank_rowand@mvista.com or source@mvista.com | ||
23 | * debbie_chu@mvista.com | ||
24 | * Copyright 2002-2004 MontaVista Software, Inc. | ||
25 | * PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org> | ||
26 | * Copyright 2004 Freescale Semiconductor, Inc | ||
27 | * PowerPC e500 modifications, Kumar Gala <kumar.gala@freescale.com> | ||
28 | * | ||
29 | * This program is free software; you can redistribute it and/or modify it | ||
30 | * under the terms of the GNU General Public License as published by the | ||
31 | * Free Software Foundation; either version 2 of the License, or (at your | ||
32 | * option) any later version. | ||
33 | */ | ||
34 | |||
35 | #include <linux/config.h> | ||
36 | #include <linux/threads.h> | ||
37 | #include <asm/processor.h> | ||
38 | #include <asm/page.h> | ||
39 | #include <asm/mmu.h> | ||
40 | #include <asm/pgtable.h> | ||
41 | #include <asm/cputable.h> | ||
42 | #include <asm/thread_info.h> | ||
43 | #include <asm/ppc_asm.h> | ||
44 | #include <asm/asm-offsets.h> | ||
45 | #include "head_booke.h" | ||
46 | |||
47 | /* As with the other PowerPC ports, it is expected that when code | ||
48 | * execution begins here, the following registers contain valid, yet | ||
49 | * optional, information: | ||
50 | * | ||
51 | * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.) | ||
52 | * r4 - Starting address of the init RAM disk | ||
53 | * r5 - Ending address of the init RAM disk | ||
54 | * r6 - Start of kernel command line string (e.g. "mem=128") | ||
55 | * r7 - End of kernel command line string | ||
56 | * | ||
57 | */ | ||
58 | .text | ||
59 | _GLOBAL(_stext) | ||
60 | _GLOBAL(_start) | ||
61 | /* | ||
62 | * Reserve a word at a fixed location to store the address | ||
63 | * of abatron_pteptrs | ||
64 | */ | ||
65 | nop | ||
66 | /* | ||
67 | * Save parameters we are passed | ||
68 | */ | ||
69 | mr r31,r3 | ||
70 | mr r30,r4 | ||
71 | mr r29,r5 | ||
72 | mr r28,r6 | ||
73 | mr r27,r7 | ||
74 | li r24,0 /* CPU number */ | ||
75 | |||
76 | /* We try to not make any assumptions about how the boot loader | ||
77 | * set up or used the TLBs. We invalidate all mappings from the | ||
78 | * boot loader and load a single entry in TLB1[0] to map the | ||
79 | * first 16M of kernel memory. Any boot info passed from the | ||
80 | * bootloader needs to live in this first 16M. | ||
81 | * | ||
82 | * Requirement on bootloader: | ||
83 | * - The page we're executing in needs to reside in TLB1 and | ||
84 | * have IPROT=1. If not, an invalidate broadcast could | ||
85 | * evict the entry we're currently executing in. | ||
86 | * | ||
87 | * r3 = Index of the TLB1 entry we're executing in | ||
88 | * r4 = Current MSR[IS] | ||
89 | * r5 = Index of TLB1 temp mapping | ||
90 | * | ||
91 | * Later in mapin_ram we will correctly map lowmem, and resize TLB1[0] | ||
92 | * if needed | ||
93 | */ | ||
94 | |||
95 | /* 1. Find the index of the entry we're executing in */ | ||
96 | bl invstr /* Find our address */ | ||
97 | invstr: mflr r6 /* Make it accessible */ | ||
98 | mfmsr r7 | ||
99 | rlwinm r4,r7,27,31,31 /* extract MSR[IS] */ | ||
100 | mfspr r7, SPRN_PID0 | ||
101 | slwi r7,r7,16 | ||
102 | or r7,r7,r4 | ||
103 | mtspr SPRN_MAS6,r7 | ||
104 | tlbsx 0,r6 /* search MSR[IS], SPID=PID0 */ | ||
105 | #ifndef CONFIG_E200 | ||
106 | mfspr r7,SPRN_MAS1 | ||
107 | andis. r7,r7,MAS1_VALID@h | ||
108 | bne match_TLB | ||
109 | mfspr r7,SPRN_PID1 | ||
110 | slwi r7,r7,16 | ||
111 | or r7,r7,r4 | ||
112 | mtspr SPRN_MAS6,r7 | ||
113 | tlbsx 0,r6 /* search MSR[IS], SPID=PID1 */ | ||
114 | mfspr r7,SPRN_MAS1 | ||
115 | andis. r7,r7,MAS1_VALID@h | ||
116 | bne match_TLB | ||
117 | mfspr r7, SPRN_PID2 | ||
118 | slwi r7,r7,16 | ||
119 | or r7,r7,r4 | ||
120 | mtspr SPRN_MAS6,r7 | ||
121 | tlbsx 0,r6 /* Fall through, we had to match */ | ||
122 | #endif | ||
123 | match_TLB: | ||
124 | mfspr r7,SPRN_MAS0 | ||
125 | rlwinm r3,r7,16,20,31 /* Extract MAS0(Entry) */ | ||
126 | |||
127 | mfspr r7,SPRN_MAS1 /* Ensure IPROT is set */ | ||
128 | oris r7,r7,MAS1_IPROT@h | ||
129 | mtspr SPRN_MAS1,r7 | ||
130 | tlbwe | ||
131 | |||
132 | /* 2. Invalidate all entries except the entry we're executing in */ | ||
133 | mfspr r9,SPRN_TLB1CFG | ||
134 | andi. r9,r9,0xfff | ||
135 | li r6,0 /* Set Entry counter to 0 */ | ||
136 | 1: lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ | ||
137 | rlwimi r7,r6,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r6) */ | ||
138 | mtspr SPRN_MAS0,r7 | ||
139 | tlbre | ||
140 | mfspr r7,SPRN_MAS1 | ||
141 | rlwinm r7,r7,0,2,31 /* Clear MAS1 Valid and IPROT */ | ||
142 | cmpw r3,r6 | ||
143 | beq skpinv /* Don't update the current execution TLB */ | ||
144 | mtspr SPRN_MAS1,r7 | ||
145 | tlbwe | ||
146 | isync | ||
147 | skpinv: addi r6,r6,1 /* Increment */ | ||
148 | cmpw r6,r9 /* Are we done? */ | ||
149 | bne 1b /* If not, repeat */ | ||
150 | |||
151 | /* Invalidate TLB0 */ | ||
152 | li r6,0x04 | ||
153 | tlbivax 0,r6 | ||
154 | #ifdef CONFIG_SMP | ||
155 | tlbsync | ||
156 | #endif | ||
157 | /* Invalidate TLB1 */ | ||
158 | li r6,0x0c | ||
159 | tlbivax 0,r6 | ||
160 | #ifdef CONFIG_SMP | ||
161 | tlbsync | ||
162 | #endif | ||
163 | msync | ||
164 | |||
165 | /* 3. Setup a temp mapping and jump to it */ | ||
166 | andi. r5, r3, 0x1 /* Pick a non-zero entry other than the one we're in */ | ||
167 | addi r5, r5, 0x1 | ||
168 | lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ | ||
169 | rlwimi r7,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */ | ||
170 | mtspr SPRN_MAS0,r7 | ||
171 | tlbre | ||
172 | |||
173 | /* Just modify the entry ID and EPN for the temp mapping */ | ||
174 | lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ | ||
175 | rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */ | ||
176 | mtspr SPRN_MAS0,r7 | ||
177 | xori r6,r4,1 /* Setup TMP mapping in the other Address space */ | ||
178 | slwi r6,r6,12 | ||
179 | oris r6,r6,(MAS1_VALID|MAS1_IPROT)@h | ||
180 | ori r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_4K))@l | ||
181 | mtspr SPRN_MAS1,r6 | ||
182 | mfspr r6,SPRN_MAS2 | ||
183 | li r7,0 /* temp EPN = 0 */ | ||
184 | rlwimi r7,r6,0,20,31 | ||
185 | mtspr SPRN_MAS2,r7 | ||
186 | tlbwe | ||
187 | |||
188 | xori r6,r4,1 | ||
189 | slwi r6,r6,5 /* setup new context with other address space */ | ||
190 | bl 1f /* Find our address */ | ||
191 | 1: mflr r9 | ||
192 | rlwimi r7,r9,0,20,31 | ||
193 | addi r7,r7,24 | ||
194 | mtspr SPRN_SRR0,r7 | ||
195 | mtspr SPRN_SRR1,r6 | ||
196 | rfi | ||
197 | |||
198 | /* 4. Clear out PIDs & Search info */ | ||
199 | li r6,0 | ||
200 | mtspr SPRN_PID0,r6 | ||
201 | #ifndef CONFIG_E200 | ||
202 | mtspr SPRN_PID1,r6 | ||
203 | mtspr SPRN_PID2,r6 | ||
204 | #endif | ||
205 | mtspr SPRN_MAS6,r6 | ||
206 | |||
207 | /* 5. Invalidate mapping we started in */ | ||
208 | lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ | ||
209 | rlwimi r7,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */ | ||
210 | mtspr SPRN_MAS0,r7 | ||
211 | tlbre | ||
212 | li r6,0 | ||
213 | mtspr SPRN_MAS1,r6 | ||
214 | tlbwe | ||
215 | /* Invalidate TLB1 */ | ||
216 | li r9,0x0c | ||
217 | tlbivax 0,r9 | ||
218 | #ifdef CONFIG_SMP | ||
219 | tlbsync | ||
220 | #endif | ||
221 | msync | ||
222 | |||
223 | /* 6. Setup KERNELBASE mapping in TLB1[0] */ | ||
224 | lis r6,0x1000 /* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */ | ||
225 | mtspr SPRN_MAS0,r6 | ||
226 | lis r6,(MAS1_VALID|MAS1_IPROT)@h | ||
227 | ori r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_16M))@l | ||
228 | mtspr SPRN_MAS1,r6 | ||
229 | li r7,0 | ||
230 | lis r6,KERNELBASE@h | ||
231 | ori r6,r6,KERNELBASE@l | ||
232 | rlwimi r6,r7,0,20,31 | ||
233 | mtspr SPRN_MAS2,r6 | ||
234 | li r7,(MAS3_SX|MAS3_SW|MAS3_SR) | ||
235 | mtspr SPRN_MAS3,r7 | ||
236 | tlbwe | ||
237 | |||
238 | /* 7. Jump to KERNELBASE mapping */ | ||
239 | lis r7,MSR_KERNEL@h | ||
240 | ori r7,r7,MSR_KERNEL@l | ||
241 | bl 1f /* Find our address */ | ||
242 | 1: mflr r9 | ||
243 | rlwimi r6,r9,0,20,31 | ||
244 | addi r6,r6,24 | ||
245 | mtspr SPRN_SRR0,r6 | ||
246 | mtspr SPRN_SRR1,r7 | ||
247 | rfi /* start execution out of TLB1[0] entry */ | ||
248 | |||
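The bl 1f / mflr / addi ...,24 idiom used in steps 3 and 7 computes, by hand, the address of the instruction after the rfi, so that execution resumes at the next step under the new translation. Annotating step 7 with byte offsets from label 1: (each instruction is 4 bytes):

	1:	mflr	r9			/* 1: + 0  */
		rlwimi	r6,r9,0,20,31		/* 1: + 4  */
		addi	r6,r6,24		/* 1: + 8; target = 1: + 24 */
		mtspr	SPRN_SRR0,r6		/* 1: + 12 */
		mtspr	SPRN_SRR1,r7		/* 1: + 16 */
		rfi				/* 1: + 20; resumes at 1: + 24 */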
249 | /* 8. Clear out the temp mapping */ | ||
250 | lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ | ||
251 | rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */ | ||
252 | mtspr SPRN_MAS0,r7 | ||
253 | tlbre | ||
254 | li r8,0; mtspr SPRN_MAS1,r8 /* MAS1 = 0: mark the temp entry invalid */ | ||
255 | tlbwe | ||
256 | /* Invalidate TLB1 */ | ||
257 | li r9,0x0c | ||
258 | tlbivax 0,r9 | ||
259 | #ifdef CONFIG_SMP | ||
260 | tlbsync | ||
261 | #endif | ||
262 | msync | ||
263 | |||
264 | /* Establish the interrupt vector offsets */ | ||
265 | SET_IVOR(0, CriticalInput); | ||
266 | SET_IVOR(1, MachineCheck); | ||
267 | SET_IVOR(2, DataStorage); | ||
268 | SET_IVOR(3, InstructionStorage); | ||
269 | SET_IVOR(4, ExternalInput); | ||
270 | SET_IVOR(5, Alignment); | ||
271 | SET_IVOR(6, Program); | ||
272 | SET_IVOR(7, FloatingPointUnavailable); | ||
273 | SET_IVOR(8, SystemCall); | ||
274 | SET_IVOR(9, AuxillaryProcessorUnavailable); | ||
275 | SET_IVOR(10, Decrementer); | ||
276 | SET_IVOR(11, FixedIntervalTimer); | ||
277 | SET_IVOR(12, WatchdogTimer); | ||
278 | SET_IVOR(13, DataTLBError); | ||
279 | SET_IVOR(14, InstructionTLBError); | ||
280 | SET_IVOR(15, Debug); | ||
281 | SET_IVOR(32, SPEUnavailable); | ||
282 | SET_IVOR(33, SPEFloatingPointData); | ||
283 | SET_IVOR(34, SPEFloatingPointRound); | ||
284 | #ifndef CONFIG_E200 | ||
285 | SET_IVOR(35, PerformanceMonitor); | ||
286 | #endif | ||
287 | |||
288 | /* Establish the interrupt vector base */ | ||
289 | lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */ | ||
290 | mtspr SPRN_IVPR,r4 | ||
291 | |||
292 | /* Setup the defaults for TLB entries */ | ||
293 | li r2,(MAS4_TSIZED(BOOKE_PAGESZ_4K))@l | ||
294 | #ifdef CONFIG_E200 | ||
295 | oris r2,r2,MAS4_TLBSELD(1)@h | ||
296 | #endif | ||
297 | mtspr SPRN_MAS4, r2 | ||
298 | |||
299 | #if 0 | ||
300 | /* Enable DOZE */ | ||
301 | mfspr r2,SPRN_HID0 | ||
302 | oris r2,r2,HID0_DOZE@h | ||
303 | mtspr SPRN_HID0, r2 | ||
304 | #endif | ||
305 | #ifdef CONFIG_E200 | ||
306 | /* enable dedicated debug exception handling resources (Debug APU) */ | ||
307 | mfspr r2,SPRN_HID0 | ||
308 | ori r2,r2,HID0_DAPUEN@l | ||
309 | mtspr SPRN_HID0,r2 | ||
310 | #endif | ||
311 | |||
312 | #if !defined(CONFIG_BDI_SWITCH) | ||
313 | /* | ||
314 | * The Abatron BDI JTAG debugger does not tolerate others | ||
315 | * mucking with the debug registers. | ||
316 | */ | ||
317 | lis r2,DBCR0_IDM@h | ||
318 | mtspr SPRN_DBCR0,r2 | ||
319 | /* clear any residual debug events */ | ||
320 | li r2,-1 | ||
321 | mtspr SPRN_DBSR,r2 | ||
322 | #endif | ||
323 | |||
324 | /* | ||
325 | * This is where the main kernel code starts. | ||
326 | */ | ||
327 | |||
328 | /* ptr to current */ | ||
329 | lis r2,init_task@h | ||
330 | ori r2,r2,init_task@l | ||
331 | |||
332 | /* ptr to current thread */ | ||
333 | addi r4,r2,THREAD /* init task's THREAD */ | ||
334 | mtspr SPRN_SPRG3,r4 | ||
335 | |||
336 | /* stack */ | ||
337 | lis r1,init_thread_union@h | ||
338 | ori r1,r1,init_thread_union@l | ||
339 | li r0,0 | ||
340 | stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) | ||
341 | |||
342 | bl early_init | ||
343 | |||
344 | mfspr r3,SPRN_TLB1CFG | ||
345 | andi. r3,r3,0xfff | ||
346 | lis r4,num_tlbcam_entries@ha | ||
347 | stw r3,num_tlbcam_entries@l(r4) | ||
348 | /* | ||
349 | * Decide what sort of machine this is and initialize the MMU. | ||
350 | */ | ||
351 | mr r3,r31 | ||
352 | mr r4,r30 | ||
353 | mr r5,r29 | ||
354 | mr r6,r28 | ||
355 | mr r7,r27 | ||
356 | bl machine_init | ||
357 | bl MMU_init | ||
358 | |||
359 | /* Setup PTE pointers for the Abatron bdiGDB */ | ||
360 | lis r6, swapper_pg_dir@h | ||
361 | ori r6, r6, swapper_pg_dir@l | ||
362 | lis r5, abatron_pteptrs@h | ||
363 | ori r5, r5, abatron_pteptrs@l | ||
364 | lis r4, KERNELBASE@h | ||
365 | ori r4, r4, KERNELBASE@l | ||
366 | stw r5, 0(r4) /* Save abatron_pteptrs at a fixed location */ | ||
367 | stw r6, 0(r5) | ||
368 | |||
369 | /* Let's move on */ | ||
370 | lis r4,start_kernel@h | ||
371 | ori r4,r4,start_kernel@l | ||
372 | lis r3,MSR_KERNEL@h | ||
373 | ori r3,r3,MSR_KERNEL@l | ||
374 | mtspr SPRN_SRR0,r4 | ||
375 | mtspr SPRN_SRR1,r3 | ||
376 | rfi /* change context and jump to start_kernel */ | ||
377 | |||
378 | /* Macros to hide the PTE size differences | ||
379 | * | ||
380 | * FIND_PTE -- walks the page tables given EA & pgdir pointer | ||
381 | * r10 -- EA of fault | ||
382 | * r11 -- PGDIR pointer | ||
383 | * r12 -- free | ||
384 | * label 2: is the bailout case | ||
385 | * | ||
386 | * if we find the pte (fall through): | ||
387 | * r11 is low pte word | ||
388 | * r12 is pointer to the pte | ||
389 | */ | ||
390 | #ifdef CONFIG_PTE_64BIT | ||
391 | #define PTE_FLAGS_OFFSET 4 | ||
392 | #define FIND_PTE \ | ||
393 | rlwinm r12, r10, 13, 19, 29; /* Compute pgdir/pmd offset */ \ | ||
394 | lwzx r11, r12, r11; /* Get pgd/pmd entry */ \ | ||
395 | rlwinm. r12, r11, 0, 0, 20; /* Extract pt base address */ \ | ||
396 | beq 2f; /* Bail if no table */ \ | ||
397 | rlwimi r12, r10, 23, 20, 28; /* Compute pte address */ \ | ||
398 | lwz r11, 4(r12); /* Get pte entry */ | ||
399 | #else | ||
400 | #define PTE_FLAGS_OFFSET 0 | ||
401 | #define FIND_PTE \ | ||
402 | rlwimi r11, r10, 12, 20, 29; /* Create L1 (pgdir/pmd) address */ \ | ||
403 | lwz r11, 0(r11); /* Get L1 entry */ \ | ||
404 | rlwinm. r12, r11, 0, 0, 19; /* Extract L2 (pte) base address */ \ | ||
405 | beq 2f; /* Bail if no table */ \ | ||
406 | rlwimi r12, r10, 22, 20, 29; /* Compute PTE address */ \ | ||
407 | lwz r11, 0(r12); /* Get Linux PTE */ | ||
408 | #endif | ||
409 | |||
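As a reading aid, here is the 32-bit (CONFIG_PTE_64BIT unset) FIND_PTE path rendered as C; this is an illustration of the rlwimi arithmetic above, not kernel API. pgdir is the value loaded from PGDIR(r11) and is assumed 4K-aligned:

	/* Illustrative C rendering of the 32-bit FIND_PTE macro. */
	static inline unsigned long find_pte(unsigned long ea, unsigned long pgdir,
					     unsigned long **ptep)
	{
		unsigned long l1 = *(unsigned long *)(pgdir + ((ea >> 20) & 0xffc));
		unsigned long pt = l1 & ~0xfffUL;	/* L2 (pte) base address */

		if (!pt)
			return 0;			/* bail: no table (label 2:) */
		*ptep = (unsigned long *)(pt + ((ea >> 10) & 0xffc));
		return **ptep;				/* the Linux PTE (low word) */
	}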
410 | /* | ||
411 | * Interrupt vector entry code | ||
412 | * | ||
413 | * The Book E MMUs are always on so we don't need to handle | ||
414 | * interrupts in real mode as with previous PPC processors. In | ||
415 | * this case we handle interrupts in the kernel virtual address | ||
416 | * space. | ||
417 | * | ||
418 | * Interrupt vectors are dynamically placed relative to the | ||
419 | * interrupt prefix as determined by the address of interrupt_base. | ||
420 | * The interrupt vectors offsets are programmed using the labels | ||
421 | * for each interrupt vector entry. | ||
422 | * | ||
423 | * Interrupt vectors must be aligned on a 16 byte boundary. | ||
424 | * We align on a 32 byte cache line boundary for good measure. | ||
425 | */ | ||
426 | |||
427 | interrupt_base: | ||
428 | /* Critical Input Interrupt */ | ||
429 | CRITICAL_EXCEPTION(0x0100, CriticalInput, UnknownException) | ||
430 | |||
431 | /* Machine Check Interrupt */ | ||
432 | #ifdef CONFIG_E200 | ||
433 | /* no RFMCI, MCSRRs on E200 */ | ||
434 | CRITICAL_EXCEPTION(0x0200, MachineCheck, MachineCheckException) | ||
435 | #else | ||
436 | MCHECK_EXCEPTION(0x0200, MachineCheck, MachineCheckException) | ||
437 | #endif | ||
438 | |||
439 | /* Data Storage Interrupt */ | ||
440 | START_EXCEPTION(DataStorage) | ||
441 | mtspr SPRN_SPRG0, r10 /* Save some working registers */ | ||
442 | mtspr SPRN_SPRG1, r11 | ||
443 | mtspr SPRN_SPRG4W, r12 | ||
444 | mtspr SPRN_SPRG5W, r13 | ||
445 | mfcr r11 | ||
446 | mtspr SPRN_SPRG7W, r11 | ||
447 | |||
448 | /* | ||
449 | * Check if it was a store fault, if not then bail | ||
450 | * because a user tried to access a kernel or | ||
451 | * read-protected page. Otherwise, get the | ||
452 | * offending address and handle it. | ||
453 | */ | ||
454 | mfspr r10, SPRN_ESR | ||
455 | andis. r10, r10, ESR_ST@h | ||
456 | beq 2f | ||
457 | |||
458 | mfspr r10, SPRN_DEAR /* Get faulting address */ | ||
459 | |||
460 | /* If we are faulting a kernel address, we have to use the | ||
461 | * kernel page tables. | ||
462 | */ | ||
463 | lis r11, TASK_SIZE@h | ||
464 | ori r11, r11, TASK_SIZE@l | ||
465 | cmplw 0, r10, r11 | ||
466 | bge 2f | ||
467 | |||
468 | /* Get the PGD for the current thread */ | ||
469 | 3: | ||
470 | mfspr r11,SPRN_SPRG3 | ||
471 | lwz r11,PGDIR(r11) | ||
472 | 4: | ||
473 | FIND_PTE | ||
474 | |||
475 | /* Are _PAGE_USER & _PAGE_RW set & _PAGE_HWWRITE not? */ | ||
476 | andi. r13, r11, _PAGE_RW|_PAGE_USER|_PAGE_HWWRITE | ||
477 | cmpwi 0, r13, _PAGE_RW|_PAGE_USER | ||
478 | bne 2f /* Bail if not */ | ||
479 | |||
480 | /* Update 'changed'. */ | ||
481 | ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE | ||
482 | stw r11, PTE_FLAGS_OFFSET(r12) /* Update Linux page table */ | ||
483 | |||
484 | /* MAS2 is not updated as the entry does exist in the TLB; this | ||
485 | fault is taken to detect a state transition (e.g. COW -> DIRTY) | ||
486 | */ | ||
487 | andi. r11, r11, _PAGE_HWEXEC | ||
488 | rlwimi r11, r11, 31, 27, 27 /* SX <- _PAGE_HWEXEC */ | ||
489 | ori r11, r11, (MAS3_UW|MAS3_SW|MAS3_UR|MAS3_SR)@l /* set static perms */ | ||
490 | |||
491 | /* update search PID in MAS6, AS = 0 */ | ||
492 | mfspr r12, SPRN_PID0 | ||
493 | slwi r12, r12, 16 | ||
494 | mtspr SPRN_MAS6, r12 | ||
495 | |||
496 | /* find the TLB index that caused the fault. It has to be here. */ | ||
497 | tlbsx 0, r10 | ||
498 | |||
499 | /* only update the perm bits, assume the RPN is fine */ | ||
500 | mfspr r12, SPRN_MAS3 | ||
501 | rlwimi r12, r11, 0, 20, 31 | ||
502 | mtspr SPRN_MAS3,r12 | ||
503 | tlbwe | ||
504 | |||
505 | /* Done...restore registers and get out of here. */ | ||
506 | mfspr r11, SPRN_SPRG7R | ||
507 | mtcr r11 | ||
508 | mfspr r13, SPRN_SPRG5R | ||
509 | mfspr r12, SPRN_SPRG4R | ||
510 | mfspr r11, SPRN_SPRG1 | ||
511 | mfspr r10, SPRN_SPRG0 | ||
512 | rfi /* Force context change */ | ||
513 | |||
514 | 2: | ||
515 | /* | ||
516 | * The bailout. Restore registers to pre-exception conditions | ||
517 | * and call the heavyweights to help us out. | ||
518 | */ | ||
519 | mfspr r11, SPRN_SPRG7R | ||
520 | mtcr r11 | ||
521 | mfspr r13, SPRN_SPRG5R | ||
522 | mfspr r12, SPRN_SPRG4R | ||
523 | mfspr r11, SPRN_SPRG1 | ||
524 | mfspr r10, SPRN_SPRG0 | ||
525 | b data_access | ||
526 | |||
527 | /* Instruction Storage Interrupt */ | ||
528 | INSTRUCTION_STORAGE_EXCEPTION | ||
529 | |||
530 | /* External Input Interrupt */ | ||
531 | EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE) | ||
532 | |||
533 | /* Alignment Interrupt */ | ||
534 | ALIGNMENT_EXCEPTION | ||
535 | |||
536 | /* Program Interrupt */ | ||
537 | PROGRAM_EXCEPTION | ||
538 | |||
539 | /* Floating Point Unavailable Interrupt */ | ||
540 | #ifdef CONFIG_PPC_FPU | ||
541 | FP_UNAVAILABLE_EXCEPTION | ||
542 | #else | ||
543 | #ifdef CONFIG_E200 | ||
544 | /* E200 treats 'normal' floating point instructions as FP Unavail exception */ | ||
545 | EXCEPTION(0x0800, FloatingPointUnavailable, ProgramCheckException, EXC_XFER_EE) | ||
546 | #else | ||
547 | EXCEPTION(0x0800, FloatingPointUnavailable, UnknownException, EXC_XFER_EE) | ||
548 | #endif | ||
549 | #endif | ||
550 | |||
551 | /* System Call Interrupt */ | ||
552 | START_EXCEPTION(SystemCall) | ||
553 | NORMAL_EXCEPTION_PROLOG | ||
554 | EXC_XFER_EE_LITE(0x0c00, DoSyscall) | ||
555 | |||
556 | /* Auxiliary Processor Unavailable Interrupt */ | ||
557 | EXCEPTION(0x2900, AuxillaryProcessorUnavailable, UnknownException, EXC_XFER_EE) | ||
558 | |||
559 | /* Decrementer Interrupt */ | ||
560 | DECREMENTER_EXCEPTION | ||
561 | |||
562 | /* Fixed Interval Timer Interrupt */ | ||
563 | /* TODO: Add FIT support */ | ||
564 | EXCEPTION(0x3100, FixedIntervalTimer, UnknownException, EXC_XFER_EE) | ||
565 | |||
566 | /* Watchdog Timer Interrupt */ | ||
567 | #ifdef CONFIG_BOOKE_WDT | ||
568 | CRITICAL_EXCEPTION(0x3200, WatchdogTimer, WatchdogException) | ||
569 | #else | ||
570 | CRITICAL_EXCEPTION(0x3200, WatchdogTimer, UnknownException) | ||
571 | #endif | ||
572 | |||
573 | /* Data TLB Error Interrupt */ | ||
574 | START_EXCEPTION(DataTLBError) | ||
575 | mtspr SPRN_SPRG0, r10 /* Save some working registers */ | ||
576 | mtspr SPRN_SPRG1, r11 | ||
577 | mtspr SPRN_SPRG4W, r12 | ||
578 | mtspr SPRN_SPRG5W, r13 | ||
579 | mfcr r11 | ||
580 | mtspr SPRN_SPRG7W, r11 | ||
581 | mfspr r10, SPRN_DEAR /* Get faulting address */ | ||
582 | |||
583 | /* If we are faulting a kernel address, we have to use the | ||
584 | * kernel page tables. | ||
585 | */ | ||
586 | lis r11, TASK_SIZE@h | ||
587 | ori r11, r11, TASK_SIZE@l | ||
588 | cmplw 5, r10, r11 | ||
589 | blt 5, 3f | ||
590 | lis r11, swapper_pg_dir@h | ||
591 | ori r11, r11, swapper_pg_dir@l | ||
592 | |||
593 | mfspr r12,SPRN_MAS1 /* Set TID to 0 */ | ||
594 | rlwinm r12,r12,0,16,1 | ||
595 | mtspr SPRN_MAS1,r12 | ||
596 | |||
597 | b 4f | ||
598 | |||
599 | /* Get the PGD for the current thread */ | ||
600 | 3: | ||
601 | mfspr r11,SPRN_SPRG3 | ||
602 | lwz r11,PGDIR(r11) | ||
603 | |||
604 | 4: | ||
605 | FIND_PTE | ||
606 | andi. r13, r11, _PAGE_PRESENT /* Is the page present? */ | ||
607 | beq 2f /* Bail if not present */ | ||
608 | |||
609 | #ifdef CONFIG_PTE_64BIT | ||
610 | lwz r13, 0(r12) | ||
611 | #endif | ||
612 | ori r11, r11, _PAGE_ACCESSED | ||
613 | stw r11, PTE_FLAGS_OFFSET(r12) | ||
614 | |||
615 | /* Jump to common tlb load */ | ||
616 | b finish_tlb_load | ||
617 | 2: | ||
618 | /* The bailout. Restore registers to pre-exception conditions | ||
619 | * and call the heavyweights to help us out. | ||
620 | */ | ||
621 | mfspr r11, SPRN_SPRG7R | ||
622 | mtcr r11 | ||
623 | mfspr r13, SPRN_SPRG5R | ||
624 | mfspr r12, SPRN_SPRG4R | ||
625 | mfspr r11, SPRN_SPRG1 | ||
626 | mfspr r10, SPRN_SPRG0 | ||
627 | b data_access | ||
628 | |||
629 | /* Instruction TLB Error Interrupt */ | ||
630 | /* | ||
631 | * Nearly the same as above, except we get our | ||
632 | * information from different registers and bailout | ||
633 | * to a different point. | ||
634 | */ | ||
635 | START_EXCEPTION(InstructionTLBError) | ||
636 | mtspr SPRN_SPRG0, r10 /* Save some working registers */ | ||
637 | mtspr SPRN_SPRG1, r11 | ||
638 | mtspr SPRN_SPRG4W, r12 | ||
639 | mtspr SPRN_SPRG5W, r13 | ||
640 | mfcr r11 | ||
641 | mtspr SPRN_SPRG7W, r11 | ||
642 | mfspr r10, SPRN_SRR0 /* Get faulting address */ | ||
643 | |||
644 | /* If we are faulting a kernel address, we have to use the | ||
645 | * kernel page tables. | ||
646 | */ | ||
647 | lis r11, TASK_SIZE@h | ||
648 | ori r11, r11, TASK_SIZE@l | ||
649 | cmplw 5, r10, r11 | ||
650 | blt 5, 3f | ||
651 | lis r11, swapper_pg_dir@h | ||
652 | ori r11, r11, swapper_pg_dir@l | ||
653 | |||
654 | mfspr r12,SPRN_MAS1 /* Set TID to 0 */ | ||
655 | rlwinm r12,r12,0,16,1 | ||
656 | mtspr SPRN_MAS1,r12 | ||
657 | |||
658 | b 4f | ||
659 | |||
660 | /* Get the PGD for the current thread */ | ||
661 | 3: | ||
662 | mfspr r11,SPRN_SPRG3 | ||
663 | lwz r11,PGDIR(r11) | ||
664 | |||
665 | 4: | ||
666 | FIND_PTE | ||
667 | andi. r13, r11, _PAGE_PRESENT /* Is the page present? */ | ||
668 | beq 2f /* Bail if not present */ | ||
669 | |||
670 | #ifdef CONFIG_PTE_64BIT | ||
671 | lwz r13, 0(r12) | ||
672 | #endif | ||
673 | ori r11, r11, _PAGE_ACCESSED | ||
674 | stw r11, PTE_FLAGS_OFFSET(r12) | ||
675 | |||
676 | /* Jump to common TLB load point */ | ||
677 | b finish_tlb_load | ||
678 | |||
679 | 2: | ||
680 | /* The bailout. Restore registers to pre-exception conditions | ||
681 | * and call the heavyweights to help us out. | ||
682 | */ | ||
683 | mfspr r11, SPRN_SPRG7R | ||
684 | mtcr r11 | ||
685 | mfspr r13, SPRN_SPRG5R | ||
686 | mfspr r12, SPRN_SPRG4R | ||
687 | mfspr r11, SPRN_SPRG1 | ||
688 | mfspr r10, SPRN_SPRG0 | ||
689 | b InstructionStorage | ||
690 | |||
691 | #ifdef CONFIG_SPE | ||
692 | /* SPE Unavailable */ | ||
693 | START_EXCEPTION(SPEUnavailable) | ||
694 | NORMAL_EXCEPTION_PROLOG | ||
695 | bne load_up_spe | ||
696 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
697 | EXC_XFER_EE_LITE(0x2010, KernelSPE) | ||
698 | #else | ||
699 | EXCEPTION(0x2020, SPEUnavailable, UnknownException, EXC_XFER_EE) | ||
700 | #endif /* CONFIG_SPE */ | ||
701 | |||
702 | /* SPE Floating Point Data */ | ||
703 | #ifdef CONFIG_SPE | ||
704 | EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE); | ||
705 | #else | ||
706 | EXCEPTION(0x2040, SPEFloatingPointData, UnknownException, EXC_XFER_EE) | ||
707 | #endif /* CONFIG_SPE */ | ||
708 | |||
709 | /* SPE Floating Point Round */ | ||
710 | EXCEPTION(0x2050, SPEFloatingPointRound, UnknownException, EXC_XFER_EE) | ||
711 | |||
712 | /* Performance Monitor */ | ||
713 | EXCEPTION(0x2060, PerformanceMonitor, PerformanceMonitorException, EXC_XFER_STD) | ||
714 | |||
715 | |||
716 | /* Debug Interrupt */ | ||
717 | DEBUG_EXCEPTION | ||
718 | |||
719 | /* | ||
720 | * Local functions | ||
721 | */ | ||
722 | |||
723 | /* | ||
724 | * Data TLB exceptions will bail out to this point | ||
725 | * if they can't resolve the lightweight TLB fault. | ||
726 | */ | ||
727 | data_access: | ||
728 | NORMAL_EXCEPTION_PROLOG | ||
729 | mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */ | ||
730 | stw r5,_ESR(r11) | ||
731 | mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */ | ||
732 | andis. r10,r5,(ESR_ILK|ESR_DLK)@h | ||
733 | bne 1f | ||
734 | EXC_XFER_EE_LITE(0x0300, handle_page_fault) | ||
735 | 1: | ||
736 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
737 | EXC_XFER_EE_LITE(0x0300, CacheLockingException) | ||
738 | |||
739 | /* | ||
740 | * Both the instruction and data TLB miss handlers get to this | ||
741 | * point to load the TLB. | ||
742 | * | ||
743 | * r10 - EA of fault | ||
744 | * r11 - TLB (info from Linux PTE) | ||
745 | * r12, r13 - available to use | ||
746 | * CR5 - results of addr < TASK_SIZE | ||
747 | * MAS0, MAS1 - loaded with proper value when we get here | ||
748 | * MAS2, MAS3 - will need additional info from Linux PTE | ||
749 | * Upon exit, we reload everything and RFI. | ||
750 | */ | ||
751 | finish_tlb_load: | ||
752 | /* | ||
753 | * We set execute, because we don't have the granularity to | ||
754 | * properly set this at the page level (Linux problem). | ||
755 | * Many of these bits are software only. Bits we don't set | ||
756 | * here are assumed (as they properly should) to have the appropriate value. | ||
757 | */ | ||
758 | |||
759 | mfspr r12, SPRN_MAS2 | ||
760 | #ifdef CONFIG_PTE_64BIT | ||
761 | rlwimi r12, r11, 26, 24, 31 /* extract ...WIMGE from pte */ | ||
762 | #else | ||
763 | rlwimi r12, r11, 26, 27, 31 /* extract WIMGE from pte */ | ||
764 | #endif | ||
765 | mtspr SPRN_MAS2, r12 | ||
766 | |||
767 | bge 5, 1f | ||
768 | |||
769 | /* is user addr */ | ||
770 | andi. r12, r11, (_PAGE_USER | _PAGE_HWWRITE | _PAGE_HWEXEC) | ||
771 | andi. r10, r11, _PAGE_USER /* Test for _PAGE_USER */ | ||
772 | srwi r10, r12, 1 | ||
773 | or r12, r12, r10 /* Copy user perms into supervisor */ | ||
774 | iseleq r12, 0, r12 | ||
775 | b 2f | ||
776 | |||
777 | /* is kernel addr */ | ||
778 | 1: rlwinm r12, r11, 31, 29, 29 /* Extract _PAGE_HWWRITE into SW */ | ||
779 | ori r12, r12, (MAS3_SX | MAS3_SR) | ||
780 | |||
781 | #ifdef CONFIG_PTE_64BIT | ||
782 | 2: rlwimi r12, r13, 24, 0, 7 /* grab RPN[32:39] */ | ||
783 | rlwimi r12, r11, 24, 8, 19 /* grab RPN[40:51] */ | ||
784 | mtspr SPRN_MAS3, r12 | ||
785 | BEGIN_FTR_SECTION | ||
786 | srwi r10, r13, 8 /* grab RPN[8:31] */ | ||
787 | mtspr SPRN_MAS7, r10 | ||
788 | END_FTR_SECTION_IFSET(CPU_FTR_BIG_PHYS) | ||
789 | #else | ||
790 | 2: rlwimi r11, r12, 0, 20, 31 /* Extract RPN from PTE and merge with perms */ | ||
791 | mtspr SPRN_MAS3, r11 | ||
792 | #endif | ||
793 | #ifdef CONFIG_E200 | ||
794 | /* Round robin TLB1 entries assignment */ | ||
795 | mfspr r12, SPRN_MAS0 | ||
796 | |||
797 | /* Extract TLB1CFG(NENTRY) */ | ||
798 | mfspr r11, SPRN_TLB1CFG | ||
799 | andi. r11, r11, 0xfff | ||
800 | |||
801 | /* Extract MAS0(NV) */ | ||
802 | andi. r13, r12, 0xfff | ||
803 | addi r13, r13, 1 | ||
804 | cmpw 0, r13, r11 | ||
805 | addi r12, r12, 1 | ||
806 | |||
807 | /* check if we need to wrap */ | ||
808 | blt 7f | ||
809 | |||
810 | /* wrap back to first free tlbcam entry */ | ||
811 | lis r13, tlbcam_index@ha | ||
812 | lwz r13, tlbcam_index@l(r13) | ||
813 | rlwimi r12, r13, 0, 20, 31 | ||
814 | 7: | ||
815 | mtspr SPRN_MAS0,r12 | ||
816 | #endif /* CONFIG_E200 */ | ||
817 | |||
818 | tlbwe | ||
819 | |||
820 | /* Done...restore registers and get out of here. */ | ||
821 | mfspr r11, SPRN_SPRG7R | ||
822 | mtcr r11 | ||
823 | mfspr r13, SPRN_SPRG5R | ||
824 | mfspr r12, SPRN_SPRG4R | ||
825 | mfspr r11, SPRN_SPRG1 | ||
826 | mfspr r10, SPRN_SPRG0 | ||
827 | rfi /* Force context change */ | ||
828 | |||
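Restating the permission derivation in finish_tlb_load as a hedged C sketch: it relies on the convention that each user permission bit sits one position above its supervisor counterpart, so a right shift by one mirrors user rights into supervisor rights. Treat that bit layout, and the helper framing, as assumptions of the sketch:

	/* Sketch of the MAS3 permission derivation; bit layout is assumed. */
	unsigned long mas3_perms(unsigned long pte, int is_user_addr)
	{
		unsigned long perm;

		if (is_user_addr) {		/* cr5 lt: EA < TASK_SIZE */
			perm = pte & (_PAGE_USER | _PAGE_HWWRITE | _PAGE_HWEXEC);
			perm |= perm >> 1;	/* copy user perms into supervisor */
			if (!(pte & _PAGE_USER))
				perm = 0;	/* iseleq: no user bit, no access */
		} else {
			perm = ((pte & _PAGE_HWWRITE) >> 1)	/* HWWRITE -> MAS3_SW */
			     | MAS3_SX | MAS3_SR;
		}
		return perm;
	}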
829 | #ifdef CONFIG_SPE | ||
830 | /* Note that the SPE support is closely modeled after the AltiVec | ||
831 | * support. Changes to one are likely to be applicable to the | ||
832 | * other! */ | ||
833 | load_up_spe: | ||
834 | /* | ||
835 | * Disable SPE for the task which had SPE previously, | ||
836 | * and save its SPE registers in its thread_struct. | ||
837 | * Enables SPE for use in the kernel on return. | ||
838 | * On SMP we know the SPE units are free, since we give it up every | ||
839 | * switch. -- Kumar | ||
840 | */ | ||
841 | mfmsr r5 | ||
842 | oris r5,r5,MSR_SPE@h | ||
843 | mtmsr r5 /* enable use of SPE now */ | ||
844 | isync | ||
845 | /* | ||
846 | * For SMP, we don't do lazy SPE switching because it just gets too | ||
847 | * horrendously complex, especially when a task switches from one CPU | ||
848 | * to another. Instead we call giveup_spe in switch_to. | ||
849 | */ | ||
850 | #ifndef CONFIG_SMP | ||
851 | lis r3,last_task_used_spe@ha | ||
852 | lwz r4,last_task_used_spe@l(r3) | ||
853 | cmpi 0,r4,0 | ||
854 | beq 1f | ||
855 | addi r4,r4,THREAD /* want THREAD of last_task_used_spe */ | ||
856 | SAVE_32EVRS(0,r10,r4) | ||
857 | evxor evr10, evr10, evr10 /* clear out evr10 */ | ||
858 | evmwumiaa evr10, evr10, evr10 /* evr10 <- ACC = 0 * 0 + ACC */ | ||
859 | li r5,THREAD_ACC | ||
860 | evstddx evr10, r4, r5 /* save off accumulator */ | ||
861 | lwz r5,PT_REGS(r4) | ||
862 | lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
863 | lis r10,MSR_SPE@h | ||
864 | andc r4,r4,r10 /* disable SPE for previous task */ | ||
865 | stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
866 | 1: | ||
867 | #endif /* CONFIG_SMP */ | ||
868 | /* enable use of SPE after return */ | ||
869 | oris r9,r9,MSR_SPE@h | ||
870 | mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */ | ||
871 | li r4,1 | ||
872 | li r10,THREAD_ACC | ||
873 | stw r4,THREAD_USED_SPE(r5) | ||
874 | evlddx evr4,r10,r5 | ||
875 | evmra evr4,evr4 | ||
876 | REST_32EVRS(0,r10,r5) | ||
877 | #ifndef CONFIG_SMP | ||
878 | subi r4,r5,THREAD | ||
879 | stw r4,last_task_used_spe@l(r3) | ||
880 | #endif /* CONFIG_SMP */ | ||
881 | /* restore registers and return */ | ||
882 | 2: REST_4GPRS(3, r11) | ||
883 | lwz r10,_CCR(r11) | ||
884 | REST_GPR(1, r11) | ||
885 | mtcr r10 | ||
886 | lwz r10,_LINK(r11) | ||
887 | mtlr r10 | ||
888 | REST_GPR(10, r11) | ||
889 | mtspr SPRN_SRR1,r9 | ||
890 | mtspr SPRN_SRR0,r12 | ||
891 | REST_GPR(9, r11) | ||
892 | REST_GPR(12, r11) | ||
893 | lwz r11,GPR11(r11) | ||
894 | SYNC | ||
895 | rfi | ||
896 | |||
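In outline, the UP (non-SMP) lazy-switch bookkeeping that load_up_spe implements is the following; a C sketch where regs is the exception frame of the faulting task and the save/restore helpers are illustrative stand-ins for the SAVE_32EVRS/REST_32EVRS and accumulator sequences above:

	/* Sketch of lazy SPE ownership transfer; helpers are illustrative. */
	enable_msr_spe();			/* so the kernel may touch SPE state */
	if (last_task_used_spe) {		/* another task owns the unit */
		save_evrs_and_acc(&last_task_used_spe->thread);
		last_task_used_spe->thread.regs->msr &= ~MSR_SPE;
	}
	regs->msr |= MSR_SPE;			/* faulting task returns with SPE on */
	current->thread.used_spe = 1;
	restore_evrs_and_acc(&current->thread);
	last_task_used_spe = current;		/* this task owns the unit now */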
897 | /* | ||
898 | * SPE unavailable trap from kernel - print a message, but let | ||
899 | * the task use SPE in the kernel until it returns to user mode. | ||
900 | */ | ||
901 | KernelSPE: | ||
902 | lwz r3,_MSR(r1) | ||
903 | oris r3,r3,MSR_SPE@h | ||
904 | stw r3,_MSR(r1) /* enable use of SPE after return */ | ||
905 | lis r3,87f@h | ||
906 | ori r3,r3,87f@l | ||
907 | mr r4,r2 /* current */ | ||
908 | lwz r5,_NIP(r1) | ||
909 | bl printk | ||
910 | b ret_from_except | ||
911 | 87: .string "SPE used in kernel (task=%p, pc=%x) \n" | ||
912 | .align 4,0 | ||
913 | |||
914 | #endif /* CONFIG_SPE */ | ||
915 | |||
916 | /* | ||
917 | * Global functions | ||
918 | */ | ||
919 | |||
920 | /* | ||
921 | * extern void loadcam_entry(unsigned int index) | ||
922 | * | ||
923 | * Load TLBCAM[index] entry in to the L2 CAM MMU | ||
924 | */ | ||
925 | _GLOBAL(loadcam_entry) | ||
926 | lis r4,TLBCAM@ha | ||
927 | addi r4,r4,TLBCAM@l | ||
928 | mulli r5,r3,20 | ||
929 | add r3,r5,r4 | ||
930 | lwz r4,0(r3) | ||
931 | mtspr SPRN_MAS0,r4 | ||
932 | lwz r4,4(r3) | ||
933 | mtspr SPRN_MAS1,r4 | ||
934 | lwz r4,8(r3) | ||
935 | mtspr SPRN_MAS2,r4 | ||
936 | lwz r4,12(r3) | ||
937 | mtspr SPRN_MAS3,r4 | ||
938 | tlbwe | ||
939 | isync | ||
940 | blr | ||
941 | |||
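The 20-byte stride (mulli r5,r3,20) and the four word loads at offsets 0, 4, 8 and 12 imply a per-entry layout like the following; the struct is inferred from this routine, not quoted from a header, and the fifth word is untouched here:

	/* Inferred TLBCAM entry layout; illustrative, not a kernel header. */
	struct tlbcam {
		u32 MAS0;	/* offset  0 */
		u32 MAS1;	/* offset  4 */
		u32 MAS2;	/* offset  8 */
		u32 MAS3;	/* offset 12 */
		u32 pad;	/* offset 16: unused by loadcam_entry */
	};
	extern struct tlbcam TLBCAM[];	/* defined elsewhere in the MMU code */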
942 | /* | ||
943 | * extern void giveup_altivec(struct task_struct *prev) | ||
944 | * | ||
945 | * The e500 core does not have an AltiVec unit. | ||
946 | */ | ||
947 | _GLOBAL(giveup_altivec) | ||
948 | blr | ||
949 | |||
950 | #ifdef CONFIG_SPE | ||
951 | /* | ||
952 | * extern void giveup_spe(struct task_struct *prev) | ||
953 | * | ||
954 | */ | ||
955 | _GLOBAL(giveup_spe) | ||
956 | mfmsr r5 | ||
957 | oris r5,r5,MSR_SPE@h | ||
958 | SYNC | ||
959 | mtmsr r5 /* enable use of SPE now */ | ||
960 | isync | ||
961 | cmpi 0,r3,0 | ||
962 | beqlr- /* if no previous owner, done */ | ||
963 | addi r3,r3,THREAD /* want THREAD of task */ | ||
964 | lwz r5,PT_REGS(r3) | ||
965 | cmpi 0,r5,0 | ||
966 | SAVE_32EVRS(0, r4, r3) | ||
967 | evxor evr6, evr6, evr6 /* clear out evr6 */ | ||
968 | evmwumiaa evr6, evr6, evr6 /* evr6 <- ACC = 0 * 0 + ACC */ | ||
969 | li r4,THREAD_ACC | ||
970 | evstddx evr6, r4, r3 /* save off accumulator */ | ||
971 | mfspr r6,SPRN_SPEFSCR | ||
972 | stw r6,THREAD_SPEFSCR(r3) /* save spefscr register value */ | ||
973 | beq 1f | ||
974 | lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
975 | lis r3,MSR_SPE@h | ||
976 | andc r4,r4,r3 /* disable SPE for previous task */ | ||
977 | stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
978 | 1: | ||
979 | #ifndef CONFIG_SMP | ||
980 | li r5,0 | ||
981 | lis r4,last_task_used_spe@ha | ||
982 | stw r5,last_task_used_spe@l(r4) | ||
983 | #endif /* CONFIG_SMP */ | ||
984 | blr | ||
985 | #endif /* CONFIG_SPE */ | ||
986 | |||
987 | /* | ||
988 | * extern void giveup_fpu(struct task_struct *prev) | ||
989 | * | ||
990 | * Not all FSL Book-E cores have an FPU | ||
991 | */ | ||
992 | #ifndef CONFIG_PPC_FPU | ||
993 | _GLOBAL(giveup_fpu) | ||
994 | blr | ||
995 | #endif | ||
996 | |||
997 | /* | ||
998 | * extern void abort(void) | ||
999 | * | ||
1000 | * At present, this routine just applies a system reset. | ||
1001 | */ | ||
1002 | _GLOBAL(abort) | ||
1003 | li r13,0 | ||
1004 | mtspr SPRN_DBCR0,r13 /* disable all debug events */ | ||
1005 | mfmsr r13 | ||
1006 | ori r13,r13,MSR_DE@l /* Enable Debug Events */ | ||
1007 | mtmsr r13 | ||
1008 | mfspr r13,SPRN_DBCR0 | ||
1009 | lis r13,(DBCR0_IDM|DBCR0_RST_CHIP)@h | ||
1010 | mtspr SPRN_DBCR0,r13 | ||
1011 | |||
1012 | _GLOBAL(set_context) | ||
1013 | |||
1014 | #ifdef CONFIG_BDI_SWITCH | ||
1015 | /* Context switch the PTE pointer for the Abatron BDI2000. | ||
1016 | * The PGDIR is the second parameter. | ||
1017 | */ | ||
1018 | lis r5, abatron_pteptrs@h | ||
1019 | ori r5, r5, abatron_pteptrs@l | ||
1020 | stw r4, 0x4(r5) | ||
1021 | #endif | ||
1022 | mtspr SPRN_PID,r3 | ||
1023 | isync /* Force context change */ | ||
1024 | blr | ||
1025 | |||
1026 | /* | ||
1027 | * We put a few things here that have to be page-aligned. This stuff | ||
1028 | * goes at the beginning of the data segment, which is page-aligned. | ||
1029 | */ | ||
1030 | .data | ||
1031 | _GLOBAL(sdata) | ||
1032 | _GLOBAL(empty_zero_page) | ||
1033 | .space 4096 | ||
1034 | _GLOBAL(swapper_pg_dir) | ||
1035 | .space 4096 | ||
1036 | |||
1037 | /* Reserve 4k for the critical exception stack and 4k for the machine | ||
1038 | * check stack per CPU, for kernel-mode exceptions */ | ||
1039 | .section .bss | ||
1040 | .align 12 | ||
1041 | exception_stack_bottom: | ||
1042 | .space BOOKE_EXCEPTION_STACK_SIZE * NR_CPUS | ||
1043 | _GLOBAL(exception_stack_top) | ||
1044 | |||
1045 | /* | ||
1046 | * This space gets a copy of optional info passed to us by the bootstrap | ||
1047 | * which is used to pass parameters into the kernel like root=/dev/sda1, etc. | ||
1048 | */ | ||
1049 | _GLOBAL(cmd_line) | ||
1050 | .space 512 | ||
1051 | |||
1052 | /* | ||
1053 | * Room for two PTE pointers, usually the kernel and current user pointers | ||
1054 | * to their respective root page table. | ||
1055 | */ | ||
1056 | abatron_pteptrs: | ||
1057 | .space 8 | ||
1058 | |||
diff --git a/arch/powerpc/kernel/idle_6xx.S b/arch/powerpc/kernel/idle_6xx.S new file mode 100644 index 000000000000..1a2194cf6828 --- /dev/null +++ b/arch/powerpc/kernel/idle_6xx.S | |||
@@ -0,0 +1,233 @@ | |||
1 | /* | ||
2 | * This file contains the power_save function for 6xx & 7xxx CPUs | ||
3 | * rewritten in assembler | ||
4 | * | ||
5 | * Warning! This code assumes that if your machine has a 750fx, | ||
6 | * it will have PLL 1 set to low speed mode (used during NAP/DOZE). | ||
7 | * If this is not the case, some additional changes will have to | ||
8 | * be made to check a runtime variable (a bit like powersave-nap). | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public License | ||
12 | * as published by the Free Software Foundation; either version | ||
13 | * 2 of the License, or (at your option) any later version. | ||
14 | */ | ||
15 | |||
16 | #include <linux/config.h> | ||
17 | #include <linux/threads.h> | ||
18 | #include <asm/processor.h> | ||
19 | #include <asm/page.h> | ||
20 | #include <asm/cputable.h> | ||
21 | #include <asm/thread_info.h> | ||
22 | #include <asm/ppc_asm.h> | ||
23 | #include <asm/asm-offsets.h> | ||
24 | |||
25 | #undef DEBUG | ||
26 | |||
27 | .text | ||
28 | |||
29 | /* | ||
30 | * Init idle, called at early CPU setup time from head.S for each CPU | ||
31 | * Make sure no remnant of NAP mode remains in HID0; save default | ||
32 | * values for some CPU-specific registers. Called with r24 | ||
33 | * containing the CPU number and r3 the reloc offset. | ||
34 | */ | ||
35 | _GLOBAL(init_idle_6xx) | ||
36 | BEGIN_FTR_SECTION | ||
37 | mfspr r4,SPRN_HID0 | ||
38 | rlwinm r4,r4,0,10,8 /* Clear NAP */ | ||
39 | mtspr SPRN_HID0, r4 | ||
40 | b 1f | ||
41 | END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP) | ||
42 | blr | ||
43 | 1: | ||
44 | slwi r5,r24,2 | ||
45 | add r5,r5,r3 | ||
46 | BEGIN_FTR_SECTION | ||
47 | mfspr r4,SPRN_MSSCR0 | ||
48 | addis r6,r5, nap_save_msscr0@ha | ||
49 | stw r4,nap_save_msscr0@l(r6) | ||
50 | END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR) | ||
51 | BEGIN_FTR_SECTION | ||
52 | mfspr r4,SPRN_HID1 | ||
53 | addis r6,r5,nap_save_hid1@ha | ||
54 | stw r4,nap_save_hid1@l(r6) | ||
55 | END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX) | ||
56 | blr | ||
57 | |||
58 | /* | ||
59 | * Here is the power_save_6xx function. This could eventually be | ||
60 | * split into several functions, with the function pointer changed | ||
61 | * depending on the various features. | ||
62 | */ | ||
63 | _GLOBAL(ppc6xx_idle) | ||
64 | /* Check if we can nap or doze, put HID0 mask in r3 | ||
65 | */ | ||
66 | lis r3, 0 | ||
67 | BEGIN_FTR_SECTION | ||
68 | lis r3,HID0_DOZE@h | ||
69 | END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE) | ||
70 | BEGIN_FTR_SECTION | ||
71 | /* We must dynamically check for the NAP feature as it | ||
72 | * can be cleared by CPU init after the fixups are done | ||
73 | */ | ||
74 | lis r4,cur_cpu_spec@ha | ||
75 | lwz r4,cur_cpu_spec@l(r4) | ||
76 | lwz r4,CPU_SPEC_FEATURES(r4) | ||
77 | andi. r0,r4,CPU_FTR_CAN_NAP | ||
78 | beq 1f | ||
79 | /* Now check if user or arch enabled NAP mode */ | ||
80 | lis r4,powersave_nap@ha | ||
81 | lwz r4,powersave_nap@l(r4) | ||
82 | cmpwi 0,r4,0 | ||
83 | beq 1f | ||
84 | lis r3,HID0_NAP@h | ||
85 | 1: | ||
86 | END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP) | ||
87 | cmpwi 0,r3,0 | ||
88 | beqlr | ||
89 | |||
90 | /* Clear MSR:EE */ | ||
91 | mfmsr r7 | ||
92 | rlwinm r0,r7,0,17,15 | ||
93 | mtmsr r0 | ||
94 | |||
95 | /* Check current_thread_info()->flags */ | ||
96 | rlwinm r4,r1,0,0,18 | ||
97 | lwz r4,TI_FLAGS(r4) | ||
98 | andi. r0,r4,_TIF_NEED_RESCHED | ||
99 | beq 1f | ||
100 | mtmsr r7 /* out of line this ? */ | ||
101 | blr | ||
102 | 1: | ||
103 | /* Some pre-nap cleanups needed on some CPUs */ | ||
104 | andis. r0,r3,HID0_NAP@h | ||
105 | beq 2f | ||
106 | BEGIN_FTR_SECTION | ||
107 | /* Disable L2 prefetch on some 745x and try to ensure | ||
108 | * L2 prefetch engines are idle. As explained by errata | ||
109 | * text, we can't be sure they are, we just hope very hard | ||
110 | * that will be enough (sic!). At least I noticed Apple | ||
111 | * doesn't even bother doing the dcbf's here... | ||
112 | */ | ||
113 | mfspr r4,SPRN_MSSCR0 | ||
114 | rlwinm r4,r4,0,0,29 | ||
115 | sync | ||
116 | mtspr SPRN_MSSCR0,r4 | ||
117 | sync | ||
118 | isync | ||
119 | lis r4,KERNELBASE@h | ||
120 | dcbf 0,r4 | ||
121 | dcbf 0,r4 | ||
122 | dcbf 0,r4 | ||
123 | dcbf 0,r4 | ||
124 | END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR) | ||
125 | #ifdef DEBUG | ||
126 | lis r6,nap_enter_count@ha | ||
127 | lwz r4,nap_enter_count@l(r6) | ||
128 | addi r4,r4,1 | ||
129 | stw r4,nap_enter_count@l(r6) | ||
130 | #endif | ||
131 | 2: | ||
132 | BEGIN_FTR_SECTION | ||
133 | /* Go to low speed mode on some 750FX */ | ||
134 | lis r4,powersave_lowspeed@ha | ||
135 | lwz r4,powersave_lowspeed@l(r4) | ||
136 | cmpwi 0,r4,0 | ||
137 | beq 1f | ||
138 | mfspr r4,SPRN_HID1 | ||
139 | oris r4,r4,0x0001 | ||
140 | mtspr SPRN_HID1,r4 | ||
141 | 1: | ||
142 | END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX) | ||
143 | |||
144 | /* Go to NAP or DOZE now */ | ||
145 | mfspr r4,SPRN_HID0 | ||
146 | lis r5,(HID0_NAP|HID0_SLEEP)@h | ||
147 | BEGIN_FTR_SECTION | ||
148 | oris r5,r5,HID0_DOZE@h | ||
149 | END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE) | ||
150 | andc r4,r4,r5 | ||
151 | or r4,r4,r3 | ||
152 | BEGIN_FTR_SECTION | ||
153 | oris r4,r4,HID0_DPM@h /* that should be done once for all */ | ||
154 | END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM) | ||
155 | mtspr SPRN_HID0,r4 | ||
156 | BEGIN_FTR_SECTION | ||
157 | DSSALL | ||
158 | sync | ||
159 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | ||
160 | ori r7,r7,MSR_EE /* Could be omitted (already set) */ | ||
161 | oris r7,r7,MSR_POW@h | ||
162 | sync | ||
163 | isync | ||
164 | mtmsr r7 | ||
165 | isync | ||
166 | sync | ||
167 | blr | ||
168 | |||
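The EE-clear / TIF_NEED_RESCHED test / MSR_POW ordering above closes the classic idle race. A sketch of the same ordering in C, with enter_low_power_state() standing in for the final mtmsr (a hypothetical helper, not a kernel function):

	void idle_enter(void)
	{
		/* Interrupts must be off *before* the need_resched test;
		 * otherwise an interrupt that sets the flag between the
		 * test and the sleep would be missed until the next wakeup. */
		local_irq_disable();
		if (test_thread_flag(TIF_NEED_RESCHED)) {
			local_irq_enable();	/* bail out and go schedule */
			return;
		}
		/* Setting MSR_POW together with MSR_EE lets the sleeping CPU
		 * be woken and take the interrupt atomically, closing the race. */
		enter_low_power_state();	/* mtmsr with POW|EE set */
	}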
169 | /* | ||
170 | * Return from NAP/DOZE mode, restore some CPU-specific registers. | ||
171 | * We are called with DR/IR still off and r2 containing the physical | ||
172 | * address of current. | ||
173 | */ | ||
174 | _GLOBAL(power_save_6xx_restore) | ||
175 | mfspr r11,SPRN_HID0 | ||
176 | rlwinm. r11,r11,0,10,8 /* Clear NAP & copy NAP bit !state to cr1 EQ */ | ||
177 | cror 4*cr1+eq,4*cr0+eq,4*cr0+eq | ||
178 | BEGIN_FTR_SECTION | ||
179 | rlwinm r11,r11,0,9,7 /* Clear DOZE */ | ||
180 | END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE) | ||
181 | mtspr SPRN_HID0, r11 | ||
182 | |||
183 | #ifdef DEBUG | ||
184 | beq cr1,1f | ||
185 | lis r11,(nap_return_count-KERNELBASE)@ha | ||
186 | lwz r9,nap_return_count@l(r11) | ||
187 | addi r9,r9,1 | ||
188 | stw r9,nap_return_count@l(r11) | ||
189 | 1: | ||
190 | #endif | ||
191 | |||
192 | rlwinm r9,r1,0,0,18 | ||
193 | tophys(r9,r9) | ||
194 | lwz r11,TI_CPU(r9) | ||
195 | slwi r11,r11,2 | ||
196 | /* TODO: make sure all these are in the same page | ||
197 | * and load r22 (@ha part + CPU offset) only once | ||
198 | */ | ||
199 | BEGIN_FTR_SECTION | ||
200 | beq cr1,1f | ||
201 | addis r9,r11,(nap_save_msscr0-KERNELBASE)@ha | ||
202 | lwz r9,nap_save_msscr0@l(r9) | ||
203 | mtspr SPRN_MSSCR0, r9 | ||
204 | sync | ||
205 | isync | ||
206 | 1: | ||
207 | END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR) | ||
208 | BEGIN_FTR_SECTION | ||
209 | addis r9,r11,(nap_save_hid1-KERNELBASE)@ha | ||
210 | lwz r9,nap_save_hid1@l(r9) | ||
211 | mtspr SPRN_HID1, r9 | ||
212 | END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX) | ||
213 | b transfer_to_handler_cont | ||
214 | |||
215 | .data | ||
216 | |||
217 | _GLOBAL(nap_save_msscr0) | ||
218 | .space 4*NR_CPUS | ||
219 | |||
220 | _GLOBAL(nap_save_hid1) | ||
221 | .space 4*NR_CPUS | ||
222 | |||
223 | _GLOBAL(powersave_nap) | ||
224 | .long 0 | ||
225 | _GLOBAL(powersave_lowspeed) | ||
226 | .long 0 | ||
227 | |||
228 | #ifdef DEBUG | ||
229 | _GLOBAL(nap_enter_count) | ||
230 | .space 4 | ||
231 | _GLOBAL(nap_return_count) | ||
232 | .space 4 | ||
233 | #endif | ||
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c new file mode 100644 index 000000000000..f5a9d2a84fa1 --- /dev/null +++ b/arch/powerpc/kernel/process.c | |||
@@ -0,0 +1,724 @@ | |||
1 | /* | ||
2 | * arch/powerpc/kernel/process.c | ||
3 | * | ||
4 | * Derived from "arch/i386/kernel/process.c" | ||
5 | * Copyright (C) 1995 Linus Torvalds | ||
6 | * | ||
7 | * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and | ||
8 | * Paul Mackerras (paulus@cs.anu.edu.au) | ||
9 | * | ||
10 | * PowerPC version | ||
11 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or | ||
14 | * modify it under the terms of the GNU General Public License | ||
15 | * as published by the Free Software Foundation; either version | ||
16 | * 2 of the License, or (at your option) any later version. | ||
17 | */ | ||
18 | |||
19 | #include <linux/config.h> | ||
20 | #include <linux/errno.h> | ||
21 | #include <linux/sched.h> | ||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/mm.h> | ||
24 | #include <linux/smp.h> | ||
25 | #include <linux/smp_lock.h> | ||
26 | #include <linux/stddef.h> | ||
27 | #include <linux/unistd.h> | ||
28 | #include <linux/ptrace.h> | ||
29 | #include <linux/slab.h> | ||
30 | #include <linux/user.h> | ||
31 | #include <linux/elf.h> | ||
32 | #include <linux/init.h> | ||
33 | #include <linux/prctl.h> | ||
34 | #include <linux/init_task.h> | ||
35 | #include <linux/module.h> | ||
36 | #include <linux/kallsyms.h> | ||
37 | #include <linux/mqueue.h> | ||
38 | #include <linux/hardirq.h> | ||
39 | |||
40 | #include <asm/pgtable.h> | ||
41 | #include <asm/uaccess.h> | ||
42 | #include <asm/system.h> | ||
43 | #include <asm/io.h> | ||
44 | #include <asm/processor.h> | ||
45 | #include <asm/mmu.h> | ||
46 | #include <asm/prom.h> | ||
47 | |||
48 | extern unsigned long _get_SP(void); | ||
49 | |||
50 | #ifndef CONFIG_SMP | ||
51 | struct task_struct *last_task_used_math = NULL; | ||
52 | struct task_struct *last_task_used_altivec = NULL; | ||
53 | struct task_struct *last_task_used_spe = NULL; | ||
54 | #endif | ||
55 | |||
56 | static struct fs_struct init_fs = INIT_FS; | ||
57 | static struct files_struct init_files = INIT_FILES; | ||
58 | static struct signal_struct init_signals = INIT_SIGNALS(init_signals); | ||
59 | static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); | ||
60 | struct mm_struct init_mm = INIT_MM(init_mm); | ||
61 | EXPORT_SYMBOL(init_mm); | ||
62 | |||
63 | /* this is 8kB-aligned so we can get to the thread_info struct | ||
64 | at the base of it from the stack pointer with 1 integer instruction. */ | ||
65 | union thread_union init_thread_union | ||
66 | __attribute__((__section__(".data.init_task"))) = | ||
67 | { INIT_THREAD_INFO(init_task) }; | ||
68 | |||
69 | /* initial task structure */ | ||
70 | struct task_struct init_task = INIT_TASK(init_task); | ||
71 | EXPORT_SYMBOL(init_task); | ||
72 | |||
73 | /* only used to get secondary processor up */ | ||
74 | struct task_struct *current_set[NR_CPUS] = {&init_task, }; | ||
75 | |||
76 | /* | ||
77 | * Make sure the floating-point register state in | ||
78 | * the thread_struct is up to date for task tsk. | ||
79 | */ | ||
80 | void flush_fp_to_thread(struct task_struct *tsk) | ||
81 | { | ||
82 | if (tsk->thread.regs) { | ||
83 | /* | ||
84 | * We need to disable preemption here because if we didn't, | ||
85 | * another process could get scheduled after the regs->msr | ||
86 | * test but before we have finished saving the FP registers | ||
87 | * to the thread_struct. That process could take over the | ||
88 | * FPU, and then when we get scheduled again we would store | ||
89 | * bogus values for the remaining FP registers. | ||
90 | */ | ||
91 | preempt_disable(); | ||
92 | if (tsk->thread.regs->msr & MSR_FP) { | ||
93 | #ifdef CONFIG_SMP | ||
94 | /* | ||
95 | * This should only ever be called for current or | ||
96 | * for a stopped child process. Since we save away | ||
97 | * the FP register state on context switch on SMP, | ||
98 | * there is something wrong if a stopped child appears | ||
99 | * to still have its FP state in the CPU registers. | ||
100 | */ | ||
101 | BUG_ON(tsk != current); | ||
102 | #endif | ||
103 | giveup_fpu(current); | ||
104 | } | ||
105 | preempt_enable(); | ||
106 | } | ||
107 | } | ||
108 | |||
109 | void enable_kernel_fp(void) | ||
110 | { | ||
111 | WARN_ON(preemptible()); | ||
112 | |||
113 | #ifdef CONFIG_SMP | ||
114 | if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) | ||
115 | giveup_fpu(current); | ||
116 | else | ||
117 | giveup_fpu(NULL); /* just enables FP for kernel */ | ||
118 | #else | ||
119 | giveup_fpu(last_task_used_math); | ||
120 | #endif /* CONFIG_SMP */ | ||
121 | } | ||
122 | EXPORT_SYMBOL(enable_kernel_fp); | ||
123 | |||
124 | int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs) | ||
125 | { | ||
126 | if (!tsk->thread.regs) | ||
127 | return 0; | ||
128 | flush_fp_to_thread(current); | ||
129 | |||
130 | memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs)); | ||
131 | |||
132 | return 1; | ||
133 | } | ||
134 | |||
135 | #ifdef CONFIG_ALTIVEC | ||
136 | void enable_kernel_altivec(void) | ||
137 | { | ||
138 | WARN_ON(preemptible()); | ||
139 | |||
140 | #ifdef CONFIG_SMP | ||
141 | if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) | ||
142 | giveup_altivec(current); | ||
143 | else | ||
144 | giveup_altivec(NULL); /* just enable AltiVec for kernel - force */ | ||
145 | #else | ||
146 | giveup_altivec(last_task_used_altivec); | ||
147 | #endif /* CONFIG_SMP */ | ||
148 | } | ||
149 | EXPORT_SYMBOL(enable_kernel_altivec); | ||
150 | |||
151 | /* | ||
152 | * Make sure the VMX/Altivec register state in | ||
153 | * the thread_struct is up to date for task tsk. | ||
154 | */ | ||
155 | void flush_altivec_to_thread(struct task_struct *tsk) | ||
156 | { | ||
157 | if (tsk->thread.regs) { | ||
158 | preempt_disable(); | ||
159 | if (tsk->thread.regs->msr & MSR_VEC) { | ||
160 | #ifdef CONFIG_SMP | ||
161 | BUG_ON(tsk != current); | ||
162 | #endif | ||
163 | giveup_altivec(current); | ||
164 | } | ||
165 | preempt_enable(); | ||
166 | } | ||
167 | } | ||
168 | |||
169 | int dump_task_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs) | ||
170 | { | ||
171 | flush_altivec_to_thread(current); | ||
172 | memcpy(vrregs, ¤t->thread.vr[0], sizeof(*vrregs)); | ||
173 | return 1; | ||
174 | } | ||
175 | #endif /* CONFIG_ALTIVEC */ | ||
176 | |||
177 | #ifdef CONFIG_SPE | ||
178 | |||
179 | void enable_kernel_spe(void) | ||
180 | { | ||
181 | WARN_ON(preemptible()); | ||
182 | |||
183 | #ifdef CONFIG_SMP | ||
184 | if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) | ||
185 | giveup_spe(current); | ||
186 | else | ||
187 | giveup_spe(NULL); /* just enable SPE for kernel - force */ | ||
188 | #else | ||
189 | giveup_spe(last_task_used_spe); | ||
190 | #endif /* CONFIG_SMP */ | ||
191 | } | ||
192 | EXPORT_SYMBOL(enable_kernel_spe); | ||
193 | |||
194 | void flush_spe_to_thread(struct task_struct *tsk) | ||
195 | { | ||
196 | if (tsk->thread.regs) { | ||
197 | preempt_disable(); | ||
198 | if (tsk->thread.regs->msr & MSR_SPE) { | ||
199 | #ifdef CONFIG_SMP | ||
200 | BUG_ON(tsk != current); | ||
201 | #endif | ||
202 | giveup_spe(current); | ||
203 | } | ||
204 | preempt_enable(); | ||
205 | } | ||
206 | } | ||
207 | |||
208 | int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs) | ||
209 | { | ||
210 | flush_spe_to_thread(current); | ||
211 | /* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 32-bit words */ | ||
212 | memcpy(evrregs, ¤t->thread.evr[0], sizeof(u32) * 35); | ||
213 | return 1; | ||
214 | } | ||
215 | #endif /* CONFIG_SPE */ | ||
216 | |||
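The 35-word count in dump_spe assumes the thread_struct keeps these fields contiguous, which the single memcpy requires:

	u32 evr[32];	/* upper halves of GPR0..GPR31: 32 words */
	u64 acc;	/* SPE accumulator: 2 words */
	u32 spefscr;	/* SPE status/control: 1 word -> 35 total */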
217 | static void set_dabr_spr(unsigned long val) | ||
218 | { | ||
219 | mtspr(SPRN_DABR, val); | ||
220 | } | ||
221 | |||
222 | int set_dabr(unsigned long dabr) | ||
223 | { | ||
224 | int ret = 0; | ||
225 | |||
226 | #ifdef CONFIG_PPC64 | ||
227 | if (firmware_has_feature(FW_FEATURE_XDABR)) { | ||
228 | /* We want to catch accesses from kernel and userspace */ | ||
229 | unsigned long flags = H_DABRX_KERNEL|H_DABRX_USER; | ||
230 | ret = plpar_set_xdabr(dabr, flags); | ||
231 | } else if (firmware_has_feature(FW_FEATURE_DABR)) { | ||
232 | ret = plpar_set_dabr(dabr); | ||
233 | } else | ||
234 | #endif | ||
235 | set_dabr_spr(dabr); | ||
236 | |||
237 | return ret; | ||
238 | } | ||
239 | |||
240 | static DEFINE_PER_CPU(unsigned long, current_dabr); | ||
241 | |||
242 | struct task_struct *__switch_to(struct task_struct *prev, | ||
243 | struct task_struct *new) | ||
244 | { | ||
245 | struct thread_struct *new_thread, *old_thread; | ||
246 | unsigned long flags; | ||
247 | struct task_struct *last; | ||
248 | |||
249 | #ifdef CONFIG_SMP | ||
250 | /* avoid complexity of lazy save/restore of fpu | ||
251 | * by just saving it every time we switch out if | ||
252 | * this task used the fpu during the last quantum. | ||
253 | * | ||
254 | * If it tries to use the fpu again, it'll trap and | ||
255 | * reload its fp regs. So we don't have to do a restore | ||
256 | * every switch, just a save. | ||
257 | * -- Cort | ||
258 | */ | ||
259 | if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP)) | ||
260 | giveup_fpu(prev); | ||
261 | #ifdef CONFIG_ALTIVEC | ||
262 | /* | ||
263 | * If the previous thread used altivec in the last quantum | ||
264 | * (thus changing altivec regs) then save them. | ||
265 | * We used to check the VRSAVE register but not all apps | ||
266 | * set it, so we don't rely on it now (and in fact we need | ||
267 | * to save & restore VSCR even if VRSAVE == 0). -- paulus | ||
268 | * | ||
269 | * On SMP we always save/restore altivec regs just to avoid the | ||
270 | * complexity of changing processors. | ||
271 | * -- Cort | ||
272 | */ | ||
273 | if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC)) | ||
274 | giveup_altivec(prev); | ||
275 | /* Avoid the trap. On SMP this never happens since | ||
276 | * we don't set last_task_used_altivec. -- Cort | ||
277 | */ | ||
278 | if (new->thread.regs && last_task_used_altivec == new) | ||
279 | new->thread.regs->msr |= MSR_VEC; | ||
280 | #endif /* CONFIG_ALTIVEC */ | ||
281 | #ifdef CONFIG_SPE | ||
282 | /* | ||
283 | * If the previous thread used spe in the last quantum | ||
284 | * (thus changing spe regs) then save them. | ||
285 | * | ||
286 | * On SMP we always save/restore spe regs just to avoid the | ||
287 | * complexity of changing processors. | ||
288 | */ | ||
289 | if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE))) | ||
290 | giveup_spe(prev); | ||
291 | /* Avoid the trap. On SMP this never happens since | ||
292 | * we don't set last_task_used_spe. | ||
293 | */ | ||
294 | if (new->thread.regs && last_task_used_spe == new) | ||
295 | new->thread.regs->msr |= MSR_SPE; | ||
296 | #endif /* CONFIG_SPE */ | ||
297 | #endif /* CONFIG_SMP */ | ||
298 | |||
299 | #ifdef CONFIG_PPC64 /* for now */ | ||
300 | if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) { | ||
301 | set_dabr(new->thread.dabr); | ||
302 | __get_cpu_var(current_dabr) = new->thread.dabr; | ||
303 | } | ||
304 | #endif | ||
305 | |||
306 | new_thread = &new->thread; | ||
307 | old_thread = ¤t->thread; | ||
308 | local_irq_save(flags); | ||
309 | last = _switch(old_thread, new_thread); | ||
310 | |||
311 | local_irq_restore(flags); | ||
312 | |||
313 | return last; | ||
314 | } | ||
315 | |||
316 | void show_regs(struct pt_regs * regs) | ||
317 | { | ||
318 | int i, trap; | ||
319 | |||
320 | printk("NIP: %08lX LR: %08lX SP: %08lX REGS: %p TRAP: %04lx %s\n", | ||
321 | regs->nip, regs->link, regs->gpr[1], regs, regs->trap, | ||
322 | print_tainted()); | ||
323 | printk("MSR: %08lx EE: %01x PR: %01x FP: %01x ME: %01x IR/DR: %01x%01x\n", | ||
324 | regs->msr, regs->msr&MSR_EE ? 1 : 0, regs->msr&MSR_PR ? 1 : 0, | ||
325 | regs->msr & MSR_FP ? 1 : 0,regs->msr&MSR_ME ? 1 : 0, | ||
326 | regs->msr&MSR_IR ? 1 : 0, | ||
327 | regs->msr&MSR_DR ? 1 : 0); | ||
328 | trap = TRAP(regs); | ||
329 | if (trap == 0x300 || trap == 0x600) | ||
330 | printk("DAR: %08lX, DSISR: %08lX\n", regs->dar, regs->dsisr); | ||
331 | printk("TASK = %p[%d] '%s' THREAD: %p\n", | ||
332 | current, current->pid, current->comm, current->thread_info); | ||
333 | printk("Last syscall: %ld ", current->thread.last_syscall); | ||
334 | |||
335 | #ifdef CONFIG_SMP | ||
336 | printk(" CPU: %d", smp_processor_id()); | ||
337 | #endif /* CONFIG_SMP */ | ||
338 | |||
339 | for (i = 0; i < 32; i++) { | ||
340 | long r; | ||
341 | if ((i % 8) == 0) | ||
342 | printk("\n" KERN_INFO "GPR%02d: ", i); | ||
343 | if (__get_user(r, ®s->gpr[i])) | ||
344 | break; | ||
345 | printk("%08lX ", r); | ||
346 | if (i == 12 && !FULL_REGS(regs)) | ||
347 | break; | ||
348 | } | ||
349 | printk("\n"); | ||
350 | #ifdef CONFIG_KALLSYMS | ||
351 | /* | ||
352 | * Look up the NIP late so we have the best chance of getting the | ||
353 | * above info out without failing. | ||
354 | */ | ||
355 | printk("NIP [%08lx] ", regs->nip); | ||
356 | print_symbol("%s\n", regs->nip); | ||
357 | printk("LR [%08lx] ", regs->link); | ||
358 | print_symbol("%s\n", regs->link); | ||
359 | #endif | ||
360 | show_stack(current, (unsigned long *) regs->gpr[1]); | ||
361 | } | ||
362 | |||
363 | void exit_thread(void) | ||
364 | { | ||
365 | #ifndef CONFIG_SMP | ||
366 | if (last_task_used_math == current) | ||
367 | last_task_used_math = NULL; | ||
368 | #ifdef CONFIG_ALTIVEC | ||
369 | if (last_task_used_altivec == current) | ||
370 | last_task_used_altivec = NULL; | ||
371 | #endif /* CONFIG_ALTIVEC */ | ||
372 | #ifdef CONFIG_SPE | ||
373 | if (last_task_used_spe == current) | ||
374 | last_task_used_spe = NULL; | ||
375 | #endif | ||
376 | #endif /* CONFIG_SMP */ | ||
377 | } | ||
378 | |||
379 | void flush_thread(void) | ||
380 | { | ||
381 | #ifndef CONFIG_SMP | ||
382 | if (last_task_used_math == current) | ||
383 | last_task_used_math = NULL; | ||
384 | #ifdef CONFIG_ALTIVEC | ||
385 | if (last_task_used_altivec == current) | ||
386 | last_task_used_altivec = NULL; | ||
387 | #endif /* CONFIG_ALTIVEC */ | ||
388 | #ifdef CONFIG_SPE | ||
389 | if (last_task_used_spe == current) | ||
390 | last_task_used_spe = NULL; | ||
391 | #endif | ||
392 | #endif /* CONFIG_SMP */ | ||
393 | |||
394 | #ifdef CONFIG_PPC64 /* for now */ | ||
395 | if (current->thread.dabr) { | ||
396 | current->thread.dabr = 0; | ||
397 | set_dabr(0); | ||
398 | } | ||
399 | #endif | ||
400 | } | ||
401 | |||
402 | void | ||
403 | release_thread(struct task_struct *t) | ||
404 | { | ||
405 | } | ||
406 | |||
407 | /* | ||
408 | * This gets called before we allocate a new thread and copy | ||
409 | * the current task into it. | ||
410 | */ | ||
411 | void prepare_to_copy(struct task_struct *tsk) | ||
412 | { | ||
413 | flush_fp_to_thread(current); | ||
414 | flush_altivec_to_thread(current); | ||
415 | flush_spe_to_thread(current); | ||
416 | } | ||
417 | |||
418 | /* | ||
419 | * Copy a thread. | ||
420 | */ | ||
421 | int | ||
422 | copy_thread(int nr, unsigned long clone_flags, unsigned long usp, | ||
423 | unsigned long unused, | ||
424 | struct task_struct *p, struct pt_regs *regs) | ||
425 | { | ||
426 | struct pt_regs *childregs, *kregs; | ||
427 | extern void ret_from_fork(void); | ||
428 | unsigned long sp = (unsigned long)p->thread_info + THREAD_SIZE; | ||
429 | unsigned long childframe; | ||
430 | |||
431 | CHECK_FULL_REGS(regs); | ||
432 | /* Copy registers */ | ||
433 | sp -= sizeof(struct pt_regs); | ||
434 | childregs = (struct pt_regs *) sp; | ||
435 | *childregs = *regs; | ||
436 | if ((childregs->msr & MSR_PR) == 0) { | ||
437 | /* for kernel thread, set `current' and stackptr in new task */ | ||
438 | childregs->gpr[1] = sp + sizeof(struct pt_regs); | ||
439 | childregs->gpr[2] = (unsigned long) p; | ||
440 | p->thread.regs = NULL; /* no user register state */ | ||
441 | } else { | ||
442 | childregs->gpr[1] = usp; | ||
443 | p->thread.regs = childregs; | ||
444 | if (clone_flags & CLONE_SETTLS) | ||
445 | childregs->gpr[2] = childregs->gpr[6]; | ||
446 | } | ||
447 | childregs->gpr[3] = 0; /* Result from fork() */ | ||
448 | sp -= STACK_FRAME_OVERHEAD; | ||
449 | childframe = sp; | ||
450 | |||
451 | /* | ||
452 | * The way this works is that at some point in the future | ||
453 | * some task will call _switch to switch to the new task. | ||
454 | * That will pop off the stack frame created below and start | ||
455 | * the new task running at ret_from_fork. The new task will | ||
456 | * do some housekeeping and then return from the fork or clone | ||
457 | * system call, using the stack frame created above. | ||
458 | */ | ||
459 | sp -= sizeof(struct pt_regs); | ||
460 | kregs = (struct pt_regs *) sp; | ||
461 | sp -= STACK_FRAME_OVERHEAD; | ||
462 | p->thread.ksp = sp; | ||
463 | kregs->nip = (unsigned long)ret_from_fork; | ||
464 | |||
465 | p->thread.last_syscall = -1; | ||
466 | |||
467 | return 0; | ||
468 | } | ||
469 | |||
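For reference, the child kernel stack that copy_thread builds ends up laid out as follows (high addresses at the top; a sketch derived from the arithmetic above):

	+--------------------------+  <- thread_info + THREAD_SIZE
	| struct pt_regs           |  childregs: user (or kernel-thread) regs
	+--------------------------+
	| STACK_FRAME_OVERHEAD     |  childframe
	+--------------------------+
	| struct pt_regs           |  kregs: nip = ret_from_fork
	+--------------------------+
	| STACK_FRAME_OVERHEAD     |  <- p->thread.ksp
	+--------------------------+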
470 | /* | ||
471 | * Set up a thread for executing a new program | ||
472 | */ | ||
473 | void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp) | ||
474 | { | ||
475 | set_fs(USER_DS); | ||
476 | memset(regs->gpr, 0, sizeof(regs->gpr)); | ||
477 | regs->ctr = 0; | ||
478 | regs->link = 0; | ||
479 | regs->xer = 0; | ||
480 | regs->ccr = 0; | ||
481 | regs->mq = 0; | ||
482 | regs->nip = nip; | ||
483 | regs->gpr[1] = sp; | ||
484 | regs->msr = MSR_USER; | ||
485 | #ifndef CONFIG_SMP | ||
486 | if (last_task_used_math == current) | ||
487 | last_task_used_math = NULL; | ||
488 | #ifdef CONFIG_ALTIVEC | ||
489 | if (last_task_used_altivec == current) | ||
490 | last_task_used_altivec = NULL; | ||
491 | #endif | ||
492 | #ifdef CONFIG_SPE | ||
493 | if (last_task_used_spe == current) | ||
494 | last_task_used_spe = NULL; | ||
495 | #endif | ||
496 | #endif /* CONFIG_SMP */ | ||
497 | memset(current->thread.fpr, 0, sizeof(current->thread.fpr)); | ||
498 | current->thread.fpscr = 0; | ||
499 | #ifdef CONFIG_ALTIVEC | ||
500 | memset(current->thread.vr, 0, sizeof(current->thread.vr)); | ||
501 | memset(¤t->thread.vscr, 0, sizeof(current->thread.vscr)); | ||
502 | current->thread.vrsave = 0; | ||
503 | current->thread.used_vr = 0; | ||
504 | #endif /* CONFIG_ALTIVEC */ | ||
505 | #ifdef CONFIG_SPE | ||
506 | memset(current->thread.evr, 0, sizeof(current->thread.evr)); | ||
507 | current->thread.acc = 0; | ||
508 | current->thread.spefscr = 0; | ||
509 | current->thread.used_spe = 0; | ||
510 | #endif /* CONFIG_SPE */ | ||
511 | } | ||
512 | |||
513 | #define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \ | ||
514 | | PR_FP_EXC_RES | PR_FP_EXC_INV) | ||
515 | |||
516 | int set_fpexc_mode(struct task_struct *tsk, unsigned int val) | ||
517 | { | ||
518 | struct pt_regs *regs = tsk->thread.regs; | ||
519 | |||
520 | /* This is a bit hairy. If we are an SPE-enabled processor | ||
521 | * (have embedded fp) we store the IEEE exception enable flags in | ||
522 | * fpexc_mode. fpexc_mode is also used for setting FP exception | ||
523 | * mode (async, precise, disabled) for 'Classic' FP. */ | ||
524 | if (val & PR_FP_EXC_SW_ENABLE) { | ||
525 | #ifdef CONFIG_SPE | ||
526 | tsk->thread.fpexc_mode = val & | ||
527 | (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT); | ||
528 | #else | ||
529 | return -EINVAL; | ||
530 | #endif | ||
531 | } else { | ||
532 | /* On a CONFIG_SPE implementation this does not hurt us. The bits | ||
533 | * that __pack_fe01 uses do not overlap with the bits used for | ||
534 | * PR_FP_EXC_SW_ENABLE. Additionally, the MSR[FE0,FE1] bits | ||
535 | * on CONFIG_SPE implementations are reserved, so writing to | ||
536 | * them does not change anything. */ | ||
537 | if (val > PR_FP_EXC_PRECISE) | ||
538 | return -EINVAL; | ||
539 | tsk->thread.fpexc_mode = __pack_fe01(val); | ||
540 | if (regs != NULL && (regs->msr & MSR_FP) != 0) | ||
541 | regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1)) | ||
542 | | tsk->thread.fpexc_mode; | ||
543 | } | ||
544 | return 0; | ||
545 | } | ||
546 | |||
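set_fpexc_mode() and get_fpexc_mode() back the PR_SET_FPEXC/PR_GET_FPEXC prctl(2) operations. A minimal userspace sketch, assuming the PR_FP_EXC_* flags from <linux/prctl.h> of this era:

	#include <stdio.h>
	#include <sys/prctl.h>
	#include <linux/prctl.h>

	int main(void)
	{
		unsigned int mode;

		/* Ask for precise (synchronous) FP exception reporting. */
		if (prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE) < 0)
			perror("PR_SET_FPEXC");

		/* Read the mode back; the kernel writes through the pointer. */
		if (prctl(PR_GET_FPEXC, (unsigned long)&mode) == 0)
			printf("fpexc mode: %#x\n", mode);
		return 0;
	}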
547 | int get_fpexc_mode(struct task_struct *tsk, unsigned long adr) | ||
548 | { | ||
549 | unsigned int val; | ||
550 | |||
551 | if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) | ||
552 | #ifdef CONFIG_SPE | ||
553 | val = tsk->thread.fpexc_mode; | ||
554 | #else | ||
555 | return -EINVAL; | ||
556 | #endif | ||
557 | else | ||
558 | val = __unpack_fe01(tsk->thread.fpexc_mode); | ||
559 | return put_user(val, (unsigned int __user *) adr); | ||
560 | } | ||
561 | |||
562 | int sys_clone(unsigned long clone_flags, unsigned long usp, | ||
563 | int __user *parent_tidp, void __user *child_threadptr, | ||
564 | int __user *child_tidp, int p6, | ||
565 | struct pt_regs *regs) | ||
566 | { | ||
567 | CHECK_FULL_REGS(regs); | ||
568 | if (usp == 0) | ||
569 | usp = regs->gpr[1]; /* stack pointer for child */ | ||
570 | return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp); | ||
571 | } | ||
572 | |||
573 | int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3, | ||
574 | unsigned long p4, unsigned long p5, unsigned long p6, | ||
575 | struct pt_regs *regs) | ||
576 | { | ||
577 | CHECK_FULL_REGS(regs); | ||
578 | return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL); | ||
579 | } | ||
580 | |||
581 | int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3, | ||
582 | unsigned long p4, unsigned long p5, unsigned long p6, | ||
583 | struct pt_regs *regs) | ||
584 | { | ||
585 | CHECK_FULL_REGS(regs); | ||
586 | return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1], | ||
587 | regs, 0, NULL, NULL); | ||
588 | } | ||
589 | |||
590 | int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2, | ||
591 | unsigned long a3, unsigned long a4, unsigned long a5, | ||
592 | struct pt_regs *regs) | ||
593 | { | ||
594 | int error; | ||
595 | char * filename; | ||
596 | |||
597 | filename = getname((char __user *) a0); | ||
598 | error = PTR_ERR(filename); | ||
599 | if (IS_ERR(filename)) | ||
600 | goto out; | ||
601 | flush_fp_to_thread(current); | ||
602 | flush_altivec_to_thread(current); | ||
603 | flush_spe_to_thread(current); | ||
| error = do_execve(filename, (char __user * __user *) a1, | ||
| (char __user * __user *) a2, regs); | ||
604 | if (error == 0) { | ||
605 | task_lock(current); | ||
606 | current->ptrace &= ~PT_DTRACE; | ||
607 | task_unlock(current); | ||
608 | } | ||
609 | putname(filename); | ||
610 | out: | ||
611 | return error; | ||
612 | } | ||
613 | |||
614 | static int validate_sp(unsigned long sp, struct task_struct *p, | ||
615 | unsigned long nbytes) | ||
616 | { | ||
617 | unsigned long stack_page = (unsigned long)p->thread_info; | ||
618 | |||
619 | if (sp >= stack_page + sizeof(struct thread_struct) | ||
620 | && sp <= stack_page + THREAD_SIZE - nbytes) | ||
621 | return 1; | ||
622 | |||
623 | #ifdef CONFIG_IRQSTACKS | ||
624 | stack_page = (unsigned long) hardirq_ctx[task_cpu(p)]; | ||
625 | if (sp >= stack_page + sizeof(struct thread_struct) | ||
626 | && sp <= stack_page + THREAD_SIZE - nbytes) | ||
627 | return 1; | ||
628 | |||
629 | stack_page = (unsigned long) softirq_ctx[task_cpu(p)]; | ||
630 | if (sp >= stack_page + sizeof(struct thread_struct) | ||
631 | && sp <= stack_page + THREAD_SIZE - nbytes) | ||
632 | return 1; | ||
633 | #endif | ||
634 | |||
635 | return 0; | ||
636 | } | ||
637 | |||
638 | void dump_stack(void) | ||
639 | { | ||
640 | show_stack(current, NULL); | ||
641 | } | ||
642 | |||
643 | EXPORT_SYMBOL(dump_stack); | ||
644 | |||
645 | void show_stack(struct task_struct *tsk, unsigned long *stack) | ||
646 | { | ||
647 | unsigned long sp, stack_top, prev_sp, ret; | ||
648 | int count = 0; | ||
649 | unsigned long next_exc = 0; | ||
650 | struct pt_regs *regs; | ||
651 | extern char ret_from_except, ret_from_except_full, ret_from_syscall; | ||
652 | |||
653 | sp = (unsigned long) stack; | ||
654 | if (tsk == NULL) | ||
655 | tsk = current; | ||
656 | if (sp == 0) { | ||
657 | if (tsk == current) | ||
658 | asm("mr %0,1" : "=r" (sp)); | ||
659 | else | ||
660 | sp = tsk->thread.ksp; | ||
661 | } | ||
662 | |||
663 | prev_sp = (unsigned long) (tsk->thread_info + 1); | ||
664 | stack_top = (unsigned long) tsk->thread_info + THREAD_SIZE; | ||
665 | while (count < 16 && sp > prev_sp && sp < stack_top && (sp & 3) == 0) { | ||
666 | if (count == 0) { | ||
667 | printk("Call trace:"); | ||
668 | #ifdef CONFIG_KALLSYMS | ||
669 | printk("\n"); | ||
670 | #endif | ||
671 | } else { | ||
672 | if (next_exc) { | ||
673 | ret = next_exc; | ||
674 | next_exc = 0; | ||
675 | } else | ||
676 | ret = *(unsigned long *)(sp + 4); | ||
677 | printk(" [%08lx] ", ret); | ||
678 | #ifdef CONFIG_KALLSYMS | ||
679 | print_symbol("%s", ret); | ||
680 | printk("\n"); | ||
681 | #endif | ||
682 | if (ret == (unsigned long) &ret_from_except | ||
683 | || ret == (unsigned long) &ret_from_except_full | ||
684 | || ret == (unsigned long) &ret_from_syscall) { | ||
685 | /* sp + 16 points to an exception frame */ | ||
686 | regs = (struct pt_regs *) (sp + 16); | ||
687 | if (sp + 16 + sizeof(*regs) <= stack_top) | ||
688 | next_exc = regs->nip; | ||
689 | } | ||
690 | } | ||
691 | ++count; | ||
692 | sp = *(unsigned long *)sp; | ||
693 | } | ||
694 | #ifndef CONFIG_KALLSYMS | ||
695 | if (count > 0) | ||
696 | printk("\n"); | ||
697 | #endif | ||
698 | } | ||
699 | |||
700 | unsigned long get_wchan(struct task_struct *p) | ||
701 | { | ||
702 | unsigned long ip, sp; | ||
703 | int count = 0; | ||
704 | |||
705 | if (!p || p == current || p->state == TASK_RUNNING) | ||
706 | return 0; | ||
707 | |||
708 | sp = p->thread.ksp; | ||
709 | if (!validate_sp(sp, p, 16)) | ||
710 | return 0; | ||
711 | |||
712 | do { | ||
713 | sp = *(unsigned long *)sp; | ||
714 | if (!validate_sp(sp, p, 16)) | ||
715 | return 0; | ||
716 | if (count > 0) { | ||
717 | ip = *(unsigned long *)(sp + 4); | ||
718 | if (!in_sched_functions(ip)) | ||
719 | return ip; | ||
720 | } | ||
721 | } while (count++ < 16); | ||
722 | return 0; | ||
723 | } | ||
724 | EXPORT_SYMBOL(get_wchan); | ||
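Both show_stack() and get_wchan() rely on the 32-bit PowerPC ABI frame layout: a back-chain word at *(sp) and the saved LR one word above it, which is why the return address is read from sp + 4. As a sketch (illustrative type, not a kernel structure):

	struct ppc32_stack_frame {
		unsigned long back_chain;	/* *(sp + 0): caller's frame, 0 terminates the walk */
		unsigned long saved_lr;		/* *(sp + 4): return address into the caller */
	};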
diff --git a/arch/powerpc/kernel/semaphore.c b/arch/powerpc/kernel/semaphore.c new file mode 100644 index 000000000000..2f8c3c951394 --- /dev/null +++ b/arch/powerpc/kernel/semaphore.c | |||
@@ -0,0 +1,135 @@ | |||
1 | /* | ||
2 | * PowerPC-specific semaphore code. | ||
3 | * | ||
4 | * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * April 2001 - Reworked by Paul Mackerras <paulus@samba.org> | ||
12 | * to eliminate the SMP races in the old version between the updates | ||
13 | * of `count' and `waking'. Now we use negative `count' values to | ||
14 | * indicate that some process(es) are waiting for the semaphore. | ||
15 | */ | ||
16 | |||
17 | #include <linux/sched.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/module.h> | ||
20 | |||
21 | #include <asm/atomic.h> | ||
22 | #include <asm/semaphore.h> | ||
23 | #include <asm/errno.h> | ||
24 | |||
25 | /* | ||
26 | * Atomically update sem->count. | ||
27 | * This does the equivalent of the following: | ||
28 | * | ||
29 | * old_count = sem->count; | ||
30 | * tmp = MAX(old_count, 0) + incr; | ||
31 | * sem->count = tmp; | ||
32 | * return old_count; | ||
33 | */ | ||
34 | static inline int __sem_update_count(struct semaphore *sem, int incr) | ||
35 | { | ||
36 | int old_count, tmp; | ||
37 | |||
38 | __asm__ __volatile__("\n" | ||
39 | "1: lwarx %0,0,%3\n" | ||
40 | " srawi %1,%0,31\n" | ||
41 | " andc %1,%0,%1\n" | ||
42 | " add %1,%1,%4\n" | ||
43 | PPC405_ERR77(0,%3) | ||
44 | " stwcx. %1,0,%3\n" | ||
45 | " bne 1b" | ||
46 | : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count) | ||
47 | : "r" (&sem->count), "r" (incr), "m" (sem->count) | ||
48 | : "cc"); | ||
49 | |||
50 | return old_count; | ||
51 | } | ||
52 | |||
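The srawi/andc pair computes max(count, 0) without a branch: srawi replicates the sign bit (all ones for a negative count), and andc then masks the negative value to zero. The same computation in C, assuming an arithmetic right shift as on PowerPC:

	static inline int max0_plus(int count, int incr)
	{
		int sign = count >> 31;		/* -1 if count is negative, else 0 */
		return (count & ~sign) + incr;	/* max(count, 0) + incr */
	}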
53 | void __up(struct semaphore *sem) | ||
54 | { | ||
55 | /* | ||
56 | * Note that we incremented count in up() before we came here, | ||
57 | * but that was ineffective since the result was <= 0, and | ||
58 | * any negative value of count is equivalent to 0. | ||
59 | * This ends up setting count to 1, unless count is now > 0 | ||
60 | * (i.e. because some other cpu has called up() in the meantime), | ||
61 | * in which case we just increment count. | ||
62 | */ | ||
63 | __sem_update_count(sem, 1); | ||
64 | wake_up(&sem->wait); | ||
65 | } | ||
66 | EXPORT_SYMBOL(__up); | ||
67 | |||
68 | /* | ||
69 | * Note that when we come in to __down or __down_interruptible, | ||
70 | * we have already decremented count, but that decrement was | ||
71 | * ineffective since the result was < 0, and any negative value | ||
72 | * of count is equivalent to 0. | ||
73 | * Thus it is only when we decrement count from some value > 0 | ||
74 | * that we have actually got the semaphore. | ||
75 | */ | ||
76 | void __sched __down(struct semaphore *sem) | ||
77 | { | ||
78 | struct task_struct *tsk = current; | ||
79 | DECLARE_WAITQUEUE(wait, tsk); | ||
80 | |||
81 | __set_task_state(tsk, TASK_UNINTERRUPTIBLE); | ||
82 | add_wait_queue_exclusive(&sem->wait, &wait); | ||
83 | |||
84 | /* | ||
85 | * Try to get the semaphore. If the count is > 0, then we've | ||
86 | * got the semaphore; we decrement count and exit the loop. | ||
87 | * If the count is 0 or negative, we set it to -1, indicating | ||
88 | * that we are asleep, and then sleep. | ||
89 | */ | ||
90 | while (__sem_update_count(sem, -1) <= 0) { | ||
91 | schedule(); | ||
92 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); | ||
93 | } | ||
94 | remove_wait_queue(&sem->wait, &wait); | ||
95 | __set_task_state(tsk, TASK_RUNNING); | ||
96 | |||
97 | /* | ||
98 | * If there are any more sleepers, wake one of them up so | ||
99 | * that it can either get the semaphore, or set count to -1 | ||
100 | * indicating that there are still processes sleeping. | ||
101 | */ | ||
102 | wake_up(&sem->wait); | ||
103 | } | ||
104 | EXPORT_SYMBOL(__down); | ||
105 | |||
106 | int __sched __down_interruptible(struct semaphore * sem) | ||
107 | { | ||
108 | int retval = 0; | ||
109 | struct task_struct *tsk = current; | ||
110 | DECLARE_WAITQUEUE(wait, tsk); | ||
111 | |||
112 | __set_task_state(tsk, TASK_INTERRUPTIBLE); | ||
113 | add_wait_queue_exclusive(&sem->wait, &wait); | ||
114 | |||
115 | while (__sem_update_count(sem, -1) <= 0) { | ||
116 | if (signal_pending(current)) { | ||
117 | /* | ||
118 | * A signal is pending - give up trying. | ||
119 | * Set sem->count to 0 if it is negative, | ||
120 | * since we are no longer sleeping. | ||
121 | */ | ||
122 | __sem_update_count(sem, 0); | ||
123 | retval = -EINTR; | ||
124 | break; | ||
125 | } | ||
126 | schedule(); | ||
127 | set_task_state(tsk, TASK_INTERRUPTIBLE); | ||
128 | } | ||
129 | remove_wait_queue(&sem->wait, &wait); | ||
130 | __set_task_state(tsk, TASK_RUNNING); | ||
131 | |||
132 | wake_up(&sem->wait); | ||
133 | return retval; | ||
134 | } | ||
135 | EXPORT_SYMBOL(__down_interruptible); | ||
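__down() and __down_interruptible() are the slow paths reached from the down()/down_interruptible() fast paths in <asm/semaphore.h>. A typical caller, sketched with the DECLARE_MUTEX initializer of this kernel era:

	static DECLARE_MUTEX(my_sem);		/* count = 1, i.e. a mutex */

	int my_op(void)
	{
		if (down_interruptible(&my_sem))
			return -ERESTARTSYS;	/* a signal arrived while sleeping */
		/* ... critical section ... */
		up(&my_sem);
		return 0;
	}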
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c new file mode 100644 index 000000000000..c7afbbba0f36 --- /dev/null +++ b/arch/powerpc/kernel/traps.c | |||
@@ -0,0 +1,1047 @@ | |||
1 | /* | ||
2 | * arch/powerpc/kernel/traps.c | ||
3 | * | ||
4 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * Modified by Cort Dougan (cort@cs.nmt.edu) | ||
12 | * and Paul Mackerras (paulus@samba.org) | ||
13 | */ | ||
14 | |||
15 | /* | ||
16 | * This file handles the architecture-dependent parts of hardware exceptions | ||
17 | */ | ||
18 | |||
19 | #include <linux/config.h> | ||
20 | #include <linux/errno.h> | ||
21 | #include <linux/sched.h> | ||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/mm.h> | ||
24 | #include <linux/stddef.h> | ||
25 | #include <linux/unistd.h> | ||
26 | #include <linux/ptrace.h> | ||
27 | #include <linux/slab.h> | ||
28 | #include <linux/user.h> | ||
29 | #include <linux/a.out.h> | ||
30 | #include <linux/interrupt.h> | ||
31 | #include <linux/config.h> | ||
32 | #include <linux/init.h> | ||
33 | #include <linux/module.h> | ||
34 | #include <linux/prctl.h> | ||
35 | #include <linux/delay.h> | ||
36 | #include <linux/kprobes.h> | ||
37 | #include <asm/kdebug.h> | ||
38 | |||
39 | #include <asm/pgtable.h> | ||
40 | #include <asm/uaccess.h> | ||
41 | #include <asm/system.h> | ||
42 | #include <asm/io.h> | ||
43 | #include <asm/reg.h> | ||
44 | #include <asm/xmon.h> | ||
45 | #ifdef CONFIG_PMAC_BACKLIGHT | ||
46 | #include <asm/backlight.h> | ||
47 | #endif | ||
48 | #include <asm/perfmon.h> | ||
49 | |||
50 | #ifdef CONFIG_DEBUGGER | ||
51 | int (*__debugger)(struct pt_regs *regs); | ||
52 | int (*__debugger_ipi)(struct pt_regs *regs); | ||
53 | int (*__debugger_bpt)(struct pt_regs *regs); | ||
54 | int (*__debugger_sstep)(struct pt_regs *regs); | ||
55 | int (*__debugger_iabr_match)(struct pt_regs *regs); | ||
56 | int (*__debugger_dabr_match)(struct pt_regs *regs); | ||
57 | int (*__debugger_fault_handler)(struct pt_regs *regs); | ||
58 | |||
59 | EXPORT_SYMBOL(__debugger); | ||
60 | EXPORT_SYMBOL(__debugger_ipi); | ||
61 | EXPORT_SYMBOL(__debugger_bpt); | ||
62 | EXPORT_SYMBOL(__debugger_sstep); | ||
63 | EXPORT_SYMBOL(__debugger_iabr_match); | ||
64 | EXPORT_SYMBOL(__debugger_dabr_match); | ||
65 | EXPORT_SYMBOL(__debugger_fault_handler); | ||
66 | #endif | ||
67 | |||
68 | struct notifier_block *powerpc_die_chain; | ||
69 | static DEFINE_SPINLOCK(die_notifier_lock); | ||
70 | |||
71 | int register_die_notifier(struct notifier_block *nb) | ||
72 | { | ||
73 | int err = 0; | ||
74 | unsigned long flags; | ||
75 | |||
76 | spin_lock_irqsave(&die_notifier_lock, flags); | ||
77 | err = notifier_chain_register(&powerpc_die_chain, nb); | ||
78 | spin_unlock_irqrestore(&die_notifier_lock, flags); | ||
79 | return err; | ||
80 | } | ||
81 | |||
82 | /* | ||
83 | * Trap & Exception support | ||
84 | */ | ||
85 | |||
86 | static DEFINE_SPINLOCK(die_lock); | ||
87 | |||
88 | int die(const char *str, struct pt_regs *regs, long err) | ||
89 | { | ||
90 | static int die_counter; | ||
91 | int nl = 0; | ||
92 | |||
93 | if (debugger(regs)) | ||
94 | return 1; | ||
95 | |||
96 | console_verbose(); | ||
97 | spin_lock_irq(&die_lock); | ||
98 | bust_spinlocks(1); | ||
99 | #ifdef CONFIG_PMAC_BACKLIGHT | ||
100 | if (_machine == _MACH_Pmac) { | ||
101 | set_backlight_enable(1); | ||
102 | set_backlight_level(BACKLIGHT_MAX); | ||
103 | } | ||
104 | #endif | ||
105 | printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter); | ||
106 | #ifdef CONFIG_PREEMPT | ||
107 | printk("PREEMPT "); | ||
108 | nl = 1; | ||
109 | #endif | ||
110 | #ifdef CONFIG_SMP | ||
111 | printk("SMP NR_CPUS=%d ", NR_CPUS); | ||
112 | nl = 1; | ||
113 | #endif | ||
114 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
115 | printk("DEBUG_PAGEALLOC "); | ||
116 | nl = 1; | ||
117 | #endif | ||
118 | #ifdef CONFIG_NUMA | ||
119 | printk("NUMA "); | ||
120 | nl = 1; | ||
121 | #endif | ||
122 | #ifdef CONFIG_PPC64 | ||
123 | switch (systemcfg->platform) { | ||
124 | case PLATFORM_PSERIES: | ||
125 | printk("PSERIES "); | ||
126 | nl = 1; | ||
127 | break; | ||
128 | case PLATFORM_PSERIES_LPAR: | ||
129 | printk("PSERIES LPAR "); | ||
130 | nl = 1; | ||
131 | break; | ||
132 | case PLATFORM_ISERIES_LPAR: | ||
133 | printk("ISERIES LPAR "); | ||
134 | nl = 1; | ||
135 | break; | ||
136 | case PLATFORM_POWERMAC: | ||
137 | printk("POWERMAC "); | ||
138 | nl = 1; | ||
139 | break; | ||
140 | case PLATFORM_BPA: | ||
141 | printk("BPA "); | ||
142 | nl = 1; | ||
143 | break; | ||
144 | } | ||
145 | #endif | ||
146 | if (nl) | ||
147 | printk("\n"); | ||
148 | print_modules(); | ||
149 | show_regs(regs); | ||
150 | bust_spinlocks(0); | ||
151 | spin_unlock_irq(&die_lock); | ||
152 | |||
153 | if (in_interrupt()) | ||
154 | panic("Fatal exception in interrupt"); | ||
155 | |||
156 | if (panic_on_oops) { | ||
157 | panic("Fatal exception"); | ||
158 | } | ||
159 | do_exit(err); | ||
160 | |||
161 | return 0; | ||
162 | } | ||
163 | |||
164 | void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) | ||
165 | { | ||
166 | siginfo_t info; | ||
167 | |||
168 | if (!user_mode(regs)) { | ||
169 | if (die("Exception in kernel mode", regs, signr)) | ||
170 | return; | ||
171 | } | ||
172 | |||
173 | memset(&info, 0, sizeof(info)); | ||
174 | info.si_signo = signr; | ||
175 | info.si_code = code; | ||
176 | info.si_addr = (void __user *) addr; | ||
177 | force_sig_info(signr, &info, current); | ||
178 | |||
179 | /* | ||
180 | * Init gets no signals that it doesn't have a handler for. | ||
181 | * That's all very well, but if it has caused a synchronous | ||
182 | * exception and we ignore the resulting signal, it will just | ||
183 | * generate the same exception over and over again and we get | ||
184 | * nowhere. Better to kill it and let the kernel panic. | ||
185 | */ | ||
186 | if (current->pid == 1) { | ||
187 | __sighandler_t handler; | ||
188 | |||
189 | spin_lock_irq(¤t->sighand->siglock); | ||
190 | handler = current->sighand->action[signr-1].sa.sa_handler; | ||
191 | spin_unlock_irq(¤t->sighand->siglock); | ||
192 | if (handler == SIG_DFL) { | ||
193 | /* init has generated a synchronous exception | ||
194 | and it doesn't have a handler for the signal */ | ||
195 | printk(KERN_CRIT "init has generated signal %d " | ||
196 | "but has no handler for it\n", signr); | ||
197 | do_exit(signr); | ||
198 | } | ||
199 | } | ||
200 | } | ||
201 | |||
202 | #ifdef CONFIG_PPC64 | ||
203 | void system_reset_exception(struct pt_regs *regs) | ||
204 | { | ||
205 | /* See if any machine dependent calls */ | ||
206 | if (ppc_md.system_reset_exception) | ||
207 | ppc_md.system_reset_exception(regs); | ||
208 | |||
209 | die("System Reset", regs, SIGABRT); | ||
210 | |||
211 | /* Must die if the interrupt is not recoverable */ | ||
212 | if (!(regs->msr & MSR_RI)) | ||
213 | panic("Unrecoverable System Reset"); | ||
214 | |||
215 | /* What should we do here? We could issue a shutdown or hard reset. */ | ||
216 | } | ||
217 | #endif | ||
218 | |||
219 | /* | ||
220 | * I/O accesses can cause machine checks on powermacs. | ||
221 | * Check if the NIP corresponds to the address of a sync | ||
222 | * instruction for which there is an entry in the exception | ||
223 | * table. | ||
224 | * Note that the 601 only takes a machine check on TEA | ||
225 | * (transfer error ack) signal assertion, and does not | ||
226 | * set any of the top 16 bits of SRR1. | ||
227 | * -- paulus. | ||
228 | */ | ||
229 | static inline int check_io_access(struct pt_regs *regs) | ||
230 | { | ||
231 | #ifdef CONFIG_PPC_PMAC | ||
232 | unsigned long msr = regs->msr; | ||
233 | const struct exception_table_entry *entry; | ||
234 | unsigned int *nip = (unsigned int *)regs->nip; | ||
235 | |||
236 | if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000))) | ||
237 | && (entry = search_exception_tables(regs->nip)) != NULL) { | ||
238 | /* | ||
239 | * Check that it's a sync instruction, or somewhere | ||
240 | * in the twi; isync; nop sequence that inb/inw/inl uses. | ||
241 | * As the address is in the exception table | ||
242 | * we should be able to read the instr there. | ||
243 | * For the debug message, we look at the preceding | ||
244 | * load or store. | ||
245 | */ | ||
246 | if (*nip == 0x60000000) /* nop */ | ||
247 | nip -= 2; | ||
248 | else if (*nip == 0x4c00012c) /* isync */ | ||
249 | --nip; | ||
250 | if (*nip == 0x7c0004ac || (*nip >> 26) == 3) { | ||
251 | /* sync or twi */ | ||
252 | unsigned int rb; | ||
253 | |||
254 | --nip; | ||
255 | rb = (*nip >> 11) & 0x1f; | ||
256 | printk(KERN_DEBUG "%s bad port %lx at %p\n", | ||
257 | (*nip & 0x100)? "OUT to": "IN from", | ||
258 | regs->gpr[rb] - _IO_BASE, nip); | ||
259 | regs->msr |= MSR_RI; | ||
260 | regs->nip = entry->fixup; | ||
261 | return 1; | ||
262 | } | ||
263 | } | ||
264 | #endif /* CONFIG_PPC_PMAC */ | ||
265 | return 0; | ||
266 | } | ||
267 | |||
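The magic constants compared against *nip above are raw PowerPC instruction encodings. Spelled out as a sketch:

	static int is_nop(unsigned int insn)	{ return insn == 0x60000000; }	/* ori 0,0,0 */
	static int is_isync(unsigned int insn)	{ return insn == 0x4c00012c; }
	static int is_sync(unsigned int insn)	{ return insn == 0x7c0004ac; }
	static int is_twi(unsigned int insn)	{ return (insn >> 26) == 3; }	/* primary opcode 3 */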
268 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) | ||
269 | /* On 4xx, the reason for the machine check or program exception | ||
270 | is in the ESR. */ | ||
271 | #define get_reason(regs) ((regs)->dsisr) | ||
272 | #ifndef CONFIG_FSL_BOOKE | ||
273 | #define get_mc_reason(regs) ((regs)->dsisr) | ||
274 | #else | ||
275 | #define get_mc_reason(regs) (mfspr(SPRN_MCSR)) | ||
276 | #endif | ||
277 | #define REASON_FP ESR_FP | ||
278 | #define REASON_ILLEGAL (ESR_PIL | ESR_PUO) | ||
279 | #define REASON_PRIVILEGED ESR_PPR | ||
280 | #define REASON_TRAP ESR_PTR | ||
281 | |||
282 | /* single-step stuff */ | ||
283 | #define single_stepping(regs) (current->thread.dbcr0 & DBCR0_IC) | ||
284 | #define clear_single_step(regs) (current->thread.dbcr0 &= ~DBCR0_IC) | ||
285 | |||
286 | #else | ||
287 | /* On non-4xx, the reason for the machine check or program | ||
288 | exception is in the MSR. */ | ||
289 | #define get_reason(regs) ((regs)->msr) | ||
290 | #define get_mc_reason(regs) ((regs)->msr) | ||
291 | #define REASON_FP 0x100000 | ||
292 | #define REASON_ILLEGAL 0x80000 | ||
293 | #define REASON_PRIVILEGED 0x40000 | ||
294 | #define REASON_TRAP 0x20000 | ||
295 | |||
296 | #define single_stepping(regs) ((regs)->msr & MSR_SE) | ||
297 | #define clear_single_step(regs) ((regs)->msr &= ~MSR_SE) | ||
298 | #endif | ||
299 | |||
300 | /* | ||
301 | * This is a "fall-back" implementation for configurations | ||
302 | * which don't provide platform-specific machine check info. | ||
303 | */ | ||
304 | void __attribute__ ((weak)) | ||
305 | platform_machine_check(struct pt_regs *regs) | ||
306 | { | ||
307 | } | ||
308 | |||
309 | void MachineCheckException(struct pt_regs *regs) | ||
310 | { | ||
311 | #ifdef CONFIG_PPC64 | ||
312 | int recover = 0; | ||
313 | |||
314 | /* See if any machine dependent calls */ | ||
315 | if (ppc_md.machine_check_exception) | ||
316 | recover = ppc_md.machine_check_exception(regs); | ||
317 | |||
318 | if (recover) | ||
319 | return; | ||
320 | #else | ||
321 | unsigned long reason = get_mc_reason(regs); | ||
322 | |||
323 | if (user_mode(regs)) { | ||
324 | regs->msr |= MSR_RI; | ||
325 | _exception(SIGBUS, regs, BUS_ADRERR, regs->nip); | ||
326 | return; | ||
327 | } | ||
328 | |||
329 | #if defined(CONFIG_8xx) && defined(CONFIG_PCI) | ||
330 | /* the qspan pci read routines can cause machine checks -- Cort */ | ||
331 | bad_page_fault(regs, regs->dar, SIGBUS); | ||
332 | return; | ||
333 | #endif | ||
334 | |||
335 | if (debugger_fault_handler(regs)) { | ||
336 | regs->msr |= MSR_RI; | ||
337 | return; | ||
338 | } | ||
339 | |||
340 | if (check_io_access(regs)) | ||
341 | return; | ||
342 | |||
343 | #if defined(CONFIG_4xx) && !defined(CONFIG_440A) | ||
344 | if (reason & ESR_IMCP) { | ||
345 | printk("Instruction"); | ||
346 | mtspr(SPRN_ESR, reason & ~ESR_IMCP); | ||
347 | } else | ||
348 | printk("Data"); | ||
349 | printk(" machine check in kernel mode.\n"); | ||
350 | #elif defined(CONFIG_440A) | ||
351 | printk("Machine check in kernel mode.\n"); | ||
352 | if (reason & ESR_IMCP){ | ||
353 | printk("Instruction Synchronous Machine Check exception\n"); | ||
354 | mtspr(SPRN_ESR, reason & ~ESR_IMCP); | ||
355 | } | ||
356 | else { | ||
357 | u32 mcsr = mfspr(SPRN_MCSR); | ||
358 | if (mcsr & MCSR_IB) | ||
359 | printk("Instruction Read PLB Error\n"); | ||
360 | if (mcsr & MCSR_DRB) | ||
361 | printk("Data Read PLB Error\n"); | ||
362 | if (mcsr & MCSR_DWB) | ||
363 | printk("Data Write PLB Error\n"); | ||
364 | if (mcsr & MCSR_TLBP) | ||
365 | printk("TLB Parity Error\n"); | ||
366 | if (mcsr & MCSR_ICP){ | ||
367 | flush_instruction_cache(); | ||
368 | printk("I-Cache Parity Error\n"); | ||
369 | } | ||
370 | if (mcsr & MCSR_DCSP) | ||
371 | printk("D-Cache Search Parity Error\n"); | ||
372 | if (mcsr & MCSR_DCFP) | ||
373 | printk("D-Cache Flush Parity Error\n"); | ||
374 | if (mcsr & MCSR_IMPE) | ||
375 | printk("Machine Check exception is imprecise\n"); | ||
376 | |||
377 | /* Clear MCSR */ | ||
378 | mtspr(SPRN_MCSR, mcsr); | ||
379 | } | ||
380 | #elif defined (CONFIG_E500) | ||
381 | printk("Machine check in kernel mode.\n"); | ||
382 | printk("Caused by (from MCSR=%lx): ", reason); | ||
383 | |||
384 | if (reason & MCSR_MCP) | ||
385 | printk("Machine Check Signal\n"); | ||
386 | if (reason & MCSR_ICPERR) | ||
387 | printk("Instruction Cache Parity Error\n"); | ||
388 | if (reason & MCSR_DCP_PERR) | ||
389 | printk("Data Cache Push Parity Error\n"); | ||
390 | if (reason & MCSR_DCPERR) | ||
391 | printk("Data Cache Parity Error\n"); | ||
392 | if (reason & MCSR_GL_CI) | ||
393 | printk("Guarded Load or Cache-Inhibited stwcx.\n"); | ||
394 | if (reason & MCSR_BUS_IAERR) | ||
395 | printk("Bus - Instruction Address Error\n"); | ||
396 | if (reason & MCSR_BUS_RAERR) | ||
397 | printk("Bus - Read Address Error\n"); | ||
398 | if (reason & MCSR_BUS_WAERR) | ||
399 | printk("Bus - Write Address Error\n"); | ||
400 | if (reason & MCSR_BUS_IBERR) | ||
401 | printk("Bus - Instruction Data Error\n"); | ||
402 | if (reason & MCSR_BUS_RBERR) | ||
403 | printk("Bus - Read Data Bus Error\n"); | ||
404 | if (reason & MCSR_BUS_WBERR) | ||
405 | printk("Bus - Read Data Bus Error\n"); | ||
406 | if (reason & MCSR_BUS_IPERR) | ||
407 | printk("Bus - Instruction Parity Error\n"); | ||
408 | if (reason & MCSR_BUS_RPERR) | ||
409 | printk("Bus - Read Parity Error\n"); | ||
410 | #elif defined (CONFIG_E200) | ||
411 | printk("Machine check in kernel mode.\n"); | ||
412 | printk("Caused by (from MCSR=%lx): ", reason); | ||
413 | |||
414 | if (reason & MCSR_MCP) | ||
415 | printk("Machine Check Signal\n"); | ||
416 | if (reason & MCSR_CP_PERR) | ||
417 | printk("Cache Push Parity Error\n"); | ||
418 | if (reason & MCSR_CPERR) | ||
419 | printk("Cache Parity Error\n"); | ||
420 | if (reason & MCSR_EXCP_ERR) | ||
421 | printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n"); | ||
422 | if (reason & MCSR_BUS_IRERR) | ||
423 | printk("Bus - Read Bus Error on instruction fetch\n"); | ||
424 | if (reason & MCSR_BUS_DRERR) | ||
425 | printk("Bus - Read Bus Error on data load\n"); | ||
426 | if (reason & MCSR_BUS_WRERR) | ||
427 | printk("Bus - Write Bus Error on buffered store or cache line push\n"); | ||
428 | #else /* !CONFIG_4xx && !CONFIG_E500 && !CONFIG_E200 */ | ||
429 | printk("Machine check in kernel mode.\n"); | ||
430 | printk("Caused by (from SRR1=%lx): ", reason); | ||
431 | switch (reason & 0x601F0000) { | ||
432 | case 0x80000: | ||
433 | printk("Machine check signal\n"); | ||
434 | break; | ||
435 | case 0: /* for 601 */ | ||
436 | case 0x40000: | ||
437 | case 0x140000: /* 7450 MSS error and TEA */ | ||
438 | printk("Transfer error ack signal\n"); | ||
439 | break; | ||
440 | case 0x20000: | ||
441 | printk("Data parity error signal\n"); | ||
442 | break; | ||
443 | case 0x10000: | ||
444 | printk("Address parity error signal\n"); | ||
445 | break; | ||
446 | case 0x20000000: | ||
447 | printk("L1 Data Cache error\n"); | ||
448 | break; | ||
449 | case 0x40000000: | ||
450 | printk("L1 Instruction Cache error\n"); | ||
451 | break; | ||
452 | case 0x00100000: | ||
453 | printk("L2 data cache parity error\n"); | ||
454 | break; | ||
455 | default: | ||
456 | printk("Unknown values in msr\n"); | ||
457 | } | ||
458 | #endif /* CONFIG_4xx */ | ||
459 | |||
460 | /* | ||
461 | * Optional platform-provided routine to print out | ||
462 | * additional info, e.g. bus error registers. | ||
463 | */ | ||
464 | platform_machine_check(regs); | ||
465 | #endif /* CONFIG_PPC64 */ | ||
466 | |||
467 | if (debugger_fault_handler(regs)) | ||
468 | return; | ||
469 | die("Machine check", regs, SIGBUS); | ||
470 | |||
471 | /* Must die if the interrupt is not recoverable */ | ||
472 | if (!(regs->msr & MSR_RI)) | ||
473 | panic("Unrecoverable Machine check"); | ||
474 | } | ||
475 | |||
476 | void SMIException(struct pt_regs *regs) | ||
477 | { | ||
478 | die("System Management Interrupt", regs, SIGABRT); | ||
479 | } | ||
480 | |||
481 | void UnknownException(struct pt_regs *regs) | ||
482 | { | ||
483 | printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n", | ||
484 | regs->nip, regs->msr, regs->trap); | ||
485 | |||
486 | _exception(SIGTRAP, regs, 0, 0); | ||
487 | } | ||
488 | |||
489 | void InstructionBreakpoint(struct pt_regs *regs) | ||
490 | { | ||
491 | if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5, | ||
492 | 5, SIGTRAP) == NOTIFY_STOP) | ||
493 | return; | ||
494 | if (debugger_iabr_match(regs)) | ||
495 | return; | ||
496 | _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); | ||
497 | } | ||
498 | |||
499 | void RunModeException(struct pt_regs *regs) | ||
500 | { | ||
501 | _exception(SIGTRAP, regs, 0, 0); | ||
502 | } | ||
503 | |||
504 | void SingleStepException(struct pt_regs *regs) | ||
505 | { | ||
506 | regs->msr &= ~(MSR_SE | MSR_BE); /* Turn off 'trace' bits */ | ||
507 | |||
508 | if (notify_die(DIE_SSTEP, "single_step", regs, 5, | ||
509 | 5, SIGTRAP) == NOTIFY_STOP) | ||
510 | return; | ||
511 | if (debugger_sstep(regs)) | ||
512 | return; | ||
513 | |||
514 | _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); | ||
515 | } | ||
516 | |||
517 | /* | ||
518 | * After we have successfully emulated an instruction, we have to | ||
519 | * check if the instruction was being single-stepped, and if so, | ||
520 | * pretend we got a single-step exception. This was pointed out | ||
521 | * by Kumar Gala. -- paulus | ||
522 | */ | ||
523 | static void emulate_single_step(struct pt_regs *regs) | ||
524 | { | ||
525 | if (single_stepping(regs)) { | ||
526 | clear_single_step(regs); | ||
527 | _exception(SIGTRAP, regs, TRAP_TRACE, 0); | ||
528 | } | ||
529 | } | ||
530 | |||
531 | /* Illegal instruction emulation support. Originally written to | ||
532 | * provide the PVR to user applications using the mfspr rd, PVR. | ||
533 | * Return non-zero if we can't emulate, or -EFAULT if the associated | ||
534 | * memory access caused an access fault. Return zero on success. | ||
535 | * | ||
536 | * There are a couple of ways to do this, either "decode" the instruction | ||
537 | * or directly match lots of bits. In this case, matching lots of | ||
538 | * bits is faster and easier. | ||
539 | * | ||
540 | */ | ||
541 | #define INST_MFSPR_PVR 0x7c1f42a6 | ||
542 | #define INST_MFSPR_PVR_MASK 0xfc1fffff | ||
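| /* 0x7c1f42a6 is "mfspr rD,287" (PVR); the mask frees only the rD field, instruction bits 6-10 */ | ||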
543 | |||
544 | #define INST_DCBA 0x7c0005ec | ||
545 | #define INST_DCBA_MASK 0x7c0007fe | ||
546 | |||
547 | #define INST_MCRXR 0x7c000400 | ||
548 | #define INST_MCRXR_MASK 0x7c0007fe | ||
549 | |||
550 | #define INST_STRING 0x7c00042a | ||
551 | #define INST_STRING_MASK 0x7c0007fe | ||
552 | #define INST_STRING_GEN_MASK 0x7c00067e | ||
553 | #define INST_LSWI 0x7c0004aa | ||
554 | #define INST_LSWX 0x7c00042a | ||
555 | #define INST_STSWI 0x7c0005aa | ||
556 | #define INST_STSWX 0x7c00052a | ||
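| /* INST_STRING_MASK separates the four string ops; INST_STRING_GEN_MASK ignores the two XO bits that differ between them, so one compare against INST_STRING matches any of lswi/lswx/stswi/stswx */ | ||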
557 | |||
558 | static int emulate_string_inst(struct pt_regs *regs, u32 instword) | ||
559 | { | ||
560 | u8 rT = (instword >> 21) & 0x1f; | ||
561 | u8 rA = (instword >> 16) & 0x1f; | ||
562 | u8 NB_RB = (instword >> 11) & 0x1f; | ||
563 | u32 num_bytes; | ||
564 | unsigned long EA; | ||
565 | int pos = 0; | ||
566 | |||
567 | /* Early out if we are an invalid form of lswx */ | ||
568 | if ((instword & INST_STRING_MASK) == INST_LSWX) | ||
569 | if ((rT == rA) || (rT == NB_RB)) | ||
570 | return -EINVAL; | ||
571 | |||
572 | EA = (rA == 0) ? 0 : regs->gpr[rA]; | ||
573 | |||
574 | switch (instword & INST_STRING_MASK) { | ||
575 | case INST_LSWX: | ||
576 | case INST_STSWX: | ||
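| /* X-form (lswx/stswx): EA = (rA|0) + (rB); the byte count comes from XER[25:31] */ | ||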
577 | EA += regs->gpr[NB_RB]; /* (rB) is the register's value, not its number */ | ||
578 | num_bytes = regs->xer & 0x7f; | ||
579 | break; | ||
580 | case INST_LSWI: | ||
581 | case INST_STSWI: | ||
582 | num_bytes = (NB_RB == 0) ? 32 : NB_RB; | ||
583 | break; | ||
584 | default: | ||
585 | return -EINVAL; | ||
586 | } | ||
587 | |||
588 | while (num_bytes != 0) | ||
589 | { | ||
590 | u8 val; | ||
591 | u32 shift = 8 * (3 - (pos & 0x3)); | ||
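| /* string ops fill each GPR starting with the most-significant byte */ | ||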
592 | |||
593 | switch ((instword & INST_STRING_MASK)) { | ||
594 | case INST_LSWX: | ||
595 | case INST_LSWI: | ||
596 | if (get_user(val, (u8 __user *)EA)) | ||
597 | return -EFAULT; | ||
598 | /* first time updating this reg, | ||
599 | * zero it out */ | ||
600 | if (pos == 0) | ||
601 | regs->gpr[rT] = 0; | ||
602 | regs->gpr[rT] |= val << shift; | ||
603 | break; | ||
604 | case INST_STSWI: | ||
605 | case INST_STSWX: | ||
606 | val = regs->gpr[rT] >> shift; | ||
607 | if (put_user(val, (u8 __user *)EA)) | ||
608 | return -EFAULT; | ||
609 | break; | ||
610 | } | ||
611 | /* move EA to next address */ | ||
612 | EA += 1; | ||
613 | num_bytes--; | ||
614 | |||
615 | /* manage our position within the register */ | ||
616 | if (++pos == 4) { | ||
617 | pos = 0; | ||
618 | if (++rT == 32) | ||
619 | rT = 0; | ||
620 | } | ||
621 | } | ||
622 | |||
623 | return 0; | ||
624 | } | ||
625 | |||
626 | static int emulate_instruction(struct pt_regs *regs) | ||
627 | { | ||
628 | u32 instword; | ||
629 | u32 rd; | ||
630 | |||
631 | if (!user_mode(regs)) | ||
632 | return -EINVAL; | ||
633 | CHECK_FULL_REGS(regs); | ||
634 | |||
635 | if (get_user(instword, (u32 __user *)(regs->nip))) | ||
636 | return -EFAULT; | ||
637 | |||
638 | /* Emulate the mfspr rD, PVR. */ | ||
639 | if ((instword & INST_MFSPR_PVR_MASK) == INST_MFSPR_PVR) { | ||
640 | rd = (instword >> 21) & 0x1f; | ||
641 | regs->gpr[rd] = mfspr(SPRN_PVR); | ||
642 | return 0; | ||
643 | } | ||
644 | |||
645 | /* Emulating the dcba insn is just a no-op. */ | ||
646 | if ((instword & INST_DCBA_MASK) == INST_DCBA) | ||
647 | return 0; | ||
648 | |||
649 | /* Emulate the mcrxr insn. */ | ||
650 | if ((instword & INST_MCRXR_MASK) == INST_MCRXR) { | ||
651 | int shift = (instword >> 21) & 0x1c; | ||
652 | unsigned long msk = 0xf0000000UL >> shift; | ||
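| /* crfD is insn bits 6-8, so shift = 4*crfD: copy XER[0:3] (SO, OV, CA) into that CR field, then clear them in XER */ | ||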
653 | |||
654 | regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk); | ||
655 | regs->xer &= ~0xf0000000UL; | ||
656 | return 0; | ||
657 | } | ||
658 | |||
659 | /* Emulate load/store string insn. */ | ||
660 | if ((instword & INST_STRING_GEN_MASK) == INST_STRING) | ||
661 | return emulate_string_inst(regs, instword); | ||
662 | |||
663 | return -EINVAL; | ||
664 | } | ||
665 | |||
666 | /* | ||
667 | * Look through the list of trap instructions that are used for BUG(), | ||
668 | * BUG_ON() and WARN_ON() and see if we hit one. At this point we know | ||
669 | * that the exception was caused by a trap instruction of some kind. | ||
670 | * Returns 1 if we should continue (i.e. it was a WARN_ON) or 0 | ||
671 | * otherwise. | ||
672 | */ | ||
673 | extern struct bug_entry __start___bug_table[], __stop___bug_table[]; | ||
674 | |||
675 | #ifndef CONFIG_MODULES | ||
676 | #define module_find_bug(x) NULL | ||
677 | #endif | ||
678 | |||
679 | struct bug_entry *find_bug(unsigned long bugaddr) | ||
680 | { | ||
681 | struct bug_entry *bug; | ||
682 | |||
683 | for (bug = __start___bug_table; bug < __stop___bug_table; ++bug) | ||
684 | if (bugaddr == bug->bug_addr) | ||
685 | return bug; | ||
686 | return module_find_bug(bugaddr); | ||
687 | } | ||
688 | |||
689 | int check_bug_trap(struct pt_regs *regs) | ||
690 | { | ||
691 | struct bug_entry *bug; | ||
692 | unsigned long addr; | ||
693 | |||
694 | if (regs->msr & MSR_PR) | ||
695 | return 0; /* not in kernel */ | ||
696 | addr = regs->nip; /* address of trap instruction */ | ||
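| /* kernel text lives above PAGE_OFFSET; anything below cannot be a kernel BUG trap */ | ||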
697 | if (addr < PAGE_OFFSET) | ||
698 | return 0; | ||
699 | bug = find_bug(regs->nip); | ||
700 | if (bug == NULL) | ||
701 | return 0; | ||
702 | if (bug->line & BUG_WARNING_TRAP) { | ||
703 | /* this is a WARN_ON rather than BUG/BUG_ON */ | ||
704 | #ifdef CONFIG_XMON | ||
705 | xmon_printf(KERN_ERR "Badness in %s at %s:%d\n", | ||
706 | bug->function, bug->file, | ||
707 | bug->line & ~BUG_WARNING_TRAP); | ||
708 | #endif /* CONFIG_XMON */ | ||
709 | printk(KERN_ERR "Badness in %s at %s:%d\n", | ||
710 | bug->function, bug->file, | ||
711 | bug->line & ~BUG_WARNING_TRAP); | ||
712 | dump_stack(); | ||
713 | return 1; | ||
714 | } | ||
715 | #ifdef CONFIG_XMON | ||
716 | xmon_printf(KERN_CRIT "kernel BUG in %s at %s:%d!\n", | ||
717 | bug->function, bug->file, bug->line); | ||
718 | xmon(regs); | ||
719 | #endif /* CONFIG_XMON */ | ||
720 | printk(KERN_CRIT "kernel BUG in %s at %s:%d!\n", | ||
721 | bug->function, bug->file, bug->line); | ||
722 | |||
723 | return 0; | ||
724 | } | ||
725 | |||
726 | void ProgramCheckException(struct pt_regs *regs) | ||
727 | { | ||
728 | unsigned int reason = get_reason(regs); | ||
729 | extern int do_mathemu(struct pt_regs *regs); | ||
730 | |||
731 | #ifdef CONFIG_MATH_EMULATION | ||
732 | /* (reason & REASON_ILLEGAL) would be the obvious thing here, | ||
733 | * but there seems to be a hardware bug on the 405GP (RevD) | ||
734 | * that means ESR is sometimes set incorrectly - either to | ||
735 | * ESR_DST (!?) or 0. We are chasing this up with the | ||
736 | * hardware people - it is not yet clear whether it can happen on | ||
737 | * any illegal instruction or only on FP instructions, or whether | ||
738 | * there is a pattern to the occurrences. -dgibson 31/Mar/2003 */ | ||
739 | if (!(reason & REASON_TRAP) && do_mathemu(regs) == 0) { | ||
740 | emulate_single_step(regs); | ||
741 | return; | ||
742 | } | ||
743 | #endif /* CONFIG_MATH_EMULATION */ | ||
744 | |||
745 | if (reason & REASON_FP) { | ||
746 | /* IEEE FP exception */ | ||
747 | int code = 0; | ||
748 | u32 fpscr; | ||
749 | |||
750 | /* We must make sure the FP state is consistent with | ||
751 | * our MSR_FP in regs | ||
752 | */ | ||
753 | preempt_disable(); | ||
754 | if (regs->msr & MSR_FP) | ||
755 | giveup_fpu(current); | ||
756 | preempt_enable(); | ||
757 | |||
758 | fpscr = current->thread.fpscr; | ||
759 | fpscr &= fpscr << 22; /* mask summary bits with enables */ | ||
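| /* the enable bits (VE..XE) sit 22 bits to the right of the matching exception bits (VX..XX); shifting them up and ANDing keeps only the enabled exceptions */ | ||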
760 | if (fpscr & FPSCR_VX) | ||
761 | code = FPE_FLTINV; | ||
762 | else if (fpscr & FPSCR_OX) | ||
763 | code = FPE_FLTOVF; | ||
764 | else if (fpscr & FPSCR_UX) | ||
765 | code = FPE_FLTUND; | ||
766 | else if (fpscr & FPSCR_ZX) | ||
767 | code = FPE_FLTDIV; | ||
768 | else if (fpscr & FPSCR_XX) | ||
769 | code = FPE_FLTRES; | ||
770 | _exception(SIGFPE, regs, code, regs->nip); | ||
771 | return; | ||
772 | } | ||
773 | |||
774 | if (reason & REASON_TRAP) { | ||
775 | /* trap exception */ | ||
776 | if (debugger_bpt(regs)) | ||
777 | return; | ||
778 | if (check_bug_trap(regs)) { | ||
779 | regs->nip += 4; | ||
780 | return; | ||
781 | } | ||
782 | _exception(SIGTRAP, regs, TRAP_BRKPT, 0); | ||
783 | return; | ||
784 | } | ||
785 | |||
786 | /* Try to emulate it if we should. */ | ||
787 | if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) { | ||
788 | switch (emulate_instruction(regs)) { | ||
789 | case 0: | ||
790 | regs->nip += 4; | ||
791 | emulate_single_step(regs); | ||
792 | return; | ||
793 | case -EFAULT: | ||
794 | _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); | ||
795 | return; | ||
796 | } | ||
797 | } | ||
798 | |||
799 | if (reason & REASON_PRIVILEGED) | ||
800 | _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); | ||
801 | else | ||
802 | _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); | ||
803 | } | ||
804 | |||
805 | void AlignmentException(struct pt_regs *regs) | ||
806 | { | ||
807 | int fixed; | ||
808 | |||
809 | fixed = fix_alignment(regs); | ||
810 | |||
811 | if (fixed == 1) { | ||
812 | regs->nip += 4; /* skip over emulated instruction */ | ||
813 | emulate_single_step(regs); | ||
814 | return; | ||
815 | } | ||
816 | |||
817 | /* Operand address was bad */ | ||
818 | if (fixed == -EFAULT) { | ||
819 | if (user_mode(regs)) | ||
820 | _exception(SIGSEGV, regs, SEGV_ACCERR, regs->dar); | ||
821 | else | ||
822 | /* Search exception table */ | ||
823 | bad_page_fault(regs, regs->dar, SIGSEGV); | ||
824 | return; | ||
825 | } | ||
826 | _exception(SIGBUS, regs, BUS_ADRALN, regs->dar); | ||
827 | } | ||
828 | |||
829 | void StackOverflow(struct pt_regs *regs) | ||
830 | { | ||
831 | printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n", | ||
832 | current, regs->gpr[1]); | ||
833 | debugger(regs); | ||
834 | show_regs(regs); | ||
835 | panic("kernel stack overflow"); | ||
836 | } | ||
837 | |||
838 | void nonrecoverable_exception(struct pt_regs *regs) | ||
839 | { | ||
840 | printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n", | ||
841 | regs->nip, regs->msr); | ||
842 | debugger(regs); | ||
843 | die("nonrecoverable exception", regs, SIGKILL); | ||
844 | } | ||
845 | |||
846 | void trace_syscall(struct pt_regs *regs) | ||
847 | { | ||
848 | printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n", | ||
849 | current, current->pid, regs->nip, regs->link, regs->gpr[0], | ||
850 | regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted()); | ||
851 | } | ||
852 | |||
853 | #ifdef CONFIG_8xx | ||
854 | void SoftwareEmulation(struct pt_regs *regs) | ||
855 | { | ||
856 | extern int do_mathemu(struct pt_regs *); | ||
857 | extern int Soft_emulate_8xx(struct pt_regs *); | ||
858 | int errcode; | ||
859 | |||
860 | CHECK_FULL_REGS(regs); | ||
861 | |||
862 | if (!user_mode(regs)) { | ||
863 | debugger(regs); | ||
864 | die("Kernel Mode Software FPU Emulation", regs, SIGFPE); | ||
865 | } | ||
866 | |||
867 | #ifdef CONFIG_MATH_EMULATION | ||
868 | errcode = do_mathemu(regs); | ||
869 | #else | ||
870 | errcode = Soft_emulate_8xx(regs); | ||
871 | #endif | ||
872 | if (errcode) { | ||
873 | if (errcode > 0) | ||
874 | _exception(SIGFPE, regs, 0, 0); | ||
875 | else if (errcode == -EFAULT) | ||
876 | _exception(SIGSEGV, regs, 0, 0); | ||
877 | else | ||
878 | _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); | ||
879 | } else | ||
880 | emulate_single_step(regs); | ||
881 | } | ||
882 | #endif /* CONFIG_8xx */ | ||
883 | |||
884 | #if defined(CONFIG_40x) || defined(CONFIG_BOOKE) | ||
885 | |||
886 | void DebugException(struct pt_regs *regs, unsigned long debug_status) | ||
887 | { | ||
888 | if (debug_status & DBSR_IC) { /* instruction completion */ | ||
889 | regs->msr &= ~MSR_DE; | ||
890 | if (user_mode(regs)) { | ||
891 | current->thread.dbcr0 &= ~DBCR0_IC; | ||
892 | } else { | ||
893 | /* Disable instruction completion */ | ||
894 | mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC); | ||
895 | /* Clear the instruction completion event */ | ||
896 | mtspr(SPRN_DBSR, DBSR_IC); | ||
897 | if (debugger_sstep(regs)) | ||
898 | return; | ||
899 | } | ||
900 | _exception(SIGTRAP, regs, TRAP_TRACE, 0); | ||
901 | } | ||
902 | } | ||
903 | #endif /* CONFIG_40x || CONFIG_BOOKE */ | ||
904 | |||
905 | #if !defined(CONFIG_TAU_INT) | ||
906 | void TAUException(struct pt_regs *regs) | ||
907 | { | ||
908 | printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n", | ||
909 | regs->nip, regs->msr, regs->trap, print_tainted()); | ||
910 | } | ||
911 | #endif /* CONFIG_TAU_INT */ | ||
912 | |||
913 | void AltivecUnavailException(struct pt_regs *regs) | ||
914 | { | ||
915 | static int kernel_altivec_count; | ||
916 | |||
917 | #ifndef CONFIG_ALTIVEC | ||
918 | if (user_mode(regs)) { | ||
919 | /* A user program has executed an altivec instruction, | ||
920 | but this kernel doesn't support altivec. */ | ||
921 | _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); | ||
922 | return; | ||
923 | } | ||
924 | #endif | ||
925 | /* The kernel has executed an altivec instruction without | ||
926 | first enabling altivec. Whinge but let it do it. */ | ||
927 | if (++kernel_altivec_count < 10) | ||
928 | printk(KERN_ERR "AltiVec used in kernel (task=%p, pc=%lx)\n", | ||
929 | current, regs->nip); | ||
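| /* turn on MSR_VEC in the saved MSR so the faulting instruction succeeds when retried on return */ | ||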
930 | regs->msr |= MSR_VEC; | ||
931 | } | ||
932 | |||
933 | #ifdef CONFIG_ALTIVEC | ||
934 | void AltivecAssistException(struct pt_regs *regs) | ||
935 | { | ||
936 | int err; | ||
937 | |||
938 | preempt_disable(); | ||
939 | if (regs->msr & MSR_VEC) | ||
940 | giveup_altivec(current); | ||
941 | preempt_enable(); | ||
942 | if (!user_mode(regs)) { | ||
943 | printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode" | ||
944 | " at %lx\n", regs->nip); | ||
945 | die("Kernel Altivec assist exception", regs, SIGILL); | ||
946 | } | ||
947 | |||
948 | err = emulate_altivec(regs); | ||
949 | if (err == 0) { | ||
950 | regs->nip += 4; /* skip emulated instruction */ | ||
951 | emulate_single_step(regs); | ||
952 | return; | ||
953 | } | ||
954 | |||
955 | if (err == -EFAULT) { | ||
956 | /* got an error reading the instruction */ | ||
957 | _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); | ||
958 | } else { | ||
959 | /* didn't recognize the instruction */ | ||
960 | /* XXX quick hack for now: set the non-Java bit in the VSCR */ | ||
961 | if (printk_ratelimit()) | ||
962 | printk(KERN_ERR "Unrecognized altivec instruction " | ||
963 | "in %s at %lx\n", current->comm, regs->nip); | ||
964 | current->thread.vscr.u[3] |= 0x10000; | ||
965 | } | ||
966 | } | ||
967 | #endif /* CONFIG_ALTIVEC */ | ||
968 | |||
969 | #ifdef CONFIG_E500 | ||
970 | void PerformanceMonitorException(struct pt_regs *regs) | ||
971 | { | ||
972 | perf_irq(regs); | ||
973 | } | ||
974 | #endif | ||
975 | |||
976 | #ifdef CONFIG_FSL_BOOKE | ||
977 | void CacheLockingException(struct pt_regs *regs, unsigned long address, | ||
978 | unsigned long error_code) | ||
979 | { | ||
980 | /* We treat cache locking instructions from the user | ||
981 | * as priv ops; in the future we could try to do | ||
982 | * something smarter | ||
983 | */ | ||
984 | if (error_code & (ESR_DLK|ESR_ILK)) | ||
985 | _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); | ||
986 | return; | ||
987 | } | ||
988 | #endif /* CONFIG_FSL_BOOKE */ | ||
989 | |||
990 | #ifdef CONFIG_SPE | ||
991 | void SPEFloatingPointException(struct pt_regs *regs) | ||
992 | { | ||
993 | unsigned long spefscr; | ||
994 | int fpexc_mode; | ||
995 | int code = 0; | ||
996 | |||
997 | spefscr = current->thread.spefscr; | ||
998 | fpexc_mode = current->thread.fpexc_mode; | ||
999 | |||
1000 | /* Hardware does not necessarily set the sticky | ||
1001 | * underflow/overflow/invalid flags, so set them here */ | ||
1002 | if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) { | ||
1003 | code = FPE_FLTOVF; | ||
1004 | spefscr |= SPEFSCR_FOVFS; | ||
1005 | } | ||
1006 | else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) { | ||
1007 | code = FPE_FLTUND; | ||
1008 | spefscr |= SPEFSCR_FUNFS; | ||
1009 | } | ||
1010 | else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV)) | ||
1011 | code = FPE_FLTDIV; | ||
1012 | else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) { | ||
1013 | code = FPE_FLTINV; | ||
1014 | spefscr |= SPEFSCR_FINVS; | ||
1015 | } | ||
1016 | else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES)) | ||
1017 | code = FPE_FLTRES; | ||
1018 | |||
1019 | current->thread.spefscr = spefscr; | ||
1020 | |||
1021 | _exception(SIGFPE, regs, code, regs->nip); | ||
1022 | return; | ||
1023 | } | ||
1024 | #endif | ||
1025 | |||
1026 | #ifdef CONFIG_BOOKE_WDT | ||
1027 | /* | ||
1028 | * Default handler for a Watchdog exception, | ||
1029 | * spins until a reboot occurs | ||
1030 | */ | ||
1031 | void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs) | ||
1032 | { | ||
1033 | /* Generic WatchdogHandler, implement your own */ | ||
1034 | mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE)); | ||
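| /* clearing TCR[WIE] masks further watchdog interrupts; if TCR[WRC] is programmed, the next timeout still resets the chip */ | ||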
1035 | return; | ||
1036 | } | ||
1037 | |||
1038 | void WatchdogException(struct pt_regs *regs) | ||
1039 | { | ||
1040 | printk(KERN_EMERG "PowerPC Book-E Watchdog Exception\n"); | ||
1041 | WatchdogHandler(regs); | ||
1042 | } | ||
1043 | #endif | ||
1044 | |||
1045 | void __init trap_init(void) | ||
1046 | { | ||
1047 | } | ||
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S new file mode 100644 index 000000000000..12cb90bc209c --- /dev/null +++ b/arch/powerpc/kernel/vector.S | |||
@@ -0,0 +1,197 @@ | |||
1 | #include <linux/config.h> | ||
2 | #include <asm/ppc_asm.h> | ||
3 | #include <asm/processor.h> | ||
4 | |||
5 | /* | ||
6 | * The routines below are in assembler so we can closely control the | ||
7 | * usage of floating-point registers. These routines must be called | ||
8 | * with preempt disabled. | ||
9 | */ | ||
10 | #ifdef CONFIG_PPC32 | ||
11 | .data | ||
12 | fpzero: | ||
13 | .long 0 | ||
14 | fpone: | ||
15 | .long 0x3f800000 /* 1.0 in single-precision FP */ | ||
16 | fphalf: | ||
17 | .long 0x3f000000 /* 0.5 in single-precision FP */ | ||
18 | |||
19 | #define LDCONST(fr, name) \ | ||
20 | lis r11,name@ha; \ | ||
21 | lfs fr,name@l(r11) | ||
22 | #else | ||
23 | |||
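| /* 64-bit: keep the FP constants in the TOC so LDCONST is a single lfd off r2 */ | ||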
24 | .section ".toc","aw" | ||
25 | fpzero: | ||
26 | .tc FD_0_0[TC],0 | ||
27 | fpone: | ||
28 | .tc FD_3ff00000_0[TC],0x3ff0000000000000 /* 1.0 */ | ||
29 | fphalf: | ||
30 | .tc FD_3fe00000_0[TC],0x3fe0000000000000 /* 0.5 */ | ||
31 | |||
32 | #define LDCONST(fr, name) \ | ||
33 | lfd fr,name@toc(r2) | ||
34 | #endif | ||
35 | |||
36 | .text | ||
37 | /* | ||
38 | * Internal routine to enable floating point and set FPSCR to 0. | ||
39 | * Don't call it from C; it doesn't use the normal calling convention. | ||
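| * Callers do "mflr r12" before "bl fpenable"; fpdisable returns to the original caller via mtlr r12. | ||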
40 | */ | ||
41 | fpenable: | ||
42 | #ifdef CONFIG_PPC32 | ||
43 | stwu r1,-64(r1) | ||
44 | #else | ||
45 | stdu r1,-64(r1) | ||
46 | #endif | ||
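| /* 64-byte frame: fr31, fr1 and fr0 are saved at 8, 16 and 24(r1); 32..56(r1) is free for routines that also save fr2-fr5 */ | ||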
47 | mfmsr r10 | ||
48 | ori r11,r10,MSR_FP | ||
49 | mtmsr r11 | ||
50 | isync | ||
51 | stfd fr0,24(r1) | ||
52 | stfd fr1,16(r1) | ||
53 | stfd fr31,8(r1) | ||
54 | LDCONST(fr1, fpzero) | ||
55 | mffs fr31 | ||
56 | mtfsf 0xff,fr1 | ||
57 | blr | ||
58 | |||
59 | fpdisable: | ||
60 | mtlr r12 | ||
61 | mtfsf 0xff,fr31 | ||
62 | lfd fr31,8(r1) | ||
63 | lfd fr1,16(r1) | ||
64 | lfd fr0,24(r1) | ||
65 | mtmsr r10 | ||
66 | isync | ||
67 | addi r1,r1,64 | ||
68 | blr | ||
69 | |||
70 | /* | ||
71 | * Vector add, floating point. | ||
72 | */ | ||
73 | _GLOBAL(vaddfp) | ||
74 | mflr r12 | ||
75 | bl fpenable | ||
76 | li r0,4 | ||
77 | mtctr r0 | ||
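| /* a 128-bit AltiVec register holds four 32-bit floats; emulate elementwise with CTR as the loop counter */ | ||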
78 | li r6,0 | ||
79 | 1: lfsx fr0,r4,r6 | ||
80 | lfsx fr1,r5,r6 | ||
81 | fadds fr0,fr0,fr1 | ||
82 | stfsx fr0,r3,r6 | ||
83 | addi r6,r6,4 | ||
84 | bdnz 1b | ||
85 | b fpdisable | ||
86 | |||
87 | /* | ||
88 | * Vector subtract, floating point. | ||
89 | */ | ||
90 | _GLOBAL(vsubfp) | ||
91 | mflr r12 | ||
92 | bl fpenable | ||
93 | li r0,4 | ||
94 | mtctr r0 | ||
95 | li r6,0 | ||
96 | 1: lfsx fr0,r4,r6 | ||
97 | lfsx fr1,r5,r6 | ||
98 | fsubs fr0,fr0,fr1 | ||
99 | stfsx fr0,r3,r6 | ||
100 | addi r6,r6,4 | ||
101 | bdnz 1b | ||
102 | b fpdisable | ||
103 | |||
104 | /* | ||
105 | * Vector multiply and add, floating point. | ||
106 | */ | ||
107 | _GLOBAL(vmaddfp) | ||
108 | mflr r12 | ||
109 | bl fpenable | ||
110 | stfd fr2,32(r1) | ||
111 | li r0,4 | ||
112 | mtctr r0 | ||
113 | li r7,0 | ||
114 | 1: lfsx fr0,r4,r7 | ||
115 | lfsx fr1,r5,r7 | ||
116 | lfsx fr2,r6,r7 | ||
117 | fmadds fr0,fr0,fr2,fr1 | ||
118 | stfsx fr0,r3,r7 | ||
119 | addi r7,r7,4 | ||
120 | bdnz 1b | ||
121 | lfd fr2,32(r1) | ||
122 | b fpdisable | ||
123 | |||
124 | /* | ||
125 | * Vector negative multiply and subtract, floating point. | ||
126 | */ | ||
127 | _GLOBAL(vnmsubfp) | ||
128 | mflr r12 | ||
129 | bl fpenable | ||
130 | stfd fr2,32(r1) | ||
131 | li r0,4 | ||
132 | mtctr r0 | ||
133 | li r7,0 | ||
134 | 1: lfsx fr0,r4,r7 | ||
135 | lfsx fr1,r5,r7 | ||
136 | lfsx fr2,r6,r7 | ||
137 | fnmsubs fr0,fr0,fr2,fr1 | ||
138 | stfsx fr0,r3,r7 | ||
139 | addi r7,r7,4 | ||
140 | bdnz 1b | ||
141 | lfd fr2,32(r1) | ||
142 | b fpdisable | ||
143 | |||
144 | /* | ||
145 | * Vector reciprocal estimate. We just compute 1.0/x. | ||
146 | * r3 -> destination, r4 -> source. | ||
147 | */ | ||
148 | _GLOBAL(vrefp) | ||
149 | mflr r12 | ||
150 | bl fpenable | ||
151 | li r0,4 | ||
152 | LDCONST(fr1, fpone) | ||
153 | mtctr r0 | ||
154 | li r6,0 | ||
155 | 1: lfsx fr0,r4,r6 | ||
156 | fdivs fr0,fr1,fr0 | ||
157 | stfsx fr0,r3,r6 | ||
158 | addi r6,r6,4 | ||
159 | bdnz 1b | ||
160 | b fpdisable | ||
161 | |||
162 | /* | ||
163 | * Vector reciprocal square-root estimate, floating point. | ||
164 | * We use the frsqrte instruction for the initial estimate followed | ||
165 | * by 2 iterations of Newton-Raphson to get sufficient accuracy. | ||
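| * (One Newton step for f(r) = 1/r^2 - s is r' = r + 0.5*r*(1 - s*r*r).) | ||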
166 | * r3 -> destination, r4 -> source. | ||
167 | */ | ||
168 | _GLOBAL(vrsqrtefp) | ||
169 | mflr r12 | ||
170 | bl fpenable | ||
171 | stfd fr2,32(r1) | ||
172 | stfd fr3,40(r1) | ||
173 | stfd fr4,48(r1) | ||
174 | stfd fr5,56(r1) | ||
175 | li r0,4 | ||
176 | LDCONST(fr4, fpone) | ||
177 | LDCONST(fr5, fphalf) | ||
178 | mtctr r0 | ||
179 | li r6,0 | ||
180 | 1: lfsx fr0,r4,r6 | ||
181 | frsqrte fr1,fr0 /* r = frsqrte(s) */ | ||
182 | fmuls fr3,fr1,fr0 /* r * s */ | ||
183 | fmuls fr2,fr1,fr5 /* r * 0.5 */ | ||
184 | fnmsubs fr3,fr1,fr3,fr4 /* 1 - s * r * r */ | ||
185 | fmadds fr1,fr2,fr3,fr1 /* r = r + 0.5 * r * (1 - s * r * r) */ | ||
186 | fmuls fr3,fr1,fr0 /* r * s */ | ||
187 | fmuls fr2,fr1,fr5 /* r * 0.5 */ | ||
188 | fnmsubs fr3,fr1,fr3,fr4 /* 1 - s * r * r */ | ||
189 | fmadds fr1,fr2,fr3,fr1 /* r = r + 0.5 * r * (1 - s * r * r) */ | ||
190 | stfsx fr1,r3,r6 | ||
191 | addi r6,r6,4 | ||
192 | bdnz 1b | ||
193 | lfd fr5,56(r1) | ||
194 | lfd fr4,48(r1) | ||
195 | lfd fr3,40(r1) | ||
196 | lfd fr2,32(r1) | ||
197 | b fpdisable | ||
diff --git a/arch/powerpc/kernel/vmlinux.lds b/arch/powerpc/kernel/vmlinux.lds new file mode 100644 index 000000000000..d62c288a81d0 --- /dev/null +++ b/arch/powerpc/kernel/vmlinux.lds | |||
@@ -0,0 +1,174 @@ | |||
1 | /* Align . to an 8-byte boundary, which equals the maximum function alignment. */ | ||
2 | /* sched.text is aligned to the function alignment to ensure we get the same | ||
3 | * address even on the second ld pass when generating System.map */ | ||
4 | /* spinlock.text is aligned to the function alignment to ensure we get the same | ||
5 | * address even on the second ld pass when generating System.map */ | ||
6 | /* DWARF debug sections. | ||
7 | Symbols in the DWARF debugging sections are relative to | ||
8 | the beginning of the section so we begin them at 0. */ | ||
9 | /* Stabs debugging sections. */ | ||
10 | OUTPUT_ARCH(powerpc:common) | ||
11 | jiffies = jiffies_64 + 4; | ||
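| /* powerpc is big-endian: the 32-bit jiffies aliases the low-order word of jiffies_64, at byte offset 4 */ | ||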
12 | SECTIONS | ||
13 | { | ||
14 | /* Read-only sections, merged into text segment: */ | ||
15 | . = + SIZEOF_HEADERS; | ||
16 | .interp : { *(.interp) } | ||
17 | .hash : { *(.hash) } | ||
18 | .dynsym : { *(.dynsym) } | ||
19 | .dynstr : { *(.dynstr) } | ||
20 | .rel.text : { *(.rel.text) } | ||
21 | .rela.text : { *(.rela.text) } | ||
22 | .rel.data : { *(.rel.data) } | ||
23 | .rela.data : { *(.rela.data) } | ||
24 | .rel.rodata : { *(.rel.rodata) } | ||
25 | .rela.rodata : { *(.rela.rodata) } | ||
26 | .rel.got : { *(.rel.got) } | ||
27 | .rela.got : { *(.rela.got) } | ||
28 | .rel.ctors : { *(.rel.ctors) } | ||
29 | .rela.ctors : { *(.rela.ctors) } | ||
30 | .rel.dtors : { *(.rel.dtors) } | ||
31 | .rela.dtors : { *(.rela.dtors) } | ||
32 | .rel.bss : { *(.rel.bss) } | ||
33 | .rela.bss : { *(.rela.bss) } | ||
34 | .rel.plt : { *(.rel.plt) } | ||
35 | .rela.plt : { *(.rela.plt) } | ||
36 | /* .init : { *(.init) } =0*/ | ||
37 | .plt : { *(.plt) } | ||
38 | .text : | ||
39 | { | ||
40 | *(.text) | ||
41 | . = ALIGN(8); __sched_text_start = .; *(.sched.text) __sched_text_end = .; | ||
42 | . = ALIGN(8); __lock_text_start = .; *(.spinlock.text) __lock_text_end = .; | ||
43 | *(.fixup) | ||
44 | *(.got1) | ||
45 | __got2_start = .; | ||
46 | *(.got2) | ||
47 | __got2_end = .; | ||
48 | } | ||
49 | _etext = .; | ||
50 | PROVIDE (etext = .); | ||
51 | .rodata : AT(ADDR(.rodata) - 0) { *(.rodata) *(.rodata.*) *(__vermagic) } | ||
| .rodata1 : AT(ADDR(.rodata1) - 0) { *(.rodata1) } | ||
| .pci_fixup : AT(ADDR(.pci_fixup) - 0) { | ||
| __start_pci_fixups_early = .; *(.pci_fixup_early) __end_pci_fixups_early = .; | ||
| __start_pci_fixups_header = .; *(.pci_fixup_header) __end_pci_fixups_header = .; | ||
| __start_pci_fixups_final = .; *(.pci_fixup_final) __end_pci_fixups_final = .; | ||
| __start_pci_fixups_enable = .; *(.pci_fixup_enable) __end_pci_fixups_enable = .; | ||
| } | ||
| __ksymtab : AT(ADDR(__ksymtab) - 0) { __start___ksymtab = .; *(__ksymtab) __stop___ksymtab = .; } | ||
| __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - 0) { __start___ksymtab_gpl = .; *(__ksymtab_gpl) __stop___ksymtab_gpl = .; } | ||
| __kcrctab : AT(ADDR(__kcrctab) - 0) { __start___kcrctab = .; *(__kcrctab) __stop___kcrctab = .; } | ||
| __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - 0) { __start___kcrctab_gpl = .; *(__kcrctab_gpl) __stop___kcrctab_gpl = .; } | ||
| __ksymtab_strings : AT(ADDR(__ksymtab_strings) - 0) { *(__ksymtab_strings) } | ||
| __param : AT(ADDR(__param) - 0) { __start___param = .; *(__param) __stop___param = .; } | ||
52 | .fini : { *(.fini) } =0 | ||
53 | .ctors : { *(.ctors) } | ||
54 | .dtors : { *(.dtors) } | ||
55 | .fixup : { *(.fixup) } | ||
56 | __ex_table : { | ||
57 | __start___ex_table = .; | ||
58 | *(__ex_table) | ||
59 | __stop___ex_table = .; | ||
60 | } | ||
61 | __bug_table : { | ||
62 | __start___bug_table = .; | ||
63 | *(__bug_table) | ||
64 | __stop___bug_table = .; | ||
65 | } | ||
66 | /* Read-write section, merged into data segment: */ | ||
67 | . = ALIGN(4096); | ||
68 | .data : | ||
69 | { | ||
70 | *(.data) | ||
71 | *(.data1) | ||
72 | *(.sdata) | ||
73 | *(.sdata2) | ||
74 | *(.got.plt) *(.got) | ||
75 | *(.dynamic) | ||
76 | CONSTRUCTORS | ||
77 | } | ||
78 | |||
79 | . = ALIGN(4096); | ||
80 | __nosave_begin = .; | ||
81 | .data_nosave : { *(.data.nosave) } | ||
82 | . = ALIGN(4096); | ||
83 | __nosave_end = .; | ||
84 | |||
85 | . = ALIGN(32); | ||
86 | .data.cacheline_aligned : { *(.data.cacheline_aligned) } | ||
87 | |||
88 | _edata = .; | ||
89 | PROVIDE (edata = .); | ||
90 | |||
91 | . = ALIGN(8192); | ||
92 | .data.init_task : { *(.data.init_task) } | ||
93 | |||
94 | . = ALIGN(4096); | ||
95 | __init_begin = .; | ||
96 | .init.text : { | ||
97 | _sinittext = .; | ||
98 | *(.init.text) | ||
99 | _einittext = .; | ||
100 | } | ||
101 | /* .exit.text is discarded at runtime, not link time, | ||
102 | to deal with references from __bug_table */ | ||
103 | .exit.text : { *(.exit.text) } | ||
104 | .init.data : { | ||
105 | *(.init.data); | ||
106 | __vtop_table_begin = .; | ||
107 | *(.vtop_fixup); | ||
108 | __vtop_table_end = .; | ||
109 | __ptov_table_begin = .; | ||
110 | *(.ptov_fixup); | ||
111 | __ptov_table_end = .; | ||
112 | } | ||
113 | . = ALIGN(16); | ||
114 | __setup_start = .; | ||
115 | .init.setup : { *(.init.setup) } | ||
116 | __setup_end = .; | ||
117 | __initcall_start = .; | ||
118 | .initcall.init : { | ||
119 | *(.initcall1.init) | ||
120 | *(.initcall2.init) | ||
121 | *(.initcall3.init) | ||
122 | *(.initcall4.init) | ||
123 | *(.initcall5.init) | ||
124 | *(.initcall6.init) | ||
125 | *(.initcall7.init) | ||
126 | } | ||
127 | __initcall_end = .; | ||
128 | |||
129 | __con_initcall_start = .; | ||
130 | .con_initcall.init : { *(.con_initcall.init) } | ||
131 | __con_initcall_end = .; | ||
132 | |||
133 | .security_initcall.init : AT(ADDR(.security_initcall.init) - 0) { __security_initcall_start = .; *(.security_initcall.init) __security_initcall_end = .; } | ||
134 | |||
135 | __start___ftr_fixup = .; | ||
136 | __ftr_fixup : { *(__ftr_fixup) } | ||
137 | __stop___ftr_fixup = .; | ||
138 | |||
139 | . = ALIGN(32); | ||
140 | __per_cpu_start = .; | ||
141 | .data.percpu : { *(.data.percpu) } | ||
142 | __per_cpu_end = .; | ||
143 | |||
144 | . = ALIGN(4096); | ||
145 | __initramfs_start = .; | ||
146 | .init.ramfs : { *(.init.ramfs) } | ||
147 | __initramfs_end = .; | ||
148 | |||
149 | . = ALIGN(4096); | ||
150 | __init_end = .; | ||
151 | |||
152 | . = ALIGN(4096); | ||
153 | _sextratext = .; | ||
154 | _eextratext = .; | ||
155 | |||
156 | __bss_start = .; | ||
157 | .bss : | ||
158 | { | ||
159 | *(.sbss) *(.scommon) | ||
160 | *(.dynbss) | ||
161 | *(.bss) | ||
162 | *(COMMON) | ||
163 | } | ||
164 | __bss_stop = .; | ||
165 | |||
166 | _end = . ; | ||
167 | PROVIDE (end = .); | ||
168 | |||
169 | /* Sections to be discarded. */ | ||
170 | /DISCARD/ : { | ||
171 | *(.exitcall.exit) | ||
172 | *(.exit.data) | ||
173 | } | ||
174 | } | ||
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S new file mode 100644 index 000000000000..09c6525cfa61 --- /dev/null +++ b/arch/powerpc/kernel/vmlinux.lds.S | |||
@@ -0,0 +1,172 @@ | |||
1 | #include <asm-generic/vmlinux.lds.h> | ||
2 | |||
3 | OUTPUT_ARCH(powerpc:common) | ||
4 | jiffies = jiffies_64 + 4; | ||
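| /* powerpc is big-endian: the 32-bit jiffies aliases the low-order word of jiffies_64, at byte offset 4 */ | ||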
5 | SECTIONS | ||
6 | { | ||
7 | /* Read-only sections, merged into text segment: */ | ||
8 | . = + SIZEOF_HEADERS; | ||
9 | .interp : { *(.interp) } | ||
10 | .hash : { *(.hash) } | ||
11 | .dynsym : { *(.dynsym) } | ||
12 | .dynstr : { *(.dynstr) } | ||
13 | .rel.text : { *(.rel.text) } | ||
14 | .rela.text : { *(.rela.text) } | ||
15 | .rel.data : { *(.rel.data) } | ||
16 | .rela.data : { *(.rela.data) } | ||
17 | .rel.rodata : { *(.rel.rodata) } | ||
18 | .rela.rodata : { *(.rela.rodata) } | ||
19 | .rel.got : { *(.rel.got) } | ||
20 | .rela.got : { *(.rela.got) } | ||
21 | .rel.ctors : { *(.rel.ctors) } | ||
22 | .rela.ctors : { *(.rela.ctors) } | ||
23 | .rel.dtors : { *(.rel.dtors) } | ||
24 | .rela.dtors : { *(.rela.dtors) } | ||
25 | .rel.bss : { *(.rel.bss) } | ||
26 | .rela.bss : { *(.rela.bss) } | ||
27 | .rel.plt : { *(.rel.plt) } | ||
28 | .rela.plt : { *(.rela.plt) } | ||
29 | /* .init : { *(.init) } =0*/ | ||
30 | .plt : { *(.plt) } | ||
31 | .text : | ||
32 | { | ||
33 | *(.text) | ||
34 | SCHED_TEXT | ||
35 | LOCK_TEXT | ||
36 | *(.fixup) | ||
37 | *(.got1) | ||
38 | __got2_start = .; | ||
39 | *(.got2) | ||
40 | __got2_end = .; | ||
41 | } | ||
42 | _etext = .; | ||
43 | PROVIDE (etext = .); | ||
44 | |||
45 | RODATA | ||
46 | .fini : { *(.fini) } =0 | ||
47 | .ctors : { *(.ctors) } | ||
48 | .dtors : { *(.dtors) } | ||
49 | |||
50 | .fixup : { *(.fixup) } | ||
51 | |||
52 | __ex_table : { | ||
53 | __start___ex_table = .; | ||
54 | *(__ex_table) | ||
55 | __stop___ex_table = .; | ||
56 | } | ||
57 | |||
58 | __bug_table : { | ||
59 | __start___bug_table = .; | ||
60 | *(__bug_table) | ||
61 | __stop___bug_table = .; | ||
62 | } | ||
63 | |||
64 | /* Read-write section, merged into data segment: */ | ||
65 | . = ALIGN(4096); | ||
66 | .data : | ||
67 | { | ||
68 | *(.data) | ||
69 | *(.data1) | ||
70 | *(.sdata) | ||
71 | *(.sdata2) | ||
72 | *(.got.plt) *(.got) | ||
73 | *(.dynamic) | ||
74 | CONSTRUCTORS | ||
75 | } | ||
76 | |||
77 | . = ALIGN(4096); | ||
78 | __nosave_begin = .; | ||
79 | .data_nosave : { *(.data.nosave) } | ||
80 | . = ALIGN(4096); | ||
81 | __nosave_end = .; | ||
82 | |||
83 | . = ALIGN(32); | ||
84 | .data.cacheline_aligned : { *(.data.cacheline_aligned) } | ||
85 | |||
86 | _edata = .; | ||
87 | PROVIDE (edata = .); | ||
88 | |||
89 | . = ALIGN(8192); | ||
90 | .data.init_task : { *(.data.init_task) } | ||
91 | |||
92 | . = ALIGN(4096); | ||
93 | __init_begin = .; | ||
94 | .init.text : { | ||
95 | _sinittext = .; | ||
96 | *(.init.text) | ||
97 | _einittext = .; | ||
98 | } | ||
99 | /* .exit.text is discarded at runtime, not link time, | ||
100 | to deal with references from __bug_table */ | ||
101 | .exit.text : { *(.exit.text) } | ||
102 | .init.data : { | ||
103 | *(.init.data); | ||
104 | __vtop_table_begin = .; | ||
105 | *(.vtop_fixup); | ||
106 | __vtop_table_end = .; | ||
107 | __ptov_table_begin = .; | ||
108 | *(.ptov_fixup); | ||
109 | __ptov_table_end = .; | ||
110 | } | ||
111 | . = ALIGN(16); | ||
112 | __setup_start = .; | ||
113 | .init.setup : { *(.init.setup) } | ||
114 | __setup_end = .; | ||
115 | __initcall_start = .; | ||
116 | .initcall.init : { | ||
117 | *(.initcall1.init) | ||
118 | *(.initcall2.init) | ||
119 | *(.initcall3.init) | ||
120 | *(.initcall4.init) | ||
121 | *(.initcall5.init) | ||
122 | *(.initcall6.init) | ||
123 | *(.initcall7.init) | ||
124 | } | ||
125 | __initcall_end = .; | ||
126 | |||
127 | __con_initcall_start = .; | ||
128 | .con_initcall.init : { *(.con_initcall.init) } | ||
129 | __con_initcall_end = .; | ||
130 | |||
131 | SECURITY_INIT | ||
132 | |||
133 | __start___ftr_fixup = .; | ||
134 | __ftr_fixup : { *(__ftr_fixup) } | ||
135 | __stop___ftr_fixup = .; | ||
136 | |||
137 | . = ALIGN(32); | ||
138 | __per_cpu_start = .; | ||
139 | .data.percpu : { *(.data.percpu) } | ||
140 | __per_cpu_end = .; | ||
141 | |||
142 | . = ALIGN(4096); | ||
143 | __initramfs_start = .; | ||
144 | .init.ramfs : { *(.init.ramfs) } | ||
145 | __initramfs_end = .; | ||
146 | |||
147 | . = ALIGN(4096); | ||
148 | __init_end = .; | ||
149 | |||
150 | . = ALIGN(4096); | ||
151 | _sextratext = .; | ||
152 | _eextratext = .; | ||
153 | |||
154 | __bss_start = .; | ||
155 | .bss : | ||
156 | { | ||
157 | *(.sbss) *(.scommon) | ||
158 | *(.dynbss) | ||
159 | *(.bss) | ||
160 | *(COMMON) | ||
161 | } | ||
162 | __bss_stop = .; | ||
163 | |||
164 | _end = . ; | ||
165 | PROVIDE (end = .); | ||
166 | |||
167 | /* Sections to be discarded. */ | ||
168 | /DISCARD/ : { | ||
169 | *(.exitcall.exit) | ||
170 | *(.exit.data) | ||
171 | } | ||
172 | } | ||