Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/Makefile  56
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c  273
-rw-r--r--  arch/powerpc/kernel/binfmt_elf32.c  75
-rw-r--r--  arch/powerpc/kernel/btext.c  853
-rw-r--r--  arch/powerpc/kernel/cputable.c  996
-rw-r--r--  arch/powerpc/kernel/entry_32.S  1000
-rw-r--r--  arch/powerpc/kernel/entry_64.S  842
-rw-r--r--  arch/powerpc/kernel/fpu.S  144
-rw-r--r--  arch/powerpc/kernel/head_32.S  1381
-rw-r--r--  arch/powerpc/kernel/head_44x.S  782
-rw-r--r--  arch/powerpc/kernel/head_4xx.S  1022
-rw-r--r--  arch/powerpc/kernel/head_64.S  1957
-rw-r--r--  arch/powerpc/kernel/head_8xx.S  860
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S  1063
-rw-r--r--  arch/powerpc/kernel/idle_6xx.S  233
-rw-r--r--  arch/powerpc/kernel/idle_power4.S  78
-rw-r--r--  arch/powerpc/kernel/init_task.c  36
-rw-r--r--  arch/powerpc/kernel/lparmap.c  31
-rw-r--r--  arch/powerpc/kernel/misc_32.S  1037
-rw-r--r--  arch/powerpc/kernel/misc_64.S  880
-rw-r--r--  arch/powerpc/kernel/of_device.c  276
-rw-r--r--  arch/powerpc/kernel/pmc.c  112
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c  273
-rw-r--r--  arch/powerpc/kernel/process.c  919
-rw-r--r--  arch/powerpc/kernel/prom.c  2170
-rw-r--r--  arch/powerpc/kernel/prom_init.c  2109
-rw-r--r--  arch/powerpc/kernel/ptrace.c  613
-rw-r--r--  arch/powerpc/kernel/ptrace32.c  450
-rw-r--r--  arch/powerpc/kernel/rtas.c  680
-rw-r--r--  arch/powerpc/kernel/semaphore.c  135
-rw-r--r--  arch/powerpc/kernel/setup-common.c  410
-rw-r--r--  arch/powerpc/kernel/setup_32.c  372
-rw-r--r--  arch/powerpc/kernel/setup_64.c  1028
-rw-r--r--  arch/powerpc/kernel/signal_32.c  1269
-rw-r--r--  arch/powerpc/kernel/sys_ppc32.c  1008
-rw-r--r--  arch/powerpc/kernel/syscalls.c  358
-rw-r--r--  arch/powerpc/kernel/systbl.S  321
-rw-r--r--  arch/powerpc/kernel/time.c  1005
-rw-r--r--  arch/powerpc/kernel/traps.c  1101
-rw-r--r--  arch/powerpc/kernel/vecemu.c  345
-rw-r--r--  arch/powerpc/kernel/vector.S  197
-rw-r--r--  arch/powerpc/kernel/vio.c  271
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S  279
43 files changed, 29300 insertions, 0 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
new file mode 100644
index 000000000000..572d4f5eaacb
--- /dev/null
+++ b/arch/powerpc/kernel/Makefile
@@ -0,0 +1,56 @@
#
# Makefile for the linux kernel.
#

ifeq ($(CONFIG_PPC64),y)
EXTRA_CFLAGS += -mno-minimal-toc
endif
ifeq ($(CONFIG_PPC32),y)
CFLAGS_prom_init.o += -fPIC
CFLAGS_btext.o += -fPIC
endif

obj-y := semaphore.o cputable.o ptrace.o syscalls.o \
	 signal_32.o pmc.o
obj-$(CONFIG_PPC64) += setup_64.o binfmt_elf32.o sys_ppc32.o \
	 ptrace32.o systbl.o
obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o
obj-$(CONFIG_POWER4) += idle_power4.o
obj-$(CONFIG_PPC_OF) += of_device.o
obj-$(CONFIG_PPC_RTAS) += rtas.o
obj-$(CONFIG_IBMVIO) += vio.o

ifeq ($(CONFIG_PPC_MERGE),y)

extra-$(CONFIG_PPC_STD_MMU) := head_32.o
extra-$(CONFIG_PPC64) := head_64.o
extra-$(CONFIG_40x) := head_4xx.o
extra-$(CONFIG_44x) := head_44x.o
extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o
extra-$(CONFIG_8xx) := head_8xx.o
extra-y += vmlinux.lds

obj-y += process.o init_task.o time.o \
	 prom.o traps.o setup-common.o
obj-$(CONFIG_PPC32) += entry_32.o setup_32.o misc_32.o systbl.o
obj-$(CONFIG_PPC64) += misc_64.o
obj-$(CONFIG_PPC_OF) += prom_init.o
obj-$(CONFIG_MODULES) += ppc_ksyms.o
obj-$(CONFIG_BOOTX_TEXT) += btext.o
obj-$(CONFIG_6xx) += idle_6xx.o

ifeq ($(CONFIG_PPC_ISERIES),y)
$(obj)/head_64.o: $(obj)/lparmap.s
AFLAGS_head_64.o += -I$(obj)
endif

else
# stuff used from here for ARCH=ppc or ARCH=ppc64
obj-$(CONFIG_PPC64) += traps.o process.o init_task.o time.o \
	 setup-common.o


endif

extra-$(CONFIG_PPC_FPU) += fpu.o
extra-$(CONFIG_PPC64) += entry_64.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
new file mode 100644
index 000000000000..330cd783206f
--- /dev/null
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -0,0 +1,273 @@
/*
 * This program is used to generate definitions needed by
 * assembly language modules.
 *
 * We use the technique used in the OSF Mach kernel code:
 * generate asm statements containing #defines,
 * compile this file to assembler, and then extract the
 * #defines from the assembly-language output.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#ifdef CONFIG_PPC64
#include <linux/time.h>
#include <linux/hardirq.h>
#else
#include <linux/ptrace.h>
#include <linux/suspend.h>
#endif

#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/rtas.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/lppaca.h>
#include <asm/iSeries/HvLpEvent.h>
#include <asm/cache.h>
#include <asm/systemcfg.h>
#include <asm/compat.h>
#endif

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

#define BLANK() asm volatile("\n->" : : )

int main(void)
{
	DEFINE(THREAD, offsetof(struct task_struct, thread));
	DEFINE(MM, offsetof(struct task_struct, mm));
#ifdef CONFIG_PPC64
	DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context));
#else
	DEFINE(THREAD_INFO, offsetof(struct task_struct, thread_info));
	DEFINE(PTRACE, offsetof(struct task_struct, ptrace));
#endif /* CONFIG_PPC64 */

	DEFINE(KSP, offsetof(struct thread_struct, ksp));
	DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
	DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode));
	DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
	DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
#ifdef CONFIG_ALTIVEC
	DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0]));
	DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave));
	DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr));
	DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr));
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_PPC64
	DEFINE(KSP_VSID, offsetof(struct thread_struct, ksp_vsid));
#else /* CONFIG_PPC64 */
	DEFINE(PGDIR, offsetof(struct thread_struct, pgdir));
	DEFINE(LAST_SYSCALL, offsetof(struct thread_struct, last_syscall));
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, dbcr0));
	DEFINE(PT_PTRACED, PT_PTRACED);
#endif
#ifdef CONFIG_SPE
	DEFINE(THREAD_EVR0, offsetof(struct thread_struct, evr[0]));
	DEFINE(THREAD_ACC, offsetof(struct thread_struct, acc));
	DEFINE(THREAD_SPEFSCR, offsetof(struct thread_struct, spefscr));
	DEFINE(THREAD_USED_SPE, offsetof(struct thread_struct, used_spe));
#endif /* CONFIG_SPE */
#endif /* CONFIG_PPC64 */

	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
	DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
	DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror));
#ifdef CONFIG_PPC32
	DEFINE(TI_TASK, offsetof(struct thread_info, task));
	DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain));
	DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64
	DEFINE(DCACHEL1LINESIZE, offsetof(struct ppc64_caches, dline_size));
	DEFINE(DCACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_dline_size));
	DEFINE(DCACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, dlines_per_page));
	DEFINE(ICACHEL1LINESIZE, offsetof(struct ppc64_caches, iline_size));
	DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size));
	DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
	DEFINE(PLATFORM, offsetof(struct systemcfg, platform));
	DEFINE(PLATFORM_LPAR, PLATFORM_LPAR);

	/* paca */
	DEFINE(PACA_SIZE, sizeof(struct paca_struct));
	DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index));
	DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start));
	DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack));
	DEFINE(PACACURRENT, offsetof(struct paca_struct, __current));
	DEFINE(PACASAVEDMSR, offsetof(struct paca_struct, saved_msr));
	DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real));
	DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr));
	DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
	DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
	DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
	DEFINE(PACAPROCENABLED, offsetof(struct paca_struct, proc_enabled));
	DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
	DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
	DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
#ifdef CONFIG_HUGETLB_PAGE
	DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas));
	DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas));
#endif /* CONFIG_HUGETLB_PAGE */
	DEFINE(PACADEFAULTDECR, offsetof(struct paca_struct, default_decr));
	DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
	DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
	DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb));
	DEFINE(PACA_EXDSI, offsetof(struct paca_struct, exdsi));
	DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
	DEFINE(PACALPPACA, offsetof(struct paca_struct, lppaca));
	DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));

	DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0));
	DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
	DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
	DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
#endif /* CONFIG_PPC64 */

	/* RTAS */
	DEFINE(RTASBASE, offsetof(struct rtas_t, base));
	DEFINE(RTASENTRY, offsetof(struct rtas_t, entry));

	/* Interrupt register frame */
	DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
#ifndef CONFIG_PPC64
	DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
#else /* CONFIG_PPC64 */
	DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
	/* 288 = # of volatile regs, int & fp, for leaf routines */
	/* which do not stack a frame.  See the PPC64 ABI.  */
	DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 288);
	/* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */
	DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
	DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
#endif /* CONFIG_PPC64 */
	DEFINE(GPR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[0]));
	DEFINE(GPR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[1]));
	DEFINE(GPR2, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[2]));
	DEFINE(GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[3]));
	DEFINE(GPR4, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[4]));
	DEFINE(GPR5, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[5]));
	DEFINE(GPR6, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[6]));
	DEFINE(GPR7, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[7]));
	DEFINE(GPR8, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[8]));
	DEFINE(GPR9, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[9]));
	DEFINE(GPR10, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[10]));
	DEFINE(GPR11, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[11]));
	DEFINE(GPR12, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[12]));
	DEFINE(GPR13, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[13]));
#ifndef CONFIG_PPC64
	DEFINE(GPR14, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[14]));
	DEFINE(GPR15, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[15]));
	DEFINE(GPR16, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[16]));
	DEFINE(GPR17, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[17]));
	DEFINE(GPR18, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[18]));
	DEFINE(GPR19, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[19]));
	DEFINE(GPR20, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[20]));
	DEFINE(GPR21, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[21]));
	DEFINE(GPR22, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[22]));
	DEFINE(GPR23, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[23]));
	DEFINE(GPR24, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[24]));
	DEFINE(GPR25, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[25]));
	DEFINE(GPR26, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[26]));
	DEFINE(GPR27, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[27]));
	DEFINE(GPR28, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[28]));
	DEFINE(GPR29, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[29]));
	DEFINE(GPR30, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[30]));
	DEFINE(GPR31, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[31]));
#endif /* CONFIG_PPC64 */
	/*
	 * Note: these symbols include _ because they overlap with special
	 * register names
	 */
	DEFINE(_NIP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, nip));
	DEFINE(_MSR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, msr));
	DEFINE(_CTR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ctr));
	DEFINE(_LINK, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, link));
	DEFINE(_CCR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ccr));
	DEFINE(_XER, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, xer));
	DEFINE(_DAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
	DEFINE(_DSISR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
	DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3));
	DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result));
	DEFINE(_TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap));
#ifndef CONFIG_PPC64
	DEFINE(_MQ, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, mq));
	/*
	 * The PowerPC 400-class & Book-E processors have neither the DAR
	 * nor the DSISR SPRs. Hence, we overload them to hold the similar
	 * DEAR and ESR SPRs for such processors.  For critical interrupts
	 * we use them to hold SRR0 and SRR1.
	 */
	DEFINE(_DEAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
	DEFINE(_ESR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
#else /* CONFIG_PPC64 */
	DEFINE(SOFTE, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, softe));

	/* These _only_ to be used with {PROM,RTAS}_FRAME_SIZE!!! */
	DEFINE(_SRR0, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs));
	DEFINE(_SRR1, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)+8);
#endif /* CONFIG_PPC64 */

	DEFINE(CLONE_VM, CLONE_VM);
	DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);

#ifndef CONFIG_PPC64
	DEFINE(MM_PGD, offsetof(struct mm_struct, pgd));
#endif /* ! CONFIG_PPC64 */

	/* About the CPU features table */
	DEFINE(CPU_SPEC_ENTRY_SIZE, sizeof(struct cpu_spec));
	DEFINE(CPU_SPEC_PVR_MASK, offsetof(struct cpu_spec, pvr_mask));
	DEFINE(CPU_SPEC_PVR_VALUE, offsetof(struct cpu_spec, pvr_value));
	DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
	DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));

#ifndef CONFIG_PPC64
	DEFINE(pbe_address, offsetof(struct pbe, address));
	DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
	DEFINE(pbe_next, offsetof(struct pbe, next));

	DEFINE(TASK_SIZE, TASK_SIZE);
	DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28);
#else /* CONFIG_PPC64 */
	/* systemcfg offsets for use by vdso */
	DEFINE(CFG_TB_ORIG_STAMP, offsetof(struct systemcfg, tb_orig_stamp));
	DEFINE(CFG_TB_TICKS_PER_SEC, offsetof(struct systemcfg, tb_ticks_per_sec));
	DEFINE(CFG_TB_TO_XS, offsetof(struct systemcfg, tb_to_xs));
	DEFINE(CFG_STAMP_XSEC, offsetof(struct systemcfg, stamp_xsec));
	DEFINE(CFG_TB_UPDATE_COUNT, offsetof(struct systemcfg, tb_update_count));
	DEFINE(CFG_TZ_MINUTEWEST, offsetof(struct systemcfg, tz_minuteswest));
	DEFINE(CFG_TZ_DSTTIME, offsetof(struct systemcfg, tz_dsttime));
	DEFINE(CFG_SYSCALL_MAP32, offsetof(struct systemcfg, syscall_map_32));
	DEFINE(CFG_SYSCALL_MAP64, offsetof(struct systemcfg, syscall_map_64));

	/* timeval/timezone offsets for use by vdso */
	DEFINE(TVAL64_TV_SEC, offsetof(struct timeval, tv_sec));
	DEFINE(TVAL64_TV_USEC, offsetof(struct timeval, tv_usec));
	DEFINE(TVAL32_TV_SEC, offsetof(struct compat_timeval, tv_sec));
	DEFINE(TVAL32_TV_USEC, offsetof(struct compat_timeval, tv_usec));
	DEFINE(TZONE_TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
	DEFINE(TZONE_TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));
#endif /* CONFIG_PPC64 */
	return 0;
}
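
The extraction trick described in the header comment above can be tried
outside the kernel. A minimal standalone sketch (the struct and symbol
names here are hypothetical): compiling the file below with "gcc -S"
leaves marker lines such as "->EX_B 8 offsetof(struct example, b)" in
the generated assembly, which a small build script can rewrite into
"#define EX_B 8" for inclusion by assembly sources; the kernel build
applies the same idea to asm-offsets.c.

#include <stddef.h>

struct example {
	int a;		/* offset 0 */
	long b;		/* offset 4 or 8, depending on the ABI */
};

/* Plants a "->sym value" marker in the .s output; never executed. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

int main(void)
{
	DEFINE(EX_A, offsetof(struct example, a));
	DEFINE(EX_B, offsetof(struct example, b));
	return 0;
}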
diff --git a/arch/powerpc/kernel/binfmt_elf32.c b/arch/powerpc/kernel/binfmt_elf32.c
new file mode 100644
index 000000000000..8ad6b0f33651
--- /dev/null
+++ b/arch/powerpc/kernel/binfmt_elf32.c
@@ -0,0 +1,75 @@
/*
 * binfmt_elf32.c: Support 32-bit PPC ELF binaries on Power3 and followons.
 * based on the SPARC64 version.
 * Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
 * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
 *
 * Copyright (C) 2000,2001 Ken Aaker (kdaaker@rchland.vnet.ibm.com), IBM Corp
 * Copyright (C) 2001 Anton Blanchard (anton@au.ibm.com), IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define ELF_ARCH	EM_PPC
#define ELF_CLASS	ELFCLASS32
#define ELF_DATA	ELFDATA2MSB

#include <asm/processor.h>
#include <linux/module.h>
#include <linux/config.h>
#include <linux/elfcore.h>
#include <linux/compat.h>

#define elf_prstatus elf_prstatus32
struct elf_prstatus32
{
	struct elf_siginfo pr_info;	/* Info associated with signal */
	short	pr_cursig;		/* Current signal */
	unsigned int pr_sigpend;	/* Set of pending signals */
	unsigned int pr_sighold;	/* Set of held signals */
	pid_t	pr_pid;
	pid_t	pr_ppid;
	pid_t	pr_pgrp;
	pid_t	pr_sid;
	struct compat_timeval pr_utime;	/* User time */
	struct compat_timeval pr_stime;	/* System time */
	struct compat_timeval pr_cutime;	/* Cumulative user time */
	struct compat_timeval pr_cstime;	/* Cumulative system time */
	elf_gregset_t pr_reg;	/* General purpose registers. */
	int pr_fpvalid;		/* True if math co-processor being used. */
};

#define elf_prpsinfo elf_prpsinfo32
struct elf_prpsinfo32
{
	char	pr_state;	/* numeric process state */
	char	pr_sname;	/* char for pr_state */
	char	pr_zomb;	/* zombie */
	char	pr_nice;	/* nice val */
	unsigned int pr_flag;	/* flags */
	u32	pr_uid;
	u32	pr_gid;
	pid_t	pr_pid, pr_ppid, pr_pgrp, pr_sid;
	/* Lots missing */
	char	pr_fname[16];	/* filename of executable */
	char	pr_psargs[ELF_PRARGSZ];	/* initial part of arg list */
};

#include <linux/time.h>

#undef cputime_to_timeval
#define cputime_to_timeval cputime_to_compat_timeval
static __inline__ void
cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
{
	unsigned long jiffies = cputime_to_jiffies(cputime);
	value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
	value->tv_sec = jiffies / HZ;
}

#define init_elf_binfmt init_elf32_binfmt

#include "../../../fs/binfmt_elf.c"
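
A quick worked example of the conversion done by
cputime_to_compat_timeval() above, pulled out as a standalone sketch
(the HZ value and sample tick count are arbitrary choices for
illustration):

#include <stdio.h>

#define HZ 250	/* assumed tick rate; the kernel value is a config choice */

struct sample_timeval {
	long tv_sec;
	long tv_usec;
};

/* Same arithmetic as the inline above: whole ticks become seconds,
 * the remainder becomes microseconds at 1000000/HZ us per tick. */
static void jiffies_to_sample_tv(unsigned long jiffies,
				 struct sample_timeval *value)
{
	value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
	value->tv_sec = jiffies / HZ;
}

int main(void)
{
	struct sample_timeval tv;

	jiffies_to_sample_tv(1330, &tv);
	printf("%ld s, %ld us\n", tv.tv_sec, tv.tv_usec);	/* 5 s, 320000 us */
	return 0;
}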
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
new file mode 100644
index 000000000000..bdfba92b2b38
--- /dev/null
+++ b/arch/powerpc/kernel/btext.c
@@ -0,0 +1,853 @@
/*
 * Procedures for drawing on the screen early on in the boot process.
 *
 * Benjamin Herrenschmidt <benh@kernel.crashing.org>
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/module.h>

#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/btext.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/lmb.h>
#include <asm/processor.h>

#define NO_SCROLL

#ifndef NO_SCROLL
static void scrollscreen(void);
#endif

static void draw_byte(unsigned char c, long locX, long locY);
static void draw_byte_32(unsigned char *bits, unsigned int *base, int rb);
static void draw_byte_16(unsigned char *bits, unsigned int *base, int rb);
static void draw_byte_8(unsigned char *bits, unsigned int *base, int rb);

static int g_loc_X;
static int g_loc_Y;
static int g_max_loc_X;
static int g_max_loc_Y;

static int dispDeviceRowBytes;
static int dispDeviceDepth;
static int dispDeviceRect[4];
static unsigned char *dispDeviceBase, *logicalDisplayBase;

unsigned long disp_BAT[2] __initdata = {0, 0};

#define cmapsz	(16*256)

static unsigned char vga_font[cmapsz];

int boot_text_mapped;
int force_printk_to_btext = 0;

#ifdef CONFIG_PPC32
/* Calc BAT values for mapping the display and store them
 * in disp_BAT.  Those values are then used from head.S to map
 * the display during identify_machine() and MMU_Init()
 *
 * The display is mapped to virtual address 0xD0000000, rather
 * than 1:1, because some CHRP machines put the frame buffer
 * in the region starting at 0xC0000000 (KERNELBASE).
 * This mapping is temporary and will disappear as soon as the
 * setup done by MMU_Init() is applied.
 *
 * For now, we align the BAT and then map 8Mb on 601 and 16Mb
 * on other PPCs. This may cause trouble if the framebuffer
 * is really badly aligned, but I didn't encounter this case
 * yet.
 */
void __init
btext_prepare_BAT(void)
{
	unsigned long vaddr = KERNELBASE + 0x10000000;
	unsigned long addr;
	unsigned long lowbits;

	addr = (unsigned long)dispDeviceBase;
	if (!addr) {
		boot_text_mapped = 0;
		return;
	}
	if (PVR_VER(mfspr(SPRN_PVR)) != 1) {
		/* 603, 604, G3, G4, ... */
		lowbits = addr & ~0xFF000000UL;
		addr &= 0xFF000000UL;
		disp_BAT[0] = vaddr | (BL_16M<<2) | 2;
		disp_BAT[1] = addr | (_PAGE_NO_CACHE | _PAGE_GUARDED | BPP_RW);
	} else {
		/* 601 */
		lowbits = addr & ~0xFF800000UL;
		addr &= 0xFF800000UL;
		disp_BAT[0] = vaddr | (_PAGE_NO_CACHE | PP_RWXX) | 4;
		disp_BAT[1] = addr | BL_8M | 0x40;
	}
	logicalDisplayBase = (void *) (vaddr + lowbits);
}
#endif
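
/*
 * Worked example of the split above (the address is illustrative): a
 * frame buffer at physical 0x9c008000 on a non-601 CPU yields
 * addr = 0x9c000000 (the 16MB-aligned BAT block) and lowbits = 0x8000,
 * so once head.S installs the BAT at vaddr 0xd0000000 the text engine
 * writes through logicalDisplayBase = 0xd0008000.
 */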

/* This function will enable the early boot text when doing OF booting. This
 * way, xmon output should work too
 */
void __init
btext_setup_display(int width, int height, int depth, int pitch,
		    unsigned long address)
{
	g_loc_X = 0;
	g_loc_Y = 0;
	g_max_loc_X = width / 8;
	g_max_loc_Y = height / 16;
	logicalDisplayBase = (unsigned char *)address;
	dispDeviceBase = (unsigned char *)address;
	dispDeviceRowBytes = pitch;
	dispDeviceDepth = depth;
	dispDeviceRect[0] = dispDeviceRect[1] = 0;
	dispDeviceRect[2] = width;
	dispDeviceRect[3] = height;
	boot_text_mapped = 1;
}

/* Here's a small text engine to use during early boot
 * or for debugging purposes
 *
 * todo:
 *
 *  - build some kind of vgacon with it to enable early printk
 *  - move to a separate file
 *  - add a few video driver hooks to keep in sync with display
 *    changes.
 */

void map_boot_text(void)
{
	unsigned long base, offset, size;
	unsigned char *vbase;

	/* By default, we are no longer mapped */
	boot_text_mapped = 0;
	if (dispDeviceBase == 0)
		return;
	base = ((unsigned long) dispDeviceBase) & 0xFFFFF000UL;
	offset = ((unsigned long) dispDeviceBase) - base;
	size = dispDeviceRowBytes * dispDeviceRect[3] + offset
		+ dispDeviceRect[0];
	vbase = __ioremap(base, size, _PAGE_NO_CACHE);
	if (vbase == 0)
		return;
	logicalDisplayBase = vbase + offset;
	boot_text_mapped = 1;
}

int btext_initialize(struct device_node *np)
{
	unsigned int width, height, depth, pitch;
	unsigned long address = 0;
	u32 *prop;

	prop = (u32 *)get_property(np, "width", NULL);
	if (prop == NULL)
		return -EINVAL;
	width = *prop;
	prop = (u32 *)get_property(np, "height", NULL);
	if (prop == NULL)
		return -EINVAL;
	height = *prop;
	prop = (u32 *)get_property(np, "depth", NULL);
	if (prop == NULL)
		return -EINVAL;
	depth = *prop;
	pitch = width * ((depth + 7) / 8);
	prop = (u32 *)get_property(np, "linebytes", NULL);
	if (prop)
		pitch = *prop;
	if (pitch == 1)
		pitch = 0x1000;
	prop = (u32 *)get_property(np, "address", NULL);
	if (prop)
		address = *prop;

	/* FIXME: Add support for PCI reg properties */

	if (address == 0)
		return -EINVAL;

	g_loc_X = 0;
	g_loc_Y = 0;
	g_max_loc_X = width / 8;
	g_max_loc_Y = height / 16;
	logicalDisplayBase = (unsigned char *)address;
	dispDeviceBase = (unsigned char *)address;
	dispDeviceRowBytes = pitch;
	dispDeviceDepth = depth;
	dispDeviceRect[0] = dispDeviceRect[1] = 0;
	dispDeviceRect[2] = width;
	dispDeviceRect[3] = height;

	map_boot_text();

	return 0;
}

void __init init_boot_display(void)
{
	char *name;
	struct device_node *np = NULL;
	int rc = -ENODEV;

	printk("trying to initialize btext ...\n");

	name = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
	if (name != NULL) {
		np = of_find_node_by_path(name);
		if (np != NULL) {
			if (strcmp(np->type, "display") != 0) {
				printk("boot stdout isn't a display !\n");
				of_node_put(np);
				np = NULL;
			}
		}
	}
	if (np)
		rc = btext_initialize(np);
	if (rc == 0)
		return;

	for (np = NULL; (np = of_find_node_by_type(np, "display"));) {
		if (get_property(np, "linux,opened", NULL)) {
			printk("trying %s ...\n", np->full_name);
			rc = btext_initialize(np);
			printk("result: %d\n", rc);
		}
		if (rc == 0)
			return;
	}
}

/* Calc the base address of a given point (x,y) */
static unsigned char * calc_base(int x, int y)
{
	unsigned char *base;

	base = logicalDisplayBase;
	if (base == 0)
		base = dispDeviceBase;
	base += (x + dispDeviceRect[0]) * (dispDeviceDepth >> 3);
	base += (y + dispDeviceRect[1]) * dispDeviceRowBytes;
	return base;
}
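
/*
 * Worked example for calc_base() (illustrative mode): on an 800x600
 * display at depth 8 (one byte per pixel) with pitch 800 and a
 * zero-origin rect, the glyph cell at column 3, row 2 (pixel 24, 32)
 * starts at base + 24*1 + 32*800 = base + 25624.
 */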

/* Adjust the display to a new resolution */
void btext_update_display(unsigned long phys, int width, int height,
			  int depth, int pitch)
{
	if (dispDeviceBase == 0)
		return;

	/* check it's the same frame buffer (within 256MB) */
	if ((phys ^ (unsigned long)dispDeviceBase) & 0xf0000000)
		return;

	dispDeviceBase = (__u8 *) phys;
	dispDeviceRect[0] = 0;
	dispDeviceRect[1] = 0;
	dispDeviceRect[2] = width;
	dispDeviceRect[3] = height;
	dispDeviceDepth = depth;
	dispDeviceRowBytes = pitch;
	if (boot_text_mapped) {
		iounmap(logicalDisplayBase);
		boot_text_mapped = 0;
	}
	map_boot_text();
	g_loc_X = 0;
	g_loc_Y = 0;
	g_max_loc_X = width / 8;
	g_max_loc_Y = height / 16;
}
EXPORT_SYMBOL(btext_update_display);

void btext_clearscreen(void)
{
	unsigned long *base = (unsigned long *)calc_base(0, 0);
	unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
				(dispDeviceDepth >> 3)) >> 3;
	int i,j;

	for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1]); i++)
	{
		unsigned long *ptr = base;
		for(j=width; j; --j)
			*(ptr++) = 0;
		base += (dispDeviceRowBytes >> 3);
	}
}

#ifndef NO_SCROLL
static void scrollscreen(void)
{
	unsigned long *src = (unsigned long *)calc_base(0,16);
	unsigned long *dst = (unsigned long *)calc_base(0,0);
	unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
				(dispDeviceDepth >> 3)) >> 3;
	int i,j;

	for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1] - 16); i++)
	{
		unsigned long *src_ptr = src;
		unsigned long *dst_ptr = dst;
		for(j=width; j; --j)
			*(dst_ptr++) = *(src_ptr++);
		src += (dispDeviceRowBytes >> 3);
		dst += (dispDeviceRowBytes >> 3);
	}
	for (i=0; i<16; i++)
	{
		unsigned long *dst_ptr = dst;
		for(j=width; j; --j)
			*(dst_ptr++) = 0;
		dst += (dispDeviceRowBytes >> 3);
	}
}
#endif /* ndef NO_SCROLL */

void btext_drawchar(char c)
{
	int cline = 0;
#ifdef NO_SCROLL
	int x;
#endif
	if (!boot_text_mapped)
		return;

	switch (c) {
	case '\b':
		if (g_loc_X > 0)
			--g_loc_X;
		break;
	case '\t':
		g_loc_X = (g_loc_X & -8) + 8;
		break;
	case '\r':
		g_loc_X = 0;
		break;
	case '\n':
		g_loc_X = 0;
		g_loc_Y++;
		cline = 1;
		break;
	default:
		draw_byte(c, g_loc_X++, g_loc_Y);
	}
	if (g_loc_X >= g_max_loc_X) {
		g_loc_X = 0;
		g_loc_Y++;
		cline = 1;
	}
#ifndef NO_SCROLL
	while (g_loc_Y >= g_max_loc_Y) {
		scrollscreen();
		g_loc_Y--;
	}
#else
	/* wrap around from bottom to top of screen so we don't
	   waste time scrolling each line.  -- paulus. */
	if (g_loc_Y >= g_max_loc_Y)
		g_loc_Y = 0;
	if (cline) {
		for (x = 0; x < g_max_loc_X; ++x)
			draw_byte(' ', x, g_loc_Y);
	}
#endif
}

void btext_drawstring(const char *c)
{
	if (!boot_text_mapped)
		return;
	while (*c)
		btext_drawchar(*c++);
}

void btext_drawhex(unsigned long v)
{
	char *hex_table = "0123456789abcdef";

	if (!boot_text_mapped)
		return;
#ifdef CONFIG_PPC64
	btext_drawchar(hex_table[(v >> 60) & 0x0000000FUL]);
	btext_drawchar(hex_table[(v >> 56) & 0x0000000FUL]);
	btext_drawchar(hex_table[(v >> 52) & 0x0000000FUL]);
	btext_drawchar(hex_table[(v >> 48) & 0x0000000FUL]);
	btext_drawchar(hex_table[(v >> 44) & 0x0000000FUL]);
	btext_drawchar(hex_table[(v >> 40) & 0x0000000FUL]);
	btext_drawchar(hex_table[(v >> 36) & 0x0000000FUL]);
	btext_drawchar(hex_table[(v >> 32) & 0x0000000FUL]);
#endif
	btext_drawchar(hex_table[(v >> 28) & 0x0000000FUL]);
	btext_drawchar(hex_table[(v >> 24) & 0x0000000FUL]);
	btext_drawchar(hex_table[(v >> 20) & 0x0000000FUL]);
	btext_drawchar(hex_table[(v >> 16) & 0x0000000FUL]);
	btext_drawchar(hex_table[(v >> 12) & 0x0000000FUL]);
	btext_drawchar(hex_table[(v >> 8) & 0x0000000FUL]);
	btext_drawchar(hex_table[(v >> 4) & 0x0000000FUL]);
	btext_drawchar(hex_table[(v >> 0) & 0x0000000FUL]);
	btext_drawchar(' ');
}

static void draw_byte(unsigned char c, long locX, long locY)
{
	unsigned char *base = calc_base(locX << 3, locY << 4);
	unsigned char *font = &vga_font[((unsigned int)c) * 16];
	int rb = dispDeviceRowBytes;

	switch(dispDeviceDepth) {
	case 24:
	case 32:
		draw_byte_32(font, (unsigned int *)base, rb);
		break;
	case 15:
	case 16:
		draw_byte_16(font, (unsigned int *)base, rb);
		break;
	case 8:
		draw_byte_8(font, (unsigned int *)base, rb);
		break;
	}
}

static unsigned int expand_bits_8[16] = {
	0x00000000,
	0x000000ff,
	0x0000ff00,
	0x0000ffff,
	0x00ff0000,
	0x00ff00ff,
	0x00ffff00,
	0x00ffffff,
	0xff000000,
	0xff0000ff,
	0xff00ff00,
	0xff00ffff,
	0xffff0000,
	0xffff00ff,
	0xffffff00,
	0xffffffff
};

static unsigned int expand_bits_16[4] = {
	0x00000000,
	0x0000ffff,
	0xffff0000,
	0xffffffff
};


static void draw_byte_32(unsigned char *font, unsigned int *base, int rb)
{
	int l, bits;
	int fg = 0xFFFFFFFFUL;
	int bg = 0x00000000UL;

	for (l = 0; l < 16; ++l)
	{
		bits = *font++;
		/* -(bit) is all ones when the bit is set, all zeros otherwise */
		base[0] = (-(bits >> 7) & fg) ^ bg;
		base[1] = (-((bits >> 6) & 1) & fg) ^ bg;
		base[2] = (-((bits >> 5) & 1) & fg) ^ bg;
		base[3] = (-((bits >> 4) & 1) & fg) ^ bg;
		base[4] = (-((bits >> 3) & 1) & fg) ^ bg;
		base[5] = (-((bits >> 2) & 1) & fg) ^ bg;
		base[6] = (-((bits >> 1) & 1) & fg) ^ bg;
		base[7] = (-(bits & 1) & fg) ^ bg;
		base = (unsigned int *) ((char *)base + rb);
	}
}

static void draw_byte_16(unsigned char *font, unsigned int *base, int rb)
{
	int l, bits;
	int fg = 0xFFFFFFFFUL;
	int bg = 0x00000000UL;
	unsigned int *eb = (unsigned int *)expand_bits_16;

	for (l = 0; l < 16; ++l)
	{
		bits = *font++;
		base[0] = (eb[bits >> 6] & fg) ^ bg;
		base[1] = (eb[(bits >> 4) & 3] & fg) ^ bg;
		base[2] = (eb[(bits >> 2) & 3] & fg) ^ bg;
		base[3] = (eb[bits & 3] & fg) ^ bg;
		base = (unsigned int *) ((char *)base + rb);
	}
}

static void draw_byte_8(unsigned char *font, unsigned int *base, int rb)
{
	int l, bits;
	int fg = 0x0F0F0F0FUL;
	int bg = 0x00000000UL;
	unsigned int *eb = (unsigned int *)expand_bits_8;

	for (l = 0; l < 16; ++l)
	{
		bits = *font++;
		base[0] = (eb[bits >> 4] & fg) ^ bg;
		base[1] = (eb[bits & 0xf] & fg) ^ bg;
		base = (unsigned int *) ((char *)base + rb);
	}
}

510static unsigned char vga_font[cmapsz] = {
5110x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5120x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x81, 0xa5, 0x81, 0x81, 0xbd,
5130x99, 0x81, 0x81, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xff,
5140xdb, 0xff, 0xff, 0xc3, 0xe7, 0xff, 0xff, 0x7e, 0x00, 0x00, 0x00, 0x00,
5150x00, 0x00, 0x00, 0x00, 0x6c, 0xfe, 0xfe, 0xfe, 0xfe, 0x7c, 0x38, 0x10,
5160x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x7c, 0xfe,
5170x7c, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18,
5180x3c, 0x3c, 0xe7, 0xe7, 0xe7, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
5190x00, 0x00, 0x00, 0x18, 0x3c, 0x7e, 0xff, 0xff, 0x7e, 0x18, 0x18, 0x3c,
5200x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c,
5210x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
5220xff, 0xff, 0xe7, 0xc3, 0xc3, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
5230x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x42, 0x42, 0x66, 0x3c, 0x00,
5240x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc3, 0x99, 0xbd,
5250xbd, 0x99, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x1e, 0x0e,
5260x1a, 0x32, 0x78, 0xcc, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00,
5270x00, 0x00, 0x3c, 0x66, 0x66, 0x66, 0x66, 0x3c, 0x18, 0x7e, 0x18, 0x18,
5280x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x33, 0x3f, 0x30, 0x30, 0x30,
5290x30, 0x70, 0xf0, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0x63,
5300x7f, 0x63, 0x63, 0x63, 0x63, 0x67, 0xe7, 0xe6, 0xc0, 0x00, 0x00, 0x00,
5310x00, 0x00, 0x00, 0x18, 0x18, 0xdb, 0x3c, 0xe7, 0x3c, 0xdb, 0x18, 0x18,
5320x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfe, 0xf8,
5330xf0, 0xe0, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x06, 0x0e,
5340x1e, 0x3e, 0xfe, 0x3e, 0x1e, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00,
5350x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x00,
5360x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
5370x66, 0x00, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xdb,
5380xdb, 0xdb, 0x7b, 0x1b, 0x1b, 0x1b, 0x1b, 0x1b, 0x00, 0x00, 0x00, 0x00,
5390x00, 0x7c, 0xc6, 0x60, 0x38, 0x6c, 0xc6, 0xc6, 0x6c, 0x38, 0x0c, 0xc6,
5400x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5410xfe, 0xfe, 0xfe, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c,
5420x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00,
5430x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
5440x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
5450x18, 0x7e, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5460x00, 0x18, 0x0c, 0xfe, 0x0c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5470x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x60, 0xfe, 0x60, 0x30, 0x00, 0x00,
5480x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xc0,
5490xc0, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5500x00, 0x24, 0x66, 0xff, 0x66, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5510x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x38, 0x7c, 0x7c, 0xfe, 0xfe, 0x00,
5520x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xfe, 0x7c, 0x7c,
5530x38, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5540x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5550x00, 0x00, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18,
5560x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x24, 0x00, 0x00, 0x00,
5570x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6c,
5580x6c, 0xfe, 0x6c, 0x6c, 0x6c, 0xfe, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00,
5590x18, 0x18, 0x7c, 0xc6, 0xc2, 0xc0, 0x7c, 0x06, 0x06, 0x86, 0xc6, 0x7c,
5600x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc2, 0xc6, 0x0c, 0x18,
5610x30, 0x60, 0xc6, 0x86, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c,
5620x6c, 0x38, 0x76, 0xdc, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
5630x00, 0x30, 0x30, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5640x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x30, 0x30, 0x30,
5650x30, 0x30, 0x18, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x18,
5660x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
5670x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x3c, 0xff, 0x3c, 0x66, 0x00, 0x00,
5680x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e,
5690x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5700x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00,
5710x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
5720x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5730x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5740x02, 0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00,
5750x00, 0x00, 0x7c, 0xc6, 0xc6, 0xce, 0xde, 0xf6, 0xe6, 0xc6, 0xc6, 0x7c,
5760x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x38, 0x78, 0x18, 0x18, 0x18,
5770x18, 0x18, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
5780x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00,
5790x00, 0x00, 0x7c, 0xc6, 0x06, 0x06, 0x3c, 0x06, 0x06, 0x06, 0xc6, 0x7c,
5800x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x1c, 0x3c, 0x6c, 0xcc, 0xfe,
5810x0c, 0x0c, 0x0c, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0,
5820xc0, 0xc0, 0xfc, 0x06, 0x06, 0x06, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
5830x00, 0x00, 0x38, 0x60, 0xc0, 0xc0, 0xfc, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
5840x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0x06, 0x06, 0x0c, 0x18,
5850x30, 0x30, 0x30, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
5860xc6, 0xc6, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
5870x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x06, 0x06, 0x0c, 0x78,
5880x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00,
5890x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5900x18, 0x18, 0x00, 0x00, 0x00, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
5910x00, 0x00, 0x00, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x06,
5920x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00,
5930x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60,
5940x30, 0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00,
5950x00, 0x00, 0x7c, 0xc6, 0xc6, 0x0c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18,
5960x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xde, 0xde,
5970xde, 0xdc, 0xc0, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38,
5980x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
5990x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x66, 0x66, 0x66, 0x66, 0xfc,
6000x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0xc2, 0xc0, 0xc0, 0xc0,
6010xc0, 0xc2, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x6c,
6020x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x6c, 0xf8, 0x00, 0x00, 0x00, 0x00,
6030x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68, 0x60, 0x62, 0x66, 0xfe,
6040x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68,
6050x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66,
6060xc2, 0xc0, 0xc0, 0xde, 0xc6, 0xc6, 0x66, 0x3a, 0x00, 0x00, 0x00, 0x00,
6070x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
6080x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x18,
6090x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x0c,
6100x0c, 0x0c, 0x0c, 0x0c, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00,
6110x00, 0x00, 0xe6, 0x66, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0x66, 0xe6,
6120x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x60, 0x60, 0x60, 0x60, 0x60,
6130x60, 0x62, 0x66, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xe7,
6140xff, 0xff, 0xdb, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00,
6150x00, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6, 0xc6,
6160x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
6170xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66,
6180x66, 0x66, 0x7c, 0x60, 0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00,
6190x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xd6, 0xde, 0x7c,
6200x0c, 0x0e, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x6c,
6210x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
6220xc6, 0x60, 0x38, 0x0c, 0x06, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
6230x00, 0x00, 0xff, 0xdb, 0x99, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
6240x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
6250xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3,
6260xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00,
6270x00, 0x00, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x66,
6280x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x18,
6290x3c, 0x66, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3,
6300xc3, 0x66, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
6310x00, 0x00, 0xff, 0xc3, 0x86, 0x0c, 0x18, 0x30, 0x60, 0xc1, 0xc3, 0xff,
6320x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x30, 0x30, 0x30, 0x30, 0x30,
6330x30, 0x30, 0x30, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
6340xc0, 0xe0, 0x70, 0x38, 0x1c, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00,
6350x00, 0x00, 0x3c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x3c,
6360x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0x00, 0x00, 0x00, 0x00,
6370x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
6380x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00,
6390x30, 0x30, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
6400x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x0c, 0x7c,
6410xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x60,
6420x60, 0x78, 0x6c, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x00, 0x00, 0x00, 0x00,
6430x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc0, 0xc0, 0xc0, 0xc6, 0x7c,
6440x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x0c, 0x0c, 0x3c, 0x6c, 0xcc,
6450xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
6460x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
6470x00, 0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xf0,
6480x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xcc, 0xcc,
6490xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0xcc, 0x78, 0x00, 0x00, 0x00, 0xe0, 0x60,
6500x60, 0x6c, 0x76, 0x66, 0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00,
6510x00, 0x00, 0x18, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
6520x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x06, 0x00, 0x0e, 0x06, 0x06,
6530x06, 0x06, 0x06, 0x06, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0xe0, 0x60,
6540x60, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00,
6550x00, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
6560x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe6, 0xff, 0xdb,
6570xdb, 0xdb, 0xdb, 0xdb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
6580x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
6590x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
6600x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x66, 0x66,
6610x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00,
6620x00, 0x76, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0x0c, 0x1e, 0x00,
6630x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x76, 0x66, 0x60, 0x60, 0x60, 0xf0,
6640x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0x60,
6650x38, 0x0c, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x30,
6660x30, 0xfc, 0x30, 0x30, 0x30, 0x30, 0x36, 0x1c, 0x00, 0x00, 0x00, 0x00,
6670x00, 0x00, 0x00, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
6680x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0xc3,
6690xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
6700x00, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x00, 0x00, 0x00, 0x00,
6710x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0x3c, 0x66, 0xc3,
6720x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6,
6730xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00,
6740x00, 0xfe, 0xcc, 0x18, 0x30, 0x60, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00,
6750x00, 0x00, 0x0e, 0x18, 0x18, 0x18, 0x70, 0x18, 0x18, 0x18, 0x18, 0x0e,
6760x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x00, 0x18,
6770x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x18,
6780x18, 0x18, 0x0e, 0x18, 0x18, 0x18, 0x18, 0x70, 0x00, 0x00, 0x00, 0x00,
6790x00, 0x00, 0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
6800x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6,
6810xc6, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66,
6820xc2, 0xc0, 0xc0, 0xc0, 0xc2, 0x66, 0x3c, 0x0c, 0x06, 0x7c, 0x00, 0x00,
6830x00, 0x00, 0xcc, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
6840x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x00, 0x7c, 0xc6, 0xfe,
6850xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c,
6860x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
6870x00, 0x00, 0xcc, 0x00, 0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76,
6880x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0x78, 0x0c, 0x7c,
6890xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38,
6900x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
6910x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x60, 0x60, 0x66, 0x3c, 0x0c, 0x06,
6920x3c, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xfe,
6930xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00,
6940x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
6950x00, 0x60, 0x30, 0x18, 0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c,
6960x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x00, 0x00, 0x38, 0x18, 0x18,
6970x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c, 0x66,
6980x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
6990x00, 0x60, 0x30, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
7000x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0xc6,
7010xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38, 0x00,
7020x38, 0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
7030x18, 0x30, 0x60, 0x00, 0xfe, 0x66, 0x60, 0x7c, 0x60, 0x60, 0x66, 0xfe,
7040x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x3b, 0x1b,
7050x7e, 0xd8, 0xdc, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x6c,
7060xcc, 0xcc, 0xfe, 0xcc, 0xcc, 0xcc, 0xcc, 0xce, 0x00, 0x00, 0x00, 0x00,
7070x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
7080x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x00, 0x7c, 0xc6, 0xc6,
7090xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18,
7100x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
7110x00, 0x30, 0x78, 0xcc, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
7120x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0xcc, 0xcc, 0xcc,
7130xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00,
7140x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0x78, 0x00,
7150x00, 0xc6, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
7160x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
7170xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e,
7180xc3, 0xc0, 0xc0, 0xc0, 0xc3, 0x7e, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00,
7190x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xe6, 0xfc,
7200x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0xff, 0x18,
7210xff, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66,
7220x7c, 0x62, 0x66, 0x6f, 0x66, 0x66, 0x66, 0xf3, 0x00, 0x00, 0x00, 0x00,
7230x00, 0x0e, 0x1b, 0x18, 0x18, 0x18, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18,
7240xd8, 0x70, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0x78, 0x0c, 0x7c,
7250xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30,
7260x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
7270x00, 0x18, 0x30, 0x60, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
7280x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0xcc, 0xcc, 0xcc,
7290xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc,
7300x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
7310x76, 0xdc, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6,
7320x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x6c, 0x6c, 0x3e, 0x00, 0x7e, 0x00,
7330x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c,
7340x38, 0x00, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7350x00, 0x00, 0x30, 0x30, 0x00, 0x30, 0x30, 0x60, 0xc0, 0xc6, 0xc6, 0x7c,
7360x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0,
7370xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7380x00, 0x00, 0xfe, 0x06, 0x06, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00,
7390x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30, 0x60, 0xce, 0x9b, 0x06,
7400x0c, 0x1f, 0x00, 0x00, 0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30,
7410x66, 0xce, 0x96, 0x3e, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18,
7420x00, 0x18, 0x18, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00,
7430x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x6c, 0xd8, 0x6c, 0x36, 0x00, 0x00,
7440x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x6c, 0x36,
7450x6c, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x44, 0x11, 0x44,
7460x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44,
7470x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa,
7480x55, 0xaa, 0x55, 0xaa, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77,
7490xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0x18, 0x18, 0x18, 0x18,
7500x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
7510x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0x18, 0x18, 0x18,
7520x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8,
7530x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36,
7540x36, 0x36, 0x36, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
7550x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x36, 0x36, 0x36, 0x36,
7560x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x18, 0xf8,
7570x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36,
7580x36, 0xf6, 0x06, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
7590x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
7600x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x06, 0xf6,
7610x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
7620x36, 0xf6, 0x06, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7630x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xfe, 0x00, 0x00, 0x00, 0x00,
7640x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8,
7650x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7660x00, 0x00, 0x00, 0xf8, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
7670x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00,
7680x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xff,
7690x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7700x00, 0x00, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
7710x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
7720x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
7730x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18,
7740x18, 0x18, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
7750x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
7760x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x37,
7770x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
7780x36, 0x37, 0x30, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7790x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36,
7800x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xf7, 0x00, 0xff,
7810x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7820x00, 0xff, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
7830x36, 0x36, 0x36, 0x36, 0x36, 0x37, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36,
7840x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xff,
7850x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36,
7860x36, 0xf7, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
7870x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00,
7880x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xff,
7890x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7900x00, 0xff, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
7910x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x36, 0x36, 0x36, 0x36,
7920x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x3f,
7930x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18,
7940x18, 0x1f, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7950x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
7960x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f,
7970x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
7980x36, 0x36, 0x36, 0xff, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
7990x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18,
8000x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8,
8010x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8020x00, 0x00, 0x00, 0x1f, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
8030xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
8040xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
8050xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xf0, 0xf0, 0xf0,
8060xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
8070x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
8080x0f, 0x0f, 0x0f, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
8090x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8100x00, 0x76, 0xdc, 0xd8, 0xd8, 0xd8, 0xdc, 0x76, 0x00, 0x00, 0x00, 0x00,
8110x00, 0x00, 0x78, 0xcc, 0xcc, 0xcc, 0xd8, 0xcc, 0xc6, 0xc6, 0xc6, 0xcc,
8120x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0xc6, 0xc0, 0xc0, 0xc0,
8130xc0, 0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8140xfe, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00,
8150x00, 0x00, 0x00, 0xfe, 0xc6, 0x60, 0x30, 0x18, 0x30, 0x60, 0xc6, 0xfe,
8160x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xd8, 0xd8,
8170xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8180x66, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xc0, 0x00, 0x00, 0x00,
8190x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
8200x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x18, 0x3c, 0x66, 0x66,
8210x66, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38,
8220x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0x6c, 0x38, 0x00, 0x00, 0x00, 0x00,
8230x00, 0x00, 0x38, 0x6c, 0xc6, 0xc6, 0xc6, 0x6c, 0x6c, 0x6c, 0x6c, 0xee,
8240x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x30, 0x18, 0x0c, 0x3e, 0x66,
8250x66, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8260x00, 0x7e, 0xdb, 0xdb, 0xdb, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8270x00, 0x00, 0x00, 0x03, 0x06, 0x7e, 0xdb, 0xdb, 0xf3, 0x7e, 0x60, 0xc0,
8280x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x30, 0x60, 0x60, 0x7c, 0x60,
8290x60, 0x60, 0x30, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c,
8300xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
8310x00, 0x00, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00,
8320x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e, 0x18,
8330x18, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30,
8340x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
8350x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x00, 0x7e,
8360x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x1b, 0x1b, 0x1b, 0x18, 0x18,
8370x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
8380x18, 0x18, 0x18, 0x18, 0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00,
8390x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x7e, 0x00, 0x18, 0x18, 0x00,
8400x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x00,
8410x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c,
8420x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8430x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00,
8440x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8450x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x0c, 0x0c,
8460x0c, 0x0c, 0x0c, 0xec, 0x6c, 0x6c, 0x3c, 0x1c, 0x00, 0x00, 0x00, 0x00,
8470x00, 0xd8, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00,
8480x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0xd8, 0x30, 0x60, 0xc8, 0xf8, 0x00,
8490x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8500x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00,
8510x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8520x00, 0x00, 0x00, 0x00,
853};
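
The array just closed is the packed bitmap font used by the boot-text code:
each glyph occupies 16 consecutive bytes, one byte per scan line, with bit 7
as the leftmost pixel. A minimal C sketch of how such a font is drawn,
assuming a hypothetical plot_pixel() helper in place of the real framebuffer
stores:

/*
 * Minimal sketch, assuming an 8x16 glyph cell and a hypothetical
 * plot_pixel(x, y) helper; the real code writes the framebuffer
 * directly.
 */
static void draw_glyph(const unsigned char *font, unsigned char ch,
		       int x, int y)
{
	const unsigned char *glyph = font + ch * 16;
	int row, col;

	for (row = 0; row < 16; row++)
		for (col = 0; col < 8; col++)
			if (glyph[row] & (0x80 >> col))
				plot_pixel(x + col, y + row);
}
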
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
new file mode 100644
index 000000000000..b91345fa0805
--- /dev/null
+++ b/arch/powerpc/kernel/cputable.c
@@ -0,0 +1,996 @@
1/*
2 * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
3 *
4 * Modifications for ppc64:
5 * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/config.h>
14#include <linux/string.h>
15#include <linux/sched.h>
16#include <linux/threads.h>
17#include <linux/init.h>
18#include <linux/module.h>
19
20#include <asm/oprofile_impl.h>
21#include <asm/cputable.h>
22
23struct cpu_spec* cur_cpu_spec = NULL;
24EXPORT_SYMBOL(cur_cpu_spec);
25
26/* NOTE:
27 * Unlike ppc32, ppc64 will only call this once for the boot CPU, it's
28 * the responsibility of the appropriate CPU save/restore functions to
29 * eventually copy these settings over. Those save/restore functions aren't yet
30 * part of the cputable, though; that has to be fixed for both ppc32
31 * and ppc64
32 */
33#ifdef CONFIG_PPC64
34extern void __setup_cpu_power3(unsigned long offset, struct cpu_spec* spec);
35extern void __setup_cpu_power4(unsigned long offset, struct cpu_spec* spec);
36extern void __setup_cpu_be(unsigned long offset, struct cpu_spec* spec);
37#else
38extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec);
39extern void __setup_cpu_604(unsigned long offset, struct cpu_spec* spec);
40extern void __setup_cpu_750(unsigned long offset, struct cpu_spec* spec);
41extern void __setup_cpu_750cx(unsigned long offset, struct cpu_spec* spec);
42extern void __setup_cpu_750fx(unsigned long offset, struct cpu_spec* spec);
43extern void __setup_cpu_7400(unsigned long offset, struct cpu_spec* spec);
44extern void __setup_cpu_7410(unsigned long offset, struct cpu_spec* spec);
45extern void __setup_cpu_745x(unsigned long offset, struct cpu_spec* spec);
46#endif /* CONFIG_PPC64 */
47extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
48
49/* This table only contains "desktop" CPUs; it needs to be filled with embedded
50 * ones as well...
51 */
52#define COMMON_USER (PPC_FEATURE_32 | PPC_FEATURE_HAS_FPU | \
53 PPC_FEATURE_HAS_MMU)
54#define COMMON_USER_PPC64 (COMMON_USER | PPC_FEATURE_64)
55
56
57/* We only set the SPE features if the kernel was compiled with
58 * SPE support.
59 */
60#ifdef CONFIG_SPE
61#define PPC_FEATURE_SPE_COMP PPC_FEATURE_HAS_SPE
62#else
63#define PPC_FEATURE_SPE_COMP 0
64#endif
65
66struct cpu_spec cpu_specs[] = {
67#ifdef CONFIG_PPC64
68 { /* Power3 */
69 .pvr_mask = 0xffff0000,
70 .pvr_value = 0x00400000,
71 .cpu_name = "POWER3 (630)",
72 .cpu_features = CPU_FTRS_POWER3,
73 .cpu_user_features = COMMON_USER_PPC64,
74 .icache_bsize = 128,
75 .dcache_bsize = 128,
76 .num_pmcs = 8,
77 .cpu_setup = __setup_cpu_power3,
78#ifdef CONFIG_OPROFILE
79 .oprofile_cpu_type = "ppc64/power3",
80 .oprofile_model = &op_model_rs64,
81#endif
82 },
83 { /* Power3+ */
84 .pvr_mask = 0xffff0000,
85 .pvr_value = 0x00410000,
86 .cpu_name = "POWER3 (630+)",
87 .cpu_features = CPU_FTRS_POWER3,
88 .cpu_user_features = COMMON_USER_PPC64,
89 .icache_bsize = 128,
90 .dcache_bsize = 128,
91 .num_pmcs = 8,
92 .cpu_setup = __setup_cpu_power3,
93#ifdef CONFIG_OPROFILE
94 .oprofile_cpu_type = "ppc64/power3",
95 .oprofile_model = &op_model_rs64,
96#endif
97 },
98 { /* Northstar */
99 .pvr_mask = 0xffff0000,
100 .pvr_value = 0x00330000,
101 .cpu_name = "RS64-II (northstar)",
102 .cpu_features = CPU_FTRS_RS64,
103 .cpu_user_features = COMMON_USER_PPC64,
104 .icache_bsize = 128,
105 .dcache_bsize = 128,
106 .num_pmcs = 8,
107 .cpu_setup = __setup_cpu_power3,
108#ifdef CONFIG_OPROFILE
109 .oprofile_cpu_type = "ppc64/rs64",
110 .oprofile_model = &op_model_rs64,
111#endif
112 },
113 { /* Pulsar */
114 .pvr_mask = 0xffff0000,
115 .pvr_value = 0x00340000,
116 .cpu_name = "RS64-III (pulsar)",
117 .cpu_features = CPU_FTRS_RS64,
118 .cpu_user_features = COMMON_USER_PPC64,
119 .icache_bsize = 128,
120 .dcache_bsize = 128,
121 .num_pmcs = 8,
122 .cpu_setup = __setup_cpu_power3,
123#ifdef CONFIG_OPROFILE
124 .oprofile_cpu_type = "ppc64/rs64",
125 .oprofile_model = &op_model_rs64,
126#endif
127 },
128 { /* I-star */
129 .pvr_mask = 0xffff0000,
130 .pvr_value = 0x00360000,
131 .cpu_name = "RS64-III (icestar)",
132 .cpu_features = CPU_FTRS_RS64,
133 .cpu_user_features = COMMON_USER_PPC64,
134 .icache_bsize = 128,
135 .dcache_bsize = 128,
136 .num_pmcs = 8,
137 .cpu_setup = __setup_cpu_power3,
138#ifdef CONFIG_OPROFILE
139 .oprofile_cpu_type = "ppc64/rs64",
140 .oprofile_model = &op_model_rs64,
141#endif
142 },
143 { /* S-star */
144 .pvr_mask = 0xffff0000,
145 .pvr_value = 0x00370000,
146 .cpu_name = "RS64-IV (sstar)",
147 .cpu_features = CPU_FTRS_RS64,
148 .cpu_user_features = COMMON_USER_PPC64,
149 .icache_bsize = 128,
150 .dcache_bsize = 128,
151 .num_pmcs = 8,
152 .cpu_setup = __setup_cpu_power3,
153#ifdef CONFIG_OPROFILE
154 .oprofile_cpu_type = "ppc64/rs64",
155 .oprofile_model = &op_model_rs64,
156#endif
157 },
158 { /* Power4 */
159 .pvr_mask = 0xffff0000,
160 .pvr_value = 0x00350000,
161 .cpu_name = "POWER4 (gp)",
162 .cpu_features = CPU_FTRS_POWER4,
163 .cpu_user_features = COMMON_USER_PPC64,
164 .icache_bsize = 128,
165 .dcache_bsize = 128,
166 .num_pmcs = 8,
167 .cpu_setup = __setup_cpu_power4,
168#ifdef CONFIG_OPROFILE
169 .oprofile_cpu_type = "ppc64/power4",
170 .oprofile_model = &op_model_rs64,
171#endif
172 },
173 { /* Power4+ */
174 .pvr_mask = 0xffff0000,
175 .pvr_value = 0x00380000,
176 .cpu_name = "POWER4+ (gq)",
177 .cpu_features = CPU_FTRS_POWER4,
178 .cpu_user_features = COMMON_USER_PPC64,
179 .icache_bsize = 128,
180 .dcache_bsize = 128,
181 .num_pmcs = 8,
182 .cpu_setup = __setup_cpu_power4,
183#ifdef CONFIG_OPROFILE
184 .oprofile_cpu_type = "ppc64/power4",
185 .oprofile_model = &op_model_power4,
186#endif
187 },
188 { /* PPC970 */
189 .pvr_mask = 0xffff0000,
190 .pvr_value = 0x00390000,
191 .cpu_name = "PPC970",
192 .cpu_features = CPU_FTRS_PPC970,
193 .cpu_user_features = COMMON_USER_PPC64 |
194 PPC_FEATURE_HAS_ALTIVEC_COMP,
195 .icache_bsize = 128,
196 .dcache_bsize = 128,
197 .num_pmcs = 8,
198 .cpu_setup = __setup_cpu_ppc970,
199#ifdef CONFIG_OPROFILE
200 .oprofile_cpu_type = "ppc64/970",
201 .oprofile_model = &op_model_power4,
202#endif
203 },
204#endif /* CONFIG_PPC64 */
205#if defined(CONFIG_PPC64) || defined(CONFIG_POWER4)
206 { /* PPC970FX */
207 .pvr_mask = 0xffff0000,
208 .pvr_value = 0x003c0000,
209 .cpu_name = "PPC970FX",
210#ifdef CONFIG_PPC32
211 .cpu_features = CPU_FTRS_970_32,
212#else
213 .cpu_features = CPU_FTRS_PPC970,
214#endif
215 .cpu_user_features = COMMON_USER_PPC64 |
216 PPC_FEATURE_HAS_ALTIVEC_COMP,
217 .icache_bsize = 128,
218 .dcache_bsize = 128,
219 .num_pmcs = 8,
220 .cpu_setup = __setup_cpu_ppc970,
221#ifdef CONFIG_OPROFILE
222 .oprofile_cpu_type = "ppc64/970",
223 .oprofile_model = &op_model_power4,
224#endif
225 },
226#endif /* defined(CONFIG_PPC64) || defined(CONFIG_POWER4) */
227#ifdef CONFIG_PPC64
228 { /* PPC970MP */
229 .pvr_mask = 0xffff0000,
230 .pvr_value = 0x00440000,
231 .cpu_name = "PPC970MP",
232 .cpu_features = CPU_FTRS_PPC970,
233 .cpu_user_features = COMMON_USER_PPC64 |
234 PPC_FEATURE_HAS_ALTIVEC_COMP,
235 .icache_bsize = 128,
236 .dcache_bsize = 128,
237 .cpu_setup = __setup_cpu_ppc970,
238#ifdef CONFIG_OPROFILE
239 .oprofile_cpu_type = "ppc64/970",
240 .oprofile_model = &op_model_power4,
241#endif
242 },
243 { /* Power5 */
244 .pvr_mask = 0xffff0000,
245 .pvr_value = 0x003a0000,
246 .cpu_name = "POWER5 (gr)",
247 .cpu_features = CPU_FTRS_POWER5,
248 .cpu_user_features = COMMON_USER_PPC64,
249 .icache_bsize = 128,
250 .dcache_bsize = 128,
251 .num_pmcs = 6,
252 .cpu_setup = __setup_cpu_power4,
253#ifdef CONFIG_OPROFILE
254 .oprofile_cpu_type = "ppc64/power5",
255 .oprofile_model = &op_model_power4,
256#endif
257 },
258 { /* Power5 */
259 .pvr_mask = 0xffff0000,
260 .pvr_value = 0x003b0000,
261 .cpu_name = "POWER5 (gs)",
262 .cpu_features = CPU_FTRS_POWER5,
263 .cpu_user_features = COMMON_USER_PPC64,
264 .icache_bsize = 128,
265 .dcache_bsize = 128,
266 .num_pmcs = 6,
267 .cpu_setup = __setup_cpu_power4,
268#ifdef CONFIG_OPROFILE
269 .oprofile_cpu_type = "ppc64/power5",
270 .oprofile_model = &op_model_power4,
271#endif
272 },
273 { /* BE DD1.x */
274 .pvr_mask = 0xffff0000,
275 .pvr_value = 0x00700000,
276 .cpu_name = "Cell Broadband Engine",
277 .cpu_features = CPU_FTRS_CELL,
278 .cpu_user_features = COMMON_USER_PPC64 |
279 PPC_FEATURE_HAS_ALTIVEC_COMP,
280 .icache_bsize = 128,
281 .dcache_bsize = 128,
282 .cpu_setup = __setup_cpu_be,
283 },
284 { /* default match */
285 .pvr_mask = 0x00000000,
286 .pvr_value = 0x00000000,
287 .cpu_name = "POWER4 (compatible)",
288 .cpu_features = CPU_FTRS_COMPATIBLE,
289 .cpu_user_features = COMMON_USER_PPC64,
290 .icache_bsize = 128,
291 .dcache_bsize = 128,
292 .num_pmcs = 6,
293 .cpu_setup = __setup_cpu_power4,
294 }
295#endif /* CONFIG_PPC64 */
296#ifdef CONFIG_PPC32
297#if CLASSIC_PPC
298 { /* 601 */
299 .pvr_mask = 0xffff0000,
300 .pvr_value = 0x00010000,
301 .cpu_name = "601",
302 .cpu_features = CPU_FTRS_PPC601,
303 .cpu_user_features = COMMON_USER | PPC_FEATURE_601_INSTR |
304 PPC_FEATURE_UNIFIED_CACHE | PPC_FEATURE_NO_TB,
305 .icache_bsize = 32,
306 .dcache_bsize = 32,
307 },
308 { /* 603 */
309 .pvr_mask = 0xffff0000,
310 .pvr_value = 0x00030000,
311 .cpu_name = "603",
312 .cpu_features = CPU_FTRS_603,
313 .cpu_user_features = COMMON_USER,
314 .icache_bsize = 32,
315 .dcache_bsize = 32,
316 .cpu_setup = __setup_cpu_603
317 },
318 { /* 603e */
319 .pvr_mask = 0xffff0000,
320 .pvr_value = 0x00060000,
321 .cpu_name = "603e",
322 .cpu_features = CPU_FTRS_603,
323 .cpu_user_features = COMMON_USER,
324 .icache_bsize = 32,
325 .dcache_bsize = 32,
326 .cpu_setup = __setup_cpu_603
327 },
328 { /* 603ev */
329 .pvr_mask = 0xffff0000,
330 .pvr_value = 0x00070000,
331 .cpu_name = "603ev",
332 .cpu_features = CPU_FTRS_603,
333 .cpu_user_features = COMMON_USER,
334 .icache_bsize = 32,
335 .dcache_bsize = 32,
336 .cpu_setup = __setup_cpu_603
337 },
338 { /* 604 */
339 .pvr_mask = 0xffff0000,
340 .pvr_value = 0x00040000,
341 .cpu_name = "604",
342 .cpu_features = CPU_FTRS_604,
343 .cpu_user_features = COMMON_USER,
344 .icache_bsize = 32,
345 .dcache_bsize = 32,
346 .num_pmcs = 2,
347 .cpu_setup = __setup_cpu_604
348 },
349 { /* 604e */
350 .pvr_mask = 0xfffff000,
351 .pvr_value = 0x00090000,
352 .cpu_name = "604e",
353 .cpu_features = CPU_FTRS_604,
354 .cpu_user_features = COMMON_USER,
355 .icache_bsize = 32,
356 .dcache_bsize = 32,
357 .num_pmcs = 4,
358 .cpu_setup = __setup_cpu_604
359 },
360 { /* 604r */
361 .pvr_mask = 0xffff0000,
362 .pvr_value = 0x00090000,
363 .cpu_name = "604r",
364 .cpu_features = CPU_FTRS_604,
365 .cpu_user_features = COMMON_USER,
366 .icache_bsize = 32,
367 .dcache_bsize = 32,
368 .num_pmcs = 4,
369 .cpu_setup = __setup_cpu_604
370 },
371 { /* 604ev */
372 .pvr_mask = 0xffff0000,
373 .pvr_value = 0x000a0000,
374 .cpu_name = "604ev",
375 .cpu_features = CPU_FTRS_604,
376 .cpu_user_features = COMMON_USER,
377 .icache_bsize = 32,
378 .dcache_bsize = 32,
379 .num_pmcs = 4,
380 .cpu_setup = __setup_cpu_604
381 },
382 { /* 740/750 (0x4202, don't support TAU ?) */
383 .pvr_mask = 0xffffffff,
384 .pvr_value = 0x00084202,
385 .cpu_name = "740/750",
386 .cpu_features = CPU_FTRS_740_NOTAU,
387 .cpu_user_features = COMMON_USER,
388 .icache_bsize = 32,
389 .dcache_bsize = 32,
390 .num_pmcs = 4,
391 .cpu_setup = __setup_cpu_750
392 },
393 { /* 750CX (80100 and 8010x?) */
394 .pvr_mask = 0xfffffff0,
395 .pvr_value = 0x00080100,
396 .cpu_name = "750CX",
397 .cpu_features = CPU_FTRS_750,
398 .cpu_user_features = COMMON_USER,
399 .icache_bsize = 32,
400 .dcache_bsize = 32,
401 .num_pmcs = 4,
402 .cpu_setup = __setup_cpu_750cx
403 },
404 { /* 750CX (82201 and 82202) */
405 .pvr_mask = 0xfffffff0,
406 .pvr_value = 0x00082200,
407 .cpu_name = "750CX",
408 .cpu_features = CPU_FTRS_750,
409 .cpu_user_features = COMMON_USER,
410 .icache_bsize = 32,
411 .dcache_bsize = 32,
412 .num_pmcs = 4,
413 .cpu_setup = __setup_cpu_750cx
414 },
415 { /* 750CXe (82214) */
416 .pvr_mask = 0xfffffff0,
417 .pvr_value = 0x00082210,
418 .cpu_name = "750CXe",
419 .cpu_features = CPU_FTRS_750,
420 .cpu_user_features = COMMON_USER,
421 .icache_bsize = 32,
422 .dcache_bsize = 32,
423 .num_pmcs = 4,
424 .cpu_setup = __setup_cpu_750cx
425 },
426 { /* 750CXe "Gekko" (83214) */
427 .pvr_mask = 0xffffffff,
428 .pvr_value = 0x00083214,
429 .cpu_name = "750CXe",
430 .cpu_features = CPU_FTRS_750,
431 .cpu_user_features = COMMON_USER,
432 .icache_bsize = 32,
433 .dcache_bsize = 32,
434 .num_pmcs = 4,
435 .cpu_setup = __setup_cpu_750cx
436 },
437 { /* 745/755 */
438 .pvr_mask = 0xfffff000,
439 .pvr_value = 0x00083000,
440 .cpu_name = "745/755",
441 .cpu_features = CPU_FTRS_750,
442 .cpu_user_features = COMMON_USER,
443 .icache_bsize = 32,
444 .dcache_bsize = 32,
445 .num_pmcs = 4,
446 .cpu_setup = __setup_cpu_750
447 },
448 { /* 750FX rev 1.x */
449 .pvr_mask = 0xffffff00,
450 .pvr_value = 0x70000100,
451 .cpu_name = "750FX",
452 .cpu_features = CPU_FTRS_750FX1,
453 .cpu_user_features = COMMON_USER,
454 .icache_bsize = 32,
455 .dcache_bsize = 32,
456 .num_pmcs = 4,
457 .cpu_setup = __setup_cpu_750
458 },
459 { /* 750FX rev 2.0 must disable HID0[DPM] */
460 .pvr_mask = 0xffffffff,
461 .pvr_value = 0x70000200,
462 .cpu_name = "750FX",
463 .cpu_features = CPU_FTRS_750FX2,
464 .cpu_user_features = COMMON_USER,
465 .icache_bsize = 32,
466 .dcache_bsize = 32,
467 .num_pmcs = 4,
468 .cpu_setup = __setup_cpu_750
469 },
470 { /* 750FX (All revs except 2.0) */
471 .pvr_mask = 0xffff0000,
472 .pvr_value = 0x70000000,
473 .cpu_name = "750FX",
474 .cpu_features = CPU_FTRS_750FX,
475 .cpu_user_features = COMMON_USER,
476 .icache_bsize = 32,
477 .dcache_bsize = 32,
478 .num_pmcs = 4,
479 .cpu_setup = __setup_cpu_750fx
480 },
481 { /* 750GX */
482 .pvr_mask = 0xffff0000,
483 .pvr_value = 0x70020000,
484 .cpu_name = "750GX",
485 .cpu_features = CPU_FTRS_750GX,
486 .cpu_user_features = COMMON_USER,
487 .icache_bsize = 32,
488 .dcache_bsize = 32,
489 .num_pmcs = 4,
490 .cpu_setup = __setup_cpu_750fx
491 },
492 { /* 740/750 (L2CR bit need fixup for 740) */
493 .pvr_mask = 0xffff0000,
494 .pvr_value = 0x00080000,
495 .cpu_name = "740/750",
496 .cpu_features = CPU_FTRS_740,
497 .cpu_user_features = COMMON_USER,
498 .icache_bsize = 32,
499 .dcache_bsize = 32,
500 .num_pmcs = 4,
501 .cpu_setup = __setup_cpu_750
502 },
503 { /* 7400 rev 1.1 ? (no TAU) */
504 .pvr_mask = 0xffffffff,
505 .pvr_value = 0x000c1101,
506 .cpu_name = "7400 (1.1)",
507 .cpu_features = CPU_FTRS_7400_NOTAU,
508 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
509 .icache_bsize = 32,
510 .dcache_bsize = 32,
511 .num_pmcs = 4,
512 .cpu_setup = __setup_cpu_7400
513 },
514 { /* 7400 */
515 .pvr_mask = 0xffff0000,
516 .pvr_value = 0x000c0000,
517 .cpu_name = "7400",
518 .cpu_features = CPU_FTRS_7400,
519 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
520 .icache_bsize = 32,
521 .dcache_bsize = 32,
522 .num_pmcs = 4,
523 .cpu_setup = __setup_cpu_7400
524 },
525 { /* 7410 */
526 .pvr_mask = 0xffff0000,
527 .pvr_value = 0x800c0000,
528 .cpu_name = "7410",
529 .cpu_features = CPU_FTRS_7400,
530 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
531 .icache_bsize = 32,
532 .dcache_bsize = 32,
533 .num_pmcs = 4,
534 .cpu_setup = __setup_cpu_7410
535 },
536 { /* 7450 2.0 - no doze/nap */
537 .pvr_mask = 0xffffffff,
538 .pvr_value = 0x80000200,
539 .cpu_name = "7450",
540 .cpu_features = CPU_FTRS_7450_20,
541 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
542 .icache_bsize = 32,
543 .dcache_bsize = 32,
544 .num_pmcs = 6,
545 .cpu_setup = __setup_cpu_745x
546 },
547 { /* 7450 2.1 */
548 .pvr_mask = 0xffffffff,
549 .pvr_value = 0x80000201,
550 .cpu_name = "7450",
551 .cpu_features = CPU_FTRS_7450_21,
552 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
553 .icache_bsize = 32,
554 .dcache_bsize = 32,
555 .num_pmcs = 6,
556 .cpu_setup = __setup_cpu_745x
557 },
558 { /* 7450 2.3 and newer */
559 .pvr_mask = 0xffff0000,
560 .pvr_value = 0x80000000,
561 .cpu_name = "7450",
562 .cpu_features = CPU_FTRS_7450_23,
563 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
564 .icache_bsize = 32,
565 .dcache_bsize = 32,
566 .num_pmcs = 6,
567 .cpu_setup = __setup_cpu_745x
568 },
569 { /* 7455 rev 1.x */
570 .pvr_mask = 0xffffff00,
571 .pvr_value = 0x80010100,
572 .cpu_name = "7455",
573 .cpu_features = CPU_FTRS_7455_1,
574 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
575 .icache_bsize = 32,
576 .dcache_bsize = 32,
577 .num_pmcs = 6,
578 .cpu_setup = __setup_cpu_745x
579 },
580 { /* 7455 rev 2.0 */
581 .pvr_mask = 0xffffffff,
582 .pvr_value = 0x80010200,
583 .cpu_name = "7455",
584 .cpu_features = CPU_FTRS_7455_20,
585 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
586 .icache_bsize = 32,
587 .dcache_bsize = 32,
588 .num_pmcs = 6,
589 .cpu_setup = __setup_cpu_745x
590 },
591 { /* 7455 others */
592 .pvr_mask = 0xffff0000,
593 .pvr_value = 0x80010000,
594 .cpu_name = "7455",
595 .cpu_features = CPU_FTRS_7455,
596 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
597 .icache_bsize = 32,
598 .dcache_bsize = 32,
599 .num_pmcs = 6,
600 .cpu_setup = __setup_cpu_745x
601 },
602 { /* 7447/7457 Rev 1.0 */
603 .pvr_mask = 0xffffffff,
604 .pvr_value = 0x80020100,
605 .cpu_name = "7447/7457",
606 .cpu_features = CPU_FTRS_7447_10,
607 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
608 .icache_bsize = 32,
609 .dcache_bsize = 32,
610 .num_pmcs = 6,
611 .cpu_setup = __setup_cpu_745x
612 },
613 { /* 7447/7457 Rev 1.1 */
614 .pvr_mask = 0xffffffff,
615 .pvr_value = 0x80020101,
616 .cpu_name = "7447/7457",
617 .cpu_features = CPU_FTRS_7447_10,
618 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
619 .icache_bsize = 32,
620 .dcache_bsize = 32,
621 .num_pmcs = 6,
622 .cpu_setup = __setup_cpu_745x
623 },
624 { /* 7447/7457 Rev 1.2 and later */
625 .pvr_mask = 0xffff0000,
626 .pvr_value = 0x80020000,
627 .cpu_name = "7447/7457",
628 .cpu_features = CPU_FTRS_7447,
629 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
630 .icache_bsize = 32,
631 .dcache_bsize = 32,
632 .num_pmcs = 6,
633 .cpu_setup = __setup_cpu_745x
634 },
635 { /* 7447A */
636 .pvr_mask = 0xffff0000,
637 .pvr_value = 0x80030000,
638 .cpu_name = "7447A",
639 .cpu_features = CPU_FTRS_7447A,
640 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
641 .icache_bsize = 32,
642 .dcache_bsize = 32,
643 .num_pmcs = 6,
644 .cpu_setup = __setup_cpu_745x
645 },
646 { /* 7448 */
647 .pvr_mask = 0xffff0000,
648 .pvr_value = 0x80040000,
649 .cpu_name = "7448",
650 .cpu_features = CPU_FTRS_7447A,
651 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
652 .icache_bsize = 32,
653 .dcache_bsize = 32,
654 .num_pmcs = 6,
655 .cpu_setup = __setup_cpu_745x
656 },
657 { /* 82xx (8240, 8245, 8260 are all 603e cores) */
658 .pvr_mask = 0x7fff0000,
659 .pvr_value = 0x00810000,
660 .cpu_name = "82xx",
661 .cpu_features = CPU_FTRS_82XX,
662 .cpu_user_features = COMMON_USER,
663 .icache_bsize = 32,
664 .dcache_bsize = 32,
665 .cpu_setup = __setup_cpu_603
666 },
667 { /* All G2_LE (603e core, plus some) have the same pvr */
668 .pvr_mask = 0x7fff0000,
669 .pvr_value = 0x00820000,
670 .cpu_name = "G2_LE",
671 .cpu_features = CPU_FTRS_G2_LE,
672 .cpu_user_features = COMMON_USER,
673 .icache_bsize = 32,
674 .dcache_bsize = 32,
675 .cpu_setup = __setup_cpu_603
676 },
677 { /* e300 (a 603e core, plus some) on 83xx */
678 .pvr_mask = 0x7fff0000,
679 .pvr_value = 0x00830000,
680 .cpu_name = "e300",
681 .cpu_features = CPU_FTRS_E300,
682 .cpu_user_features = COMMON_USER,
683 .icache_bsize = 32,
684 .dcache_bsize = 32,
685 .cpu_setup = __setup_cpu_603
686 },
687 { /* default match, we assume split I/D cache & TB (non-601)... */
688 .pvr_mask = 0x00000000,
689 .pvr_value = 0x00000000,
690 .cpu_name = "(generic PPC)",
691 .cpu_features = CPU_FTRS_CLASSIC32,
692 .cpu_user_features = COMMON_USER,
693 .icache_bsize = 32,
694 .dcache_bsize = 32,
695 },
696#endif /* CLASSIC_PPC */
697#ifdef CONFIG_8xx
698 { /* 8xx */
699 .pvr_mask = 0xffff0000,
700 .pvr_value = 0x00500000,
701 .cpu_name = "8xx",
702 /* CPU_FTR_MAYBE_CAN_DOZE is possible
703 * if the 8xx code is there... */
704 .cpu_features = CPU_FTRS_8XX,
705 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
706 .icache_bsize = 16,
707 .dcache_bsize = 16,
708 },
709#endif /* CONFIG_8xx */
710#ifdef CONFIG_40x
711 { /* 403GC */
712 .pvr_mask = 0xffffff00,
713 .pvr_value = 0x00200200,
714 .cpu_name = "403GC",
715 .cpu_features = CPU_FTRS_40X,
716 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
717 .icache_bsize = 16,
718 .dcache_bsize = 16,
719 },
720 { /* 403GCX */
721 .pvr_mask = 0xffffff00,
722 .pvr_value = 0x00201400,
723 .cpu_name = "403GCX",
724 .cpu_features = CPU_FTRS_40X,
725 .cpu_user_features = PPC_FEATURE_32 |
726 PPC_FEATURE_HAS_MMU | PPC_FEATURE_NO_TB,
727 .icache_bsize = 16,
728 .dcache_bsize = 16,
729 },
730 { /* 403G ?? */
731 .pvr_mask = 0xffff0000,
732 .pvr_value = 0x00200000,
733 .cpu_name = "403G ??",
734 .cpu_features = CPU_FTRS_40X,
735 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
736 .icache_bsize = 16,
737 .dcache_bsize = 16,
738 },
739 { /* 405GP */
740 .pvr_mask = 0xffff0000,
741 .pvr_value = 0x40110000,
742 .cpu_name = "405GP",
743 .cpu_features = CPU_FTRS_40X,
744 .cpu_user_features = PPC_FEATURE_32 |
745 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
746 .icache_bsize = 32,
747 .dcache_bsize = 32,
748 },
749 { /* STB 03xxx */
750 .pvr_mask = 0xffff0000,
751 .pvr_value = 0x40130000,
752 .cpu_name = "STB03xxx",
753 .cpu_features = CPU_FTRS_40X,
754 .cpu_user_features = PPC_FEATURE_32 |
755 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
756 .icache_bsize = 32,
757 .dcache_bsize = 32,
758 },
759 { /* STB 04xxx */
760 .pvr_mask = 0xffff0000,
761 .pvr_value = 0x41810000,
762 .cpu_name = "STB04xxx",
763 .cpu_features = CPU_FTRS_40X,
764 .cpu_user_features = PPC_FEATURE_32 |
765 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
766 .icache_bsize = 32,
767 .dcache_bsize = 32,
768 },
769 { /* NP405L */
770 .pvr_mask = 0xffff0000,
771 .pvr_value = 0x41610000,
772 .cpu_name = "NP405L",
773 .cpu_features = CPU_FTRS_40X,
774 .cpu_user_features = PPC_FEATURE_32 |
775 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
776 .icache_bsize = 32,
777 .dcache_bsize = 32,
778 },
779 { /* NP4GS3 */
780 .pvr_mask = 0xffff0000,
781 .pvr_value = 0x40B10000,
782 .cpu_name = "NP4GS3",
783 .cpu_features = CPU_FTRS_40X,
784 .cpu_user_features = PPC_FEATURE_32 |
785 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
786 .icache_bsize = 32,
787 .dcache_bsize = 32,
788 },
789 { /* NP405H */
790 .pvr_mask = 0xffff0000,
791 .pvr_value = 0x41410000,
792 .cpu_name = "NP405H",
793 .cpu_features = CPU_FTRS_40X,
794 .cpu_user_features = PPC_FEATURE_32 |
795 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
796 .icache_bsize = 32,
797 .dcache_bsize = 32,
798 },
799 { /* 405GPr */
800 .pvr_mask = 0xffff0000,
801 .pvr_value = 0x50910000,
802 .cpu_name = "405GPr",
803 .cpu_features = CPU_FTRS_40X,
804 .cpu_user_features = PPC_FEATURE_32 |
805 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
806 .icache_bsize = 32,
807 .dcache_bsize = 32,
808 },
809 { /* STBx25xx */
810 .pvr_mask = 0xffff0000,
811 .pvr_value = 0x51510000,
812 .cpu_name = "STBx25xx",
813 .cpu_features = CPU_FTRS_40X,
814 .cpu_user_features = PPC_FEATURE_32 |
815 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
816 .icache_bsize = 32,
817 .dcache_bsize = 32,
818 },
819 { /* 405LP */
820 .pvr_mask = 0xffff0000,
821 .pvr_value = 0x41F10000,
822 .cpu_name = "405LP",
823 .cpu_features = CPU_FTRS_40X,
824 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
825 .icache_bsize = 32,
826 .dcache_bsize = 32,
827 },
828 { /* Xilinx Virtex-II Pro */
829 .pvr_mask = 0xffff0000,
830 .pvr_value = 0x20010000,
831 .cpu_name = "Virtex-II Pro",
832 .cpu_features = CPU_FTRS_40X,
833 .cpu_user_features = PPC_FEATURE_32 |
834 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
835 .icache_bsize = 32,
836 .dcache_bsize = 32,
837 },
838 { /* 405EP */
839 .pvr_mask = 0xffff0000,
840 .pvr_value = 0x51210000,
841 .cpu_name = "405EP",
842 .cpu_features = CPU_FTRS_40X,
843 .cpu_user_features = PPC_FEATURE_32 |
844 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
845 .icache_bsize = 32,
846 .dcache_bsize = 32,
847 },
848
849#endif /* CONFIG_40x */
850#ifdef CONFIG_44x
851 {
852 .pvr_mask = 0xf0000fff,
853 .pvr_value = 0x40000850,
854 .cpu_name = "440EP Rev. A",
855 .cpu_features = CPU_FTRS_44X,
856 .cpu_user_features = COMMON_USER, /* 440EP has an FPU */
857 .icache_bsize = 32,
858 .dcache_bsize = 32,
859 },
860 {
861 .pvr_mask = 0xf0000fff,
862 .pvr_value = 0x400008d3,
863 .cpu_name = "440EP Rev. B",
864 .cpu_features = CPU_FTRS_44X,
865 .cpu_user_features = COMMON_USER, /* 440EP has an FPU */
866 .icache_bsize = 32,
867 .dcache_bsize = 32,
868 },
869 { /* 440GP Rev. B */
870 .pvr_mask = 0xf0000fff,
871 .pvr_value = 0x40000440,
872 .cpu_name = "440GP Rev. B",
873 .cpu_features = CPU_FTRS_44X,
874 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
875 .icache_bsize = 32,
876 .dcache_bsize = 32,
877 },
878 { /* 440GP Rev. C */
879 .pvr_mask = 0xf0000fff,
880 .pvr_value = 0x40000481,
881 .cpu_name = "440GP Rev. C",
882 .cpu_features = CPU_FTRS_44X,
883 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
884 .icache_bsize = 32,
885 .dcache_bsize = 32,
886 },
887 { /* 440GX Rev. A */
888 .pvr_mask = 0xf0000fff,
889 .pvr_value = 0x50000850,
890 .cpu_name = "440GX Rev. A",
891 .cpu_features = CPU_FTRS_44X,
892 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
893 .icache_bsize = 32,
894 .dcache_bsize = 32,
895 },
896 { /* 440GX Rev. B */
897 .pvr_mask = 0xf0000fff,
898 .pvr_value = 0x50000851,
899 .cpu_name = "440GX Rev. B",
900 .cpu_features = CPU_FTRS_44X,
901 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
902 .icache_bsize = 32,
903 .dcache_bsize = 32,
904 },
905 { /* 440GX Rev. C */
906 .pvr_mask = 0xf0000fff,
907 .pvr_value = 0x50000892,
908 .cpu_name = "440GX Rev. C",
909 .cpu_features = CPU_FTRS_44X,
910 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
911 .icache_bsize = 32,
912 .dcache_bsize = 32,
913 },
914 { /* 440GX Rev. F */
915 .pvr_mask = 0xf0000fff,
916 .pvr_value = 0x50000894,
917 .cpu_name = "440GX Rev. F",
918 .cpu_features = CPU_FTRS_44X,
919 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
920 .icache_bsize = 32,
921 .dcache_bsize = 32,
922 },
923 { /* 440SP Rev. A */
924 .pvr_mask = 0xff000fff,
925 .pvr_value = 0x53000891,
926 .cpu_name = "440SP Rev. A",
927 .cpu_features = CPU_FTRS_44X,
928 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
929 .icache_bsize = 32,
930 .dcache_bsize = 32,
931 },
932#endif /* CONFIG_44x */
933#ifdef CONFIG_FSL_BOOKE
934 { /* e200z5 */
935 .pvr_mask = 0xfff00000,
936 .pvr_value = 0x81000000,
937 .cpu_name = "e200z5",
938 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
939 .cpu_features = CPU_FTRS_E200,
940 .cpu_user_features = PPC_FEATURE_32 |
941 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_EFP_SINGLE |
942 PPC_FEATURE_UNIFIED_CACHE,
943 .dcache_bsize = 32,
944 },
945 { /* e200z6 */
946 .pvr_mask = 0xfff00000,
947 .pvr_value = 0x81100000,
948 .cpu_name = "e200z6",
949 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
950 .cpu_features = CPU_FTRS_E200,
951 .cpu_user_features = PPC_FEATURE_32 |
952 PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP |
953 PPC_FEATURE_HAS_EFP_SINGLE |
954 PPC_FEATURE_UNIFIED_CACHE,
955 .dcache_bsize = 32,
956 },
957 { /* e500 */
958 .pvr_mask = 0xffff0000,
959 .pvr_value = 0x80200000,
960 .cpu_name = "e500",
961 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
962 .cpu_features = CPU_FTRS_E500,
963 .cpu_user_features = PPC_FEATURE_32 |
964 PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP |
965 PPC_FEATURE_HAS_EFP_SINGLE,
966 .icache_bsize = 32,
967 .dcache_bsize = 32,
968 .num_pmcs = 4,
969 },
970 { /* e500v2 */
971 .pvr_mask = 0xffff0000,
972 .pvr_value = 0x80210000,
973 .cpu_name = "e500v2",
974 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
975 .cpu_features = CPU_FTRS_E500_2,
976 .cpu_user_features = PPC_FEATURE_32 |
977 PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP |
978 PPC_FEATURE_HAS_EFP_SINGLE | PPC_FEATURE_HAS_EFP_DOUBLE,
979 .icache_bsize = 32,
980 .dcache_bsize = 32,
981 .num_pmcs = 4,
982 },
983#endif
984#if !CLASSIC_PPC
985 { /* default match */
986 .pvr_mask = 0x00000000,
987 .pvr_value = 0x00000000,
988 .cpu_name = "(generic PPC)",
989 .cpu_features = CPU_FTRS_GENERIC_32,
990 .cpu_user_features = PPC_FEATURE_32,
991 .icache_bsize = 32,
992 .dcache_bsize = 32,
993 }
994#endif /* !CLASSIC_PPC */
995#endif /* CONFIG_PPC32 */
996};
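
cpu_specs[] is consumed by masking the processor version register (PVR)
against each entry in order until one matches; the zero-mask "default match"
entries above guarantee the walk terminates. A minimal sketch of that lookup
(identify_cpu_sketch is a hypothetical name; the real consumer also runs the
entry's cpu_setup hook and publishes the result through cur_cpu_spec):

/* Minimal sketch of the PVR match over cpu_specs[].  The terminating
 * zero-mask entries make this loop total. */
static struct cpu_spec *identify_cpu_sketch(unsigned int pvr)
{
	struct cpu_spec *s = cpu_specs;

	while ((pvr & s->pvr_mask) != s->pvr_value)
		s++;
	return s;
}

This is also why more specific entries (full 0xffffffff masks for single
revisions) must precede the looser 0xffff0000 family matches in the table.
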
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
new file mode 100644
index 000000000000..2e99ae41723c
--- /dev/null
+++ b/arch/powerpc/kernel/entry_32.S
@@ -0,0 +1,1000 @@
1/*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
5 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
6 * Adapted for Power Macintosh by Paul Mackerras.
7 * Low-level exception handlers and MMU support
8 * rewritten by Paul Mackerras.
9 * Copyright (C) 1996 Paul Mackerras.
10 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 * This file contains the system call entry code, context switch
13 * code, and exception/interrupt return code for PowerPC.
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 *
20 */
21
22#include <linux/config.h>
23#include <linux/errno.h>
24#include <linux/sys.h>
25#include <linux/threads.h>
26#include <asm/reg.h>
27#include <asm/page.h>
28#include <asm/mmu.h>
29#include <asm/cputable.h>
30#include <asm/thread_info.h>
31#include <asm/ppc_asm.h>
32#include <asm/asm-offsets.h>
33#include <asm/unistd.h>
34
35#undef SHOW_SYSCALLS
36#undef SHOW_SYSCALLS_TASK
37
38/*
39 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
40 */
41#if MSR_KERNEL >= 0x10000
42#define LOAD_MSR_KERNEL(r, x) lis r,(x)@h; ori r,r,(x)@l
43#else
44#define LOAD_MSR_KERNEL(r, x) li r,(x)
45#endif
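
LOAD_MSR_KERNEL needs the two-instruction lis/ori form because a single li
carries only a 16-bit signed immediate. A worked C sketch of what the @h/@l
split computes, using a hypothetical 32-bit value rather than the real
MSR_KERNEL:

#include <assert.h>

/* Sketch of the lis/ori pair: @h is the high halfword, @l the low
 * halfword; ORing the shifted high half with the low half recovers
 * the constant exactly.  0x00021000 is a hypothetical value. */
static void load_msr_kernel_sketch(void)
{
	unsigned int x  = 0x00021000;          /* hypothetical MSR   */
	unsigned int hi = (x >> 16) & 0xffff;  /* (x)@h, set by lis  */
	unsigned int lo = x & 0xffff;          /* (x)@l, ORed by ori */

	assert(((hi << 16) | lo) == x);
}
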
46
47#ifdef CONFIG_BOOKE
48#include "head_booke.h"
49#define TRANSFER_TO_HANDLER_EXC_LEVEL(exc_level) \
50 mtspr exc_level##_SPRG,r8; \
51 BOOKE_LOAD_EXC_LEVEL_STACK(exc_level); \
52 lwz r0,GPR10-INT_FRAME_SIZE(r8); \
53 stw r0,GPR10(r11); \
54 lwz r0,GPR11-INT_FRAME_SIZE(r8); \
55 stw r0,GPR11(r11); \
56 mfspr r8,exc_level##_SPRG
57
58 .globl mcheck_transfer_to_handler
59mcheck_transfer_to_handler:
60 TRANSFER_TO_HANDLER_EXC_LEVEL(MCHECK)
61 b transfer_to_handler_full
62
63 .globl debug_transfer_to_handler
64debug_transfer_to_handler:
65 TRANSFER_TO_HANDLER_EXC_LEVEL(DEBUG)
66 b transfer_to_handler_full
67
68 .globl crit_transfer_to_handler
69crit_transfer_to_handler:
70 TRANSFER_TO_HANDLER_EXC_LEVEL(CRIT)
71 /* fall through */
72#endif
73
74#ifdef CONFIG_40x
75 .globl crit_transfer_to_handler
76crit_transfer_to_handler:
77 lwz r0,crit_r10@l(0)
78 stw r0,GPR10(r11)
79 lwz r0,crit_r11@l(0)
80 stw r0,GPR11(r11)
81 /* fall through */
82#endif
83
84/*
85 * This code finishes saving the registers to the exception frame
86 * and jumps to the appropriate handler for the exception, turning
87 * on address translation.
88 * Note that we rely on the caller having set cr0.eq iff the exception
89 * occurred in kernel mode (i.e. MSR:PR = 0).
90 */
91 .globl transfer_to_handler_full
92transfer_to_handler_full:
93 SAVE_NVGPRS(r11)
94 /* fall through */
95
96 .globl transfer_to_handler
97transfer_to_handler:
98 stw r2,GPR2(r11)
99 stw r12,_NIP(r11)
100 stw r9,_MSR(r11)
101 andi. r2,r9,MSR_PR
102 mfctr r12
103 mfspr r2,SPRN_XER
104 stw r12,_CTR(r11)
105 stw r2,_XER(r11)
106 mfspr r12,SPRN_SPRG3
107 addi r2,r12,-THREAD
108 tovirt(r2,r2) /* set r2 to current */
109 beq 2f /* if from user, fix up THREAD.regs */
110 addi r11,r1,STACK_FRAME_OVERHEAD
111 stw r11,PT_REGS(r12)
112#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
113 /* Check to see if the dbcr0 register is set up to debug. Use the
114 single-step bit to do this. */
115 lwz r12,THREAD_DBCR0(r12)
116 andis. r12,r12,DBCR0_IC@h
117 beq+ 3f
118 /* From user and task is ptraced - load up global dbcr0 */
119 li r12,-1 /* clear all pending debug events */
120 mtspr SPRN_DBSR,r12
121 lis r11,global_dbcr0@ha
122 tophys(r11,r11)
123 addi r11,r11,global_dbcr0@l
124 lwz r12,0(r11)
125 mtspr SPRN_DBCR0,r12
126 lwz r12,4(r11)
127 addi r12,r12,-1
128 stw r12,4(r11)
129#endif
130 b 3f
1312: /* if from kernel, check interrupted DOZE/NAP mode and
132 * check for stack overflow
133 */
134#ifdef CONFIG_6xx
135 mfspr r11,SPRN_HID0
136 mtcr r11
137BEGIN_FTR_SECTION
138 bt- 8,power_save_6xx_restore /* Check DOZE */
139END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
140BEGIN_FTR_SECTION
141 bt- 9,power_save_6xx_restore /* Check NAP */
142END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
143#endif /* CONFIG_6xx */
144 .globl transfer_to_handler_cont
145transfer_to_handler_cont:
146 lwz r11,THREAD_INFO-THREAD(r12)
147 cmplw r1,r11 /* if r1 <= current->thread_info */
148 ble- stack_ovf /* then the kernel stack overflowed */
1493:
150 mflr r9
151 lwz r11,0(r9) /* virtual address of handler */
152 lwz r9,4(r9) /* where to go when done */
153 FIX_SRR1(r10,r12)
154 mtspr SPRN_SRR0,r11
155 mtspr SPRN_SRR1,r10
156 mtlr r9
157 SYNC
158 RFI /* jump to handler, enable MMU */
159
160/*
161 * On kernel stack overflow, load up an initial stack pointer
162 * and call StackOverflow(regs), which should not return.
163 */
164stack_ovf:
165 /* sometimes we use a statically-allocated stack, which is OK. */
166 lis r11,_end@h
167 ori r11,r11,_end@l
168 cmplw r1,r11
169 ble 3b /* r1 <= &_end is OK */
170 SAVE_NVGPRS(r11)
171 addi r3,r1,STACK_FRAME_OVERHEAD
172 lis r1,init_thread_union@ha
173 addi r1,r1,init_thread_union@l
174 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
175 lis r9,StackOverflow@ha
176 addi r9,r9,StackOverflow@l
177 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
178 FIX_SRR1(r10,r12)
179 mtspr SPRN_SRR0,r9
180 mtspr SPRN_SRR1,r10
181 SYNC
182 RFI
183
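
The overflow test in transfer_to_handler_cont works because the kernel stack
grows downward toward the thread_info structure at its base; in C terms it is
roughly:

/* Sketch of the check: r1 at or below the thread_info base means the
 * kernel stack overflowed.  stack_ovf additionally tolerates r1 below
 * _end for statically-allocated early stacks. */
static int kernel_stack_overflowed(unsigned long r1,
				   unsigned long thread_info_base)
{
	return r1 <= thread_info_base;
}
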
184/*
185 * Handle a system call.
186 */
187 .stabs "arch/powerpc/kernel/",N_SO,0,0,0f
188 .stabs "entry_32.S",N_SO,0,0,0f
1890:
190
191_GLOBAL(DoSyscall)
192 stw r0,THREAD+LAST_SYSCALL(r2)
193 stw r3,ORIG_GPR3(r1)
194 li r12,0
195 stw r12,RESULT(r1)
196 lwz r11,_CCR(r1) /* Clear SO bit in CR */
197 rlwinm r11,r11,0,4,2
198 stw r11,_CCR(r1)
199#ifdef SHOW_SYSCALLS
200 bl do_show_syscall
201#endif /* SHOW_SYSCALLS */
202 rlwinm r10,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
203 li r11,0
204 stb r11,TI_SC_NOERR(r10)
205 lwz r11,TI_FLAGS(r10)
206 andi. r11,r11,_TIF_SYSCALL_T_OR_A
207 bne- syscall_dotrace
208syscall_dotrace_cont:
209 cmplwi 0,r0,NR_syscalls
210 lis r10,sys_call_table@h
211 ori r10,r10,sys_call_table@l
212 slwi r0,r0,2
213 bge- 66f
214 lwzx r10,r10,r0 /* Fetch system call handler [ptr] */
215 mtlr r10
216 addi r9,r1,STACK_FRAME_OVERHEAD
217 PPC440EP_ERR42
218 blrl /* Call handler */
219 .globl ret_from_syscall
220ret_from_syscall:
221#ifdef SHOW_SYSCALLS
222 bl do_show_syscall_exit
223#endif
224 mr r6,r3
225 li r11,-_LAST_ERRNO
226 cmplw 0,r3,r11
227 rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
228 blt+ 30f
229 lbz r11,TI_SC_NOERR(r12)
230 cmpwi r11,0
231 bne 30f
232 neg r3,r3
233 lwz r10,_CCR(r1) /* Set SO bit in CR */
234 oris r10,r10,0x1000
235 stw r10,_CCR(r1)
236
237 /* disable interrupts so current_thread_info()->flags can't change */
23830: LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
239 SYNC
240 MTMSRD(r10)
241 lwz r9,TI_FLAGS(r12)
242 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
243 bne- syscall_exit_work
244syscall_exit_cont:
245#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
246 /* If the process has its own DBCR0 value, load it up. The single
247 step bit tells us that dbcr0 should be loaded. */
248 lwz r0,THREAD+THREAD_DBCR0(r2)
249 andis. r10,r0,DBCR0_IC@h
250 bnel- load_dbcr0
251#endif
252 stwcx. r0,0,r1 /* to clear the reservation */
253 lwz r4,_LINK(r1)
254 lwz r5,_CCR(r1)
255 mtlr r4
256 mtcr r5
257 lwz r7,_NIP(r1)
258 lwz r8,_MSR(r1)
259 FIX_SRR1(r8, r0)
260 lwz r2,GPR2(r1)
261 lwz r1,GPR1(r1)
262 mtspr SPRN_SRR0,r7
263 mtspr SPRN_SRR1,r8
264 SYNC
265 RFI
266
26766: li r3,-ENOSYS
268 b ret_from_syscall
269
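
In C terms, DoSyscall and ret_from_syscall implement the following dispatch
and error convention: the syscall number indexes sys_call_table, out-of-range
numbers return -ENOSYS, and a result in the errno range is negated with CR0's
summary-overflow bit set so the C library can tell errors from large
successful return values. A hedged sketch (syscall_fn_t, the so out-parameter,
and the noerr flag are illustrative stand-ins for the real table type, CR0.SO,
and TI_SC_NOERR):

typedef long (*syscall_fn_t)(long, long, long, long, long, long);
extern syscall_fn_t sys_call_table[];

/* Sketch of the dispatch and error convention.  A return value in
 * [-_LAST_ERRNO, -1] is an error unless the syscall flagged itself
 * as "no error" (modelled by noerr). */
static long do_syscall_sketch(unsigned long nr, long a[6],
			      int noerr, int *so)
{
	long ret;

	if (nr >= NR_syscalls)
		ret = -ENOSYS;
	else
		ret = sys_call_table[nr](a[0], a[1], a[2],
					 a[3], a[4], a[5]);

	*so = 0;
	if (ret < 0 && ret >= -_LAST_ERRNO && !noerr) {
		*so = 1;       /* CR0.SO: tells libc this is an error */
		ret = -ret;    /* hand back a positive errno          */
	}
	return ret;
}
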
270 .globl ret_from_fork
271ret_from_fork:
272 REST_NVGPRS(r1)
273 bl schedule_tail
274 li r3,0
275 b ret_from_syscall
276
277/* Traced system call support */
278syscall_dotrace:
279 SAVE_NVGPRS(r1)
280 li r0,0xc00
281 stw r0,_TRAP(r1)
282 addi r3,r1,STACK_FRAME_OVERHEAD
283 bl do_syscall_trace_enter
284 lwz r0,GPR0(r1) /* Restore original registers */
285 lwz r3,GPR3(r1)
286 lwz r4,GPR4(r1)
287 lwz r5,GPR5(r1)
288 lwz r6,GPR6(r1)
289 lwz r7,GPR7(r1)
290 lwz r8,GPR8(r1)
291 REST_NVGPRS(r1)
292 b syscall_dotrace_cont
293
294syscall_exit_work:
295 stw r6,RESULT(r1) /* Save result */
296 stw r3,GPR3(r1) /* Update return value */
297 andi. r0,r9,_TIF_SYSCALL_T_OR_A
298 beq 5f
299 ori r10,r10,MSR_EE
300 SYNC
301 MTMSRD(r10) /* re-enable interrupts */
302 lwz r4,_TRAP(r1)
303 andi. r4,r4,1
304 beq 4f
305 SAVE_NVGPRS(r1)
306 li r4,0xc00
307 stw r4,_TRAP(r1)
3084:
309 addi r3,r1,STACK_FRAME_OVERHEAD
310 bl do_syscall_trace_leave
311 REST_NVGPRS(r1)
3122:
313 lwz r3,GPR3(r1)
314 LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
315 SYNC
316 MTMSRD(r10) /* disable interrupts again */
317 rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
318 lwz r9,TI_FLAGS(r12)
3195:
320 andi. r0,r9,_TIF_NEED_RESCHED
321 bne 1f
322 lwz r5,_MSR(r1)
323 andi. r5,r5,MSR_PR
324 beq syscall_exit_cont
325 andi. r0,r9,_TIF_SIGPENDING
326 beq syscall_exit_cont
327 b do_user_signal
3281:
329 ori r10,r10,MSR_EE
330 SYNC
331 MTMSRD(r10) /* re-enable interrupts */
332 bl schedule
333 b 2b
334
335#ifdef SHOW_SYSCALLS
336do_show_syscall:
337#ifdef SHOW_SYSCALLS_TASK
338 lis r11,show_syscalls_task@ha
339 lwz r11,show_syscalls_task@l(r11)
340 cmp 0,r2,r11
341 bnelr
342#endif
343 stw r31,GPR31(r1)
344 mflr r31
345 lis r3,7f@ha
346 addi r3,r3,7f@l
347 lwz r4,GPR0(r1)
348 lwz r5,GPR3(r1)
349 lwz r6,GPR4(r1)
350 lwz r7,GPR5(r1)
351 lwz r8,GPR6(r1)
352 lwz r9,GPR7(r1)
353 bl printk
354 lis r3,77f@ha
355 addi r3,r3,77f@l
356 lwz r4,GPR8(r1)
357 mr r5,r2
358 bl printk
359 lwz r0,GPR0(r1)
360 lwz r3,GPR3(r1)
361 lwz r4,GPR4(r1)
362 lwz r5,GPR5(r1)
363 lwz r6,GPR6(r1)
364 lwz r7,GPR7(r1)
365 lwz r8,GPR8(r1)
366 mtlr r31
367 lwz r31,GPR31(r1)
368 blr
369
370do_show_syscall_exit:
371#ifdef SHOW_SYSCALLS_TASK
372 lis r11,show_syscalls_task@ha
373 lwz r11,show_syscalls_task@l(r11)
374 cmp 0,r2,r11
375 bnelr
376#endif
377 stw r31,GPR31(r1)
378 mflr r31
379 stw r3,RESULT(r1) /* Save result */
380 mr r4,r3
381 lis r3,79f@ha
382 addi r3,r3,79f@l
383 bl printk
384 lwz r3,RESULT(r1)
385 mtlr r31
386 lwz r31,GPR31(r1)
387 blr
388
3897: .string "syscall %d(%x, %x, %x, %x, %x, "
39077: .string "%x), current=%p\n"
39179: .string " -> %x\n"
392 .align 2,0
393
394#ifdef SHOW_SYSCALLS_TASK
395 .data
396 .globl show_syscalls_task
397show_syscalls_task:
398 .long -1
399 .text
400#endif
401#endif /* SHOW_SYSCALLS */
402
403/*
404 * The sigsuspend and rt_sigsuspend system calls can call do_signal
405 * and thus put the process into the stopped state where we might
406 * want to examine its user state with ptrace. Therefore we need
407 * to save all the nonvolatile registers (r13 - r31) before calling
408 * the C code.
409 */
410 .globl ppc_sigsuspend
411ppc_sigsuspend:
412 SAVE_NVGPRS(r1)
413 lwz r0,_TRAP(r1)
414 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
415 stw r0,_TRAP(r1) /* register set saved */
416 b sys_sigsuspend
417
418 .globl ppc_rt_sigsuspend
419ppc_rt_sigsuspend:
420 SAVE_NVGPRS(r1)
421 lwz r0,_TRAP(r1)
422 rlwinm r0,r0,0,0,30
423 stw r0,_TRAP(r1)
424 b sys_rt_sigsuspend
425
426 .globl ppc_fork
427ppc_fork:
428 SAVE_NVGPRS(r1)
429 lwz r0,_TRAP(r1)
430 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
431 stw r0,_TRAP(r1) /* register set saved */
432 b sys_fork
433
434 .globl ppc_vfork
435ppc_vfork:
436 SAVE_NVGPRS(r1)
437 lwz r0,_TRAP(r1)
438 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
439 stw r0,_TRAP(r1) /* register set saved */
440 b sys_vfork
441
442 .globl ppc_clone
443ppc_clone:
444 SAVE_NVGPRS(r1)
445 lwz r0,_TRAP(r1)
446 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
447 stw r0,_TRAP(r1) /* register set saved */
448 b sys_clone
449
450 .globl ppc_swapcontext
451ppc_swapcontext:
452 SAVE_NVGPRS(r1)
453 lwz r0,_TRAP(r1)
454 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
455 stw r0,_TRAP(r1) /* register set saved */
456 b sys_swapcontext
457
458/*
459 * Top-level page fault handling.
460 * This is in assembler because if do_page_fault tells us that
461 * it is a bad kernel page fault, we want to save the non-volatile
462 * registers before calling bad_page_fault.
463 */
464 .globl handle_page_fault
465handle_page_fault:
466 stw r4,_DAR(r1)
467 addi r3,r1,STACK_FRAME_OVERHEAD
468 bl do_page_fault
469 cmpwi r3,0
470 beq+ ret_from_except
471 SAVE_NVGPRS(r1)
472 lwz r0,_TRAP(r1)
473 clrrwi r0,r0,1
474 stw r0,_TRAP(r1)
475 mr r5,r3
476 addi r3,r1,STACK_FRAME_OVERHEAD
477 lwz r4,_DAR(r1)
478 bl bad_page_fault
479 b ret_from_except_full
480
481/*
482 * This routine switches between two different tasks. The process
483 * state of one is saved on its kernel stack. Then the state
484 * of the other is restored from its kernel stack. The memory
485 * management hardware is updated to the second process's state.
486 * Finally, we can return to the second process.
487 * On entry, r3 points to the THREAD for the current task, r4
488 * points to the THREAD for the new task.
489 *
490 * This routine is always called with interrupts disabled.
491 *
492 * Note: there are two ways to get to the "going out" portion
493 * of this code; either by coming in via the entry (_switch)
494 * or via "fork", which must set up an environment equivalent
495 * to the "_switch" path. If you change this, you'll have to
496 * change the fork code also.
497 *
498 * The code which creates the new task context is in 'copy_thread'
499 * in arch/powerpc/kernel/process.c
500 */
501_GLOBAL(_switch)
502 stwu r1,-INT_FRAME_SIZE(r1)
503 mflr r0
504 stw r0,INT_FRAME_SIZE+4(r1)
505 /* r3-r12 are caller saved -- Cort */
506 SAVE_NVGPRS(r1)
507 stw r0,_NIP(r1) /* Return to switch caller */
508 mfmsr r11
509 li r0,MSR_FP /* Disable floating-point */
510#ifdef CONFIG_ALTIVEC
511BEGIN_FTR_SECTION
512 oris r0,r0,MSR_VEC@h /* Disable altivec */
513 mfspr r12,SPRN_VRSAVE /* save vrsave register value */
514 stw r12,THREAD+THREAD_VRSAVE(r2)
515END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
516#endif /* CONFIG_ALTIVEC */
517#ifdef CONFIG_SPE
518 oris r0,r0,MSR_SPE@h /* Disable SPE */
519 mfspr r12,SPRN_SPEFSCR /* save spefscr register value */
520 stw r12,THREAD+THREAD_SPEFSCR(r2)
521#endif /* CONFIG_SPE */
522 and. r0,r0,r11 /* FP or altivec or SPE enabled? */
523 beq+ 1f
524 andc r11,r11,r0
525 MTMSRD(r11)
526 isync
5271: stw r11,_MSR(r1)
528 mfcr r10
529 stw r10,_CCR(r1)
530 stw r1,KSP(r3) /* Set old stack pointer */
531
532#ifdef CONFIG_SMP
533 /* We need a sync somewhere here to make sure that if the
534 * previous task gets rescheduled on another CPU, it sees all
535 * stores it has performed on this one.
536 */
537 sync
538#endif /* CONFIG_SMP */
539
540 tophys(r0,r4)
541 CLR_TOP32(r0)
542 mtspr SPRN_SPRG3,r0 /* Update current THREAD phys addr */
543 lwz r1,KSP(r4) /* Load new stack pointer */
544
545 /* save the old current 'last' for return value */
546 mr r3,r2
547 addi r2,r4,-THREAD /* Update current */
548
549#ifdef CONFIG_ALTIVEC
550BEGIN_FTR_SECTION
551 lwz r0,THREAD+THREAD_VRSAVE(r2)
552 mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
553END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
554#endif /* CONFIG_ALTIVEC */
555#ifdef CONFIG_SPE
556 lwz r0,THREAD+THREAD_SPEFSCR(r2)
557 mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */
558#endif /* CONFIG_SPE */
559
560 lwz r0,_CCR(r1)
561 mtcrf 0xFF,r0
562 /* r3-r12 are destroyed -- Cort */
563 REST_NVGPRS(r1)
564
565 lwz r4,_NIP(r1) /* Return to _switch caller in new task */
566 mtlr r4
567 addi r1,r1,INT_FRAME_SIZE
568 blr
569
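
Reduced to C, the contract of _switch is: save the outgoing task's stack
pointer into its THREAD, load the incoming task's, and return the old current
so the caller sees "last". A hedged sketch with an illustrative structure (the
real state lives in struct thread_struct, and the swap must stay in assembly
because it changes r1 itself):

/* Illustrative sketch of _switch's data flow only; task_sketch and
 * its ksp field are hypothetical stand-ins for struct thread_struct. */
struct task_sketch {
	unsigned long ksp;             /* saved kernel stack pointer */
};

static struct task_sketch *switch_sketch(struct task_sketch *prev,
					 struct task_sketch *next,
					 unsigned long *r1)
{
	prev->ksp = *r1;               /* stw r1,KSP(r3)             */
	*r1 = next->ksp;               /* lwz r1,KSP(r4)             */
	return prev;                   /* old task comes back in r3  */
}
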
570 .globl fast_exception_return
571fast_exception_return:
572#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
573 andi. r10,r9,MSR_RI /* check for recoverable interrupt */
574 beq 1f /* if not, we've got problems */
575#endif
576
5772: REST_4GPRS(3, r11)
578 lwz r10,_CCR(r11)
579 REST_GPR(1, r11)
580 mtcr r10
581 lwz r10,_LINK(r11)
582 mtlr r10
583 REST_GPR(10, r11)
584 mtspr SPRN_SRR1,r9
585 mtspr SPRN_SRR0,r12
586 REST_GPR(9, r11)
587 REST_GPR(12, r11)
588 lwz r11,GPR11(r11)
589 SYNC
590 RFI
591
592#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
593/* check if the exception happened in a restartable section */
5941: lis r3,exc_exit_restart_end@ha
595 addi r3,r3,exc_exit_restart_end@l
596 cmplw r12,r3
597 bge 3f
598 lis r4,exc_exit_restart@ha
599 addi r4,r4,exc_exit_restart@l
600 cmplw r12,r4
601 blt 3f
602 lis r3,fee_restarts@ha
603 tophys(r3,r3)
604 lwz r5,fee_restarts@l(r3)
605 addi r5,r5,1
606 stw r5,fee_restarts@l(r3)
607 mr r12,r4 /* restart at exc_exit_restart */
608 b 2b
609
610 .comm fee_restarts,4
611
612/* aargh, a nonrecoverable interrupt, panic */
613/* aargh, we don't know which trap this is */
614/* but the 601 doesn't implement the RI bit, so assume it's OK */
6153:
616BEGIN_FTR_SECTION
617 b 2b
618END_FTR_SECTION_IFSET(CPU_FTR_601)
619 li r10,-1
620 stw r10,_TRAP(r11)
621 addi r3,r1,STACK_FRAME_OVERHEAD
622 lis r10,MSR_KERNEL@h
623 ori r10,r10,MSR_KERNEL@l
624 bl transfer_to_handler_full
625 .long nonrecoverable_exception
626 .long ret_from_except
627#endif
628
629 .globl sigreturn_exit
630sigreturn_exit:
631 subi r1,r3,STACK_FRAME_OVERHEAD
632 rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
633 lwz r9,TI_FLAGS(r12)
634 andi. r0,r9,_TIF_SYSCALL_T_OR_A
635 beq+ ret_from_except_full
636 bl do_syscall_trace_leave
637 /* fall through */
638
639 .globl ret_from_except_full
640ret_from_except_full:
641 REST_NVGPRS(r1)
642 /* fall through */
643
644 .globl ret_from_except
645ret_from_except:
646 /* Hard-disable interrupts so that current_thread_info()->flags
647 * can't change between when we test it and when we return
648 * from the interrupt. */
649 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
650 SYNC /* Some chip revs have problems here... */
651 MTMSRD(r10) /* disable interrupts */
652
653 lwz r3,_MSR(r1) /* Returning to user mode? */
654 andi. r0,r3,MSR_PR
655 beq resume_kernel
656
657user_exc_return: /* r10 contains MSR_KERNEL here */
658 /* Check current_thread_info()->flags */
659 rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
660 lwz r9,TI_FLAGS(r9)
661 andi. r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED)
662 bne do_work
663
664restore_user:
665#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
666 /* Check whether this process has its own DBCR0 value. The single
667 step bit tells us that dbcr0 should be loaded. */
668 lwz r0,THREAD+THREAD_DBCR0(r2)
669 andis. r10,r0,DBCR0_IC@h
670 bnel- load_dbcr0
671#endif
672
673#ifdef CONFIG_PREEMPT
674 b restore
675
676/* N.B. the only way to get here is from the beq following ret_from_except. */
677resume_kernel:
678 /* check current_thread_info->preempt_count */
679 rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
680 lwz r0,TI_PREEMPT(r9)
681 cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
682 bne restore
683 lwz r0,TI_FLAGS(r9)
684 andi. r0,r0,_TIF_NEED_RESCHED
685 beq+ restore
686 andi. r0,r3,MSR_EE /* interrupts off? */
687 beq restore /* don't schedule if so */
6881: bl preempt_schedule_irq
689 rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
690 lwz r3,TI_FLAGS(r9)
691 andi. r0,r3,_TIF_NEED_RESCHED
692 bne- 1b
693#else
694resume_kernel:
695#endif /* CONFIG_PREEMPT */
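
The CONFIG_PREEMPT path above, in C: kernel preemption happens only when the
preempt count is zero, NEED_RESCHED is set, and the interrupted context had
interrupts enabled, after which preempt_schedule_irq() is called until
NEED_RESCHED clears. A hedged sketch (thread_info_sketch is a hypothetical
stand-in for struct thread_info):

struct thread_info_sketch {
	int preempt_count;
	unsigned long flags;
};

/* Sketch of resume_kernel's preemption decision. */
static void resume_kernel_sketch(struct thread_info_sketch *ti,
				 unsigned long regs_msr)
{
	if (ti->preempt_count)
		return;                /* just restore regs and return */
	if (!(ti->flags & _TIF_NEED_RESCHED))
		return;
	if (!(regs_msr & MSR_EE))
		return;                /* interrupts were off, don't   */
	do {
		preempt_schedule_irq();
	} while (ti->flags & _TIF_NEED_RESCHED);
}
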
696
697 /* interrupts are hard-disabled at this point */
698restore:
699 lwz r0,GPR0(r1)
700 lwz r2,GPR2(r1)
701 REST_4GPRS(3, r1)
702 REST_2GPRS(7, r1)
703
704 lwz r10,_XER(r1)
705 lwz r11,_CTR(r1)
706 mtspr SPRN_XER,r10
707 mtctr r11
708
709 PPC405_ERR77(0,r1)
710 stwcx. r0,0,r1 /* to clear the reservation */
711
712#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
713 lwz r9,_MSR(r1)
714 andi. r10,r9,MSR_RI /* check if this exception occurred */
715 beql nonrecoverable /* at a bad place (MSR:RI = 0) */
716
717 lwz r10,_CCR(r1)
718 lwz r11,_LINK(r1)
719 mtcrf 0xFF,r10
720 mtlr r11
721
722 /*
723 * Once we put values in SRR0 and SRR1, we are in a state
724 * where exceptions are not recoverable, since taking an
725 * exception will trash SRR0 and SRR1. Therefore we clear the
726 * MSR:RI bit to indicate this. If we do take an exception,
727 * we can't return to the point of the exception but we
728 * can restart the exception exit path at the label
729 * exc_exit_restart below. -- paulus
730 */
731 LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
732 SYNC
733 MTMSRD(r10) /* clear the RI bit */
734 .globl exc_exit_restart
735exc_exit_restart:
736 lwz r9,_MSR(r1)
737 lwz r12,_NIP(r1)
738 FIX_SRR1(r9,r10)
739 mtspr SPRN_SRR0,r12
740 mtspr SPRN_SRR1,r9
741 REST_4GPRS(9, r1)
742 lwz r1,GPR1(r1)
743 .globl exc_exit_restart_end
744exc_exit_restart_end:
745 SYNC
746 RFI
747
748#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
749 /*
750 * This is a bit different on 4xx/Book-E because it doesn't have
751 * the RI bit in the MSR.
752 * The TLB miss handler checks if we have interrupted
753 * the exception exit path and restarts it if so
754 * (well maybe one day it will... :).
755 */
756 lwz r11,_LINK(r1)
757 mtlr r11
758 lwz r10,_CCR(r1)
759 mtcrf 0xff,r10
760 REST_2GPRS(9, r1)
761 .globl exc_exit_restart
762exc_exit_restart:
763 lwz r11,_NIP(r1)
764 lwz r12,_MSR(r1)
765exc_exit_start:
766 mtspr SPRN_SRR0,r11
767 mtspr SPRN_SRR1,r12
768 REST_2GPRS(11, r1)
769 lwz r1,GPR1(r1)
770 .globl exc_exit_restart_end
771exc_exit_restart_end:
772 PPC405_ERR77_SYNC
773 rfi
774 b . /* prevent prefetch past rfi */
775
776/*
777 * Returning from a critical interrupt in user mode doesn't need
778 * to be any different from a normal exception. For a critical
779 * interrupt in the kernel, we just return (without checking for
780 * preemption) since the interrupt may have happened at some crucial
781 * place (e.g. inside the TLB miss handler), and because we will be
782 * running with r1 pointing into critical_stack, not the current
783 * process's kernel stack (and therefore current_thread_info() will
784 * give the wrong answer).
785 * We have to restore various SPRs that may have been in use at the
786 * time of the critical interrupt.
787 *
788 */
789#ifdef CONFIG_40x
790#define PPC_40x_TURN_OFF_MSR_DR \
791 /* avoid any possible TLB misses here by turning off MSR.DR; we \
792 * assume the instructions here are mapped by a pinned TLB entry */ \
793 li r10,MSR_IR; \
794 mtmsr r10; \
795 isync; \
796 tophys(r1, r1);
797#else
798#define PPC_40x_TURN_OFF_MSR_DR
799#endif
800
801#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi) \
802 REST_NVGPRS(r1); \
803 lwz r3,_MSR(r1); \
804 andi. r3,r3,MSR_PR; \
805 LOAD_MSR_KERNEL(r10,MSR_KERNEL); \
806 bne user_exc_return; \
807 lwz r0,GPR0(r1); \
808 lwz r2,GPR2(r1); \
809 REST_4GPRS(3, r1); \
810 REST_2GPRS(7, r1); \
811 lwz r10,_XER(r1); \
812 lwz r11,_CTR(r1); \
813 mtspr SPRN_XER,r10; \
814 mtctr r11; \
815 PPC405_ERR77(0,r1); \
816 stwcx. r0,0,r1; /* to clear the reservation */ \
817 lwz r11,_LINK(r1); \
818 mtlr r11; \
819 lwz r10,_CCR(r1); \
820 mtcrf 0xff,r10; \
821 PPC_40x_TURN_OFF_MSR_DR; \
822 lwz r9,_DEAR(r1); \
823 lwz r10,_ESR(r1); \
824 mtspr SPRN_DEAR,r9; \
825 mtspr SPRN_ESR,r10; \
826 lwz r11,_NIP(r1); \
827 lwz r12,_MSR(r1); \
828 mtspr exc_lvl_srr0,r11; \
829 mtspr exc_lvl_srr1,r12; \
830 lwz r9,GPR9(r1); \
831 lwz r12,GPR12(r1); \
832 lwz r10,GPR10(r1); \
833 lwz r11,GPR11(r1); \
834 lwz r1,GPR1(r1); \
835 PPC405_ERR77_SYNC; \
836 exc_lvl_rfi; \
837 b .; /* prevent prefetch past exc_lvl_rfi */
838
839 .globl ret_from_crit_exc
840ret_from_crit_exc:
841 RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)
842
843#ifdef CONFIG_BOOKE
844 .globl ret_from_debug_exc
845ret_from_debug_exc:
846 RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI)
847
848 .globl ret_from_mcheck_exc
849ret_from_mcheck_exc:
850 RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI)
851#endif /* CONFIG_BOOKE */
852
853/*
854 * Load the DBCR0 value for a task that is being ptraced,
855 * having first saved away the global DBCR0. Note that r0
856 * has the dbcr0 value to set upon entry to this routine.
857 */
858load_dbcr0:
859 mfmsr r10 /* first disable debug exceptions */
860 rlwinm r10,r10,0,~MSR_DE
861 mtmsr r10
862 isync
863 mfspr r10,SPRN_DBCR0
864 lis r11,global_dbcr0@ha
865 addi r11,r11,global_dbcr0@l
866 stw r10,0(r11)
867 mtspr SPRN_DBCR0,r0
868 lwz r10,4(r11)
869 addi r10,r10,1
870 stw r10,4(r11)
871 li r11,-1
872 mtspr SPRN_DBSR,r11 /* clear all pending debug events */
873 blr
874
875 .comm global_dbcr0,8
876#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
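
load_dbcr0, in C terms: stash the current global debug-control value, install
the ptraced task's own, bump the use count kept in the second word of
global_dbcr0, and clear any pending events in DBSR (the real routine also
masks MSR_DE around the swap). A hedged sketch, with mfspr()/mtspr() modelling
the privileged SPR accessors:

/* Sketch of load_dbcr0.  The two words mirror .comm global_dbcr0,8:
 * word 0 holds the saved DBCR0, word 1 a use count.  mfspr()/mtspr()
 * are assumed accessors, not defined here. */
static unsigned int global_dbcr0_sketch[2];

static void load_dbcr0_sketch(unsigned int task_dbcr0)
{
	global_dbcr0_sketch[0] = mfspr(SPRN_DBCR0);
	mtspr(SPRN_DBCR0, task_dbcr0);
	global_dbcr0_sketch[1]++;
	mtspr(SPRN_DBSR, ~0u);         /* clear all pending events */
}
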
877
878do_work: /* r10 contains MSR_KERNEL here */
879 andi. r0,r9,_TIF_NEED_RESCHED
880 beq do_user_signal
881
882do_resched: /* r10 contains MSR_KERNEL here */
883 ori r10,r10,MSR_EE
884 SYNC
885 MTMSRD(r10) /* hard-enable interrupts */
886 bl schedule
887recheck:
888 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
889 SYNC
890 MTMSRD(r10) /* disable interrupts */
891 rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
892 lwz r9,TI_FLAGS(r9)
893 andi. r0,r9,_TIF_NEED_RESCHED
894 bne- do_resched
895 andi. r0,r9,_TIF_SIGPENDING
896 beq restore_user
897do_user_signal: /* r10 contains MSR_KERNEL here */
898 ori r10,r10,MSR_EE
899 SYNC
900 MTMSRD(r10) /* hard-enable interrupts */
901 /* save r13-r31 in the exception frame, if not already done */
902 lwz r3,_TRAP(r1)
903 andi. r0,r3,1
904 beq 2f
905 SAVE_NVGPRS(r1)
906 rlwinm r3,r3,0,0,30
907 stw r3,_TRAP(r1)
9082: li r3,0
909 addi r4,r1,STACK_FRAME_OVERHEAD
910 bl do_signal
911 REST_NVGPRS(r1)
912 b recheck
913
914/*
915 * We come here when we are at the end of handling an exception
916 * that occurred at a place where taking an exception will lose
917 * state information, such as the contents of SRR0 and SRR1.
918 */
919nonrecoverable:
920 lis r10,exc_exit_restart_end@ha
921 addi r10,r10,exc_exit_restart_end@l
922 cmplw r12,r10
923 bge 3f
924 lis r11,exc_exit_restart@ha
925 addi r11,r11,exc_exit_restart@l
926 cmplw r12,r11
927 blt 3f
928 lis r10,ee_restarts@ha
929 lwz r12,ee_restarts@l(r10)
930 addi r12,r12,1
931 stw r12,ee_restarts@l(r10)
932 mr r12,r11 /* restart at exc_exit_restart */
933 blr
9343: /* OK, we can't recover, kill this process */
935 /* but the 601 doesn't implement the RI bit, so assume it's OK */
936BEGIN_FTR_SECTION
937 blr
938END_FTR_SECTION_IFSET(CPU_FTR_601)
939 lwz r3,_TRAP(r1)
940 andi. r0,r3,1
941 beq 4f
942 SAVE_NVGPRS(r1)
943 rlwinm r3,r3,0,0,30
944 stw r3,_TRAP(r1)
9454: addi r3,r1,STACK_FRAME_OVERHEAD
946 bl nonrecoverable_exception
947 /* shouldn't return */
948 b 4b
949
950 .comm ee_restarts,4
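/*
 * In C terms (illustrative), the window test above is:
 *
 *	if (exc_exit_restart <= srr0 && srr0 < exc_exit_restart_end) {
 *		ee_restarts++;
 *		srr0 = exc_exit_restart;	// redo the interrupted exit
 *	} else if (!cpu_has_feature(CPU_FTR_601)) {
 *		nonrecoverable_exception(regs);	// 601 has no RI bit
 *	}
 *
 * with srr0 being the faulting address held in r12.
 */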
951
952/*
953 * PROM code for specific machines follows. Put it
954 * here so it's easy to add arch-specific sections later.
955 * -- Cort
956 */
957#ifdef CONFIG_PPC_RTAS
958/*
959 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
960 * called with the MMU off.
961 */
962_GLOBAL(enter_rtas)
963 stwu r1,-INT_FRAME_SIZE(r1)
964 mflr r0
965 stw r0,INT_FRAME_SIZE+4(r1)
966 LOADADDR(r4, rtas)
967 lis r6,1f@ha /* physical return address for rtas */
968 addi r6,r6,1f@l
969 tophys(r6,r6)
970 tophys(r7,r1)
971 lwz r8,RTASENTRY(r4)
972 lwz r4,RTASBASE(r4)
973 mfmsr r9
974 stw r9,8(r1)
975 LOAD_MSR_KERNEL(r0,MSR_KERNEL)
976 SYNC /* disable interrupts so SRR0/1 */
977 MTMSRD(r0) /* don't get trashed */
978 li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
979 mtlr r6
980 mtspr SPRN_SPRG2,r7
981 mtspr SPRN_SRR0,r8
982 mtspr SPRN_SRR1,r9
983 RFI
9841: tophys(r9,r1)
985 lwz r8,INT_FRAME_SIZE+4(r9) /* get return address */
986 lwz r9,8(r9) /* original msr value */
987 FIX_SRR1(r9,r0)
988 addi r1,r1,INT_FRAME_SIZE
989 li r0,0
990 mtspr SPRN_SPRG2,r0
991 mtspr SPRN_SRR0,r8
992 mtspr SPRN_SRR1,r9
993 RFI /* return to caller */
994
995 .globl machine_check_in_rtas
996machine_check_in_rtas:
997 twi 31,0,0
998 /* XXX load up BATs and panic */
999
1000#endif /* CONFIG_PPC_RTAS */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
new file mode 100644
index 000000000000..984a10630714
--- /dev/null
+++ b/arch/powerpc/kernel/entry_64.S
@@ -0,0 +1,842 @@
1/*
2 * arch/ppc64/kernel/entry.S
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
7 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
8 * Adapted for Power Macintosh by Paul Mackerras.
9 * Low-level exception handlers and MMU support
10 * rewritten by Paul Mackerras.
11 * Copyright (C) 1996 Paul Mackerras.
12 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
13 *
14 * This file contains the system call entry code, context switch
15 * code, and exception/interrupt return code for PowerPC.
16 *
17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License
19 * as published by the Free Software Foundation; either version
20 * 2 of the License, or (at your option) any later version.
21 */
22
23#include <linux/config.h>
24#include <linux/errno.h>
25#include <asm/unistd.h>
26#include <asm/processor.h>
27#include <asm/page.h>
28#include <asm/mmu.h>
29#include <asm/thread_info.h>
30#include <asm/ppc_asm.h>
31#include <asm/asm-offsets.h>
32#include <asm/cputable.h>
33
34#ifdef CONFIG_PPC_ISERIES
35#define DO_SOFT_DISABLE
36#endif
37
38/*
39 * System calls.
40 */
41 .section ".toc","aw"
42.SYS_CALL_TABLE:
43 .tc .sys_call_table[TC],.sys_call_table
44
45/* This value is used to mark exception frames on the stack. */
46exception_marker:
47 .tc ID_72656773_68657265[TC],0x7265677368657265
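/* 0x7265677368657265 is ASCII for "regshere"; system_call_common
 * below stores it 16 bytes below the STACK_FRAME_OVERHEAD pointer
 * to tag exception frames. */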
48
49 .section ".text"
50 .align 7
51
52#undef SHOW_SYSCALLS
53
54 .globl system_call_common
55system_call_common:
56 andi. r10,r12,MSR_PR
57 mr r10,r1
58 addi r1,r1,-INT_FRAME_SIZE
59 beq- 1f
60 ld r1,PACAKSAVE(r13)
611: std r10,0(r1)
62 std r11,_NIP(r1)
63 std r12,_MSR(r1)
64 std r0,GPR0(r1)
65 std r10,GPR1(r1)
66 std r2,GPR2(r1)
67 std r3,GPR3(r1)
68 std r4,GPR4(r1)
69 std r5,GPR5(r1)
70 std r6,GPR6(r1)
71 std r7,GPR7(r1)
72 std r8,GPR8(r1)
73 li r11,0
74 std r11,GPR9(r1)
75 std r11,GPR10(r1)
76 std r11,GPR11(r1)
77 std r11,GPR12(r1)
78 std r9,GPR13(r1)
79 crclr so
80 mfcr r9
81 mflr r10
82 li r11,0xc01
83 std r9,_CCR(r1)
84 std r10,_LINK(r1)
85 std r11,_TRAP(r1)
86 mfxer r9
87 mfctr r10
88 std r9,_XER(r1)
89 std r10,_CTR(r1)
90 std r3,ORIG_GPR3(r1)
91 ld r2,PACATOC(r13)
92 addi r9,r1,STACK_FRAME_OVERHEAD
93 ld r11,exception_marker@toc(r2)
94 std r11,-16(r9) /* "regshere" marker */
95#ifdef CONFIG_PPC_ISERIES
96 /* Hack for handling interrupts when soft-enabling on iSeries */
97 cmpdi cr1,r0,0x5555 /* syscall 0x5555 */
98 andi. r10,r12,MSR_PR /* from kernel */
99 crand 4*cr0+eq,4*cr1+eq,4*cr0+eq
100 beq hardware_interrupt_entry
101 lbz r10,PACAPROCENABLED(r13)
102 std r10,SOFTE(r1)
103#endif
104 mfmsr r11
105 ori r11,r11,MSR_EE
106 mtmsrd r11,1
107
108#ifdef SHOW_SYSCALLS
109 bl .do_show_syscall
110 REST_GPR(0,r1)
111 REST_4GPRS(3,r1)
112 REST_2GPRS(7,r1)
113 addi r9,r1,STACK_FRAME_OVERHEAD
114#endif
115 clrrdi r11,r1,THREAD_SHIFT
116 li r12,0
117 ld r10,TI_FLAGS(r11)
118 stb r12,TI_SC_NOERR(r11)
119 andi. r11,r10,_TIF_SYSCALL_T_OR_A
120 bne- syscall_dotrace
121syscall_dotrace_cont:
122 cmpldi 0,r0,NR_syscalls
123 bge- syscall_enosys
124
125system_call: /* label this so stack traces look sane */
126/*
127 * Need to vector to the 32-bit or default (64-bit) sys_call_table here,
128 * based on the caller's run-mode / personality.
129 */
130 ld r11,.SYS_CALL_TABLE@toc(2)
131 andi. r10,r10,_TIF_32BIT
132 beq 15f
133 addi r11,r11,8 /* use 32-bit syscall entries */
134 clrldi r3,r3,32
135 clrldi r4,r4,32
136 clrldi r5,r5,32
137 clrldi r6,r6,32
138 clrldi r7,r7,32
139 clrldi r8,r8,32
14015:
141 slwi r0,r0,4
142 ldx r10,r11,r0 /* Fetch system call handler [ptr] */
143 mtctr r10
144 bctrl /* Call handler */
145
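/*
 * Dispatch sketch (illustrative C): each sys_call_table slot is 16
 * bytes (hence the slwi by 4), the 64-bit entry at +0 and the 32-bit
 * entry at +8:
 *
 *	handler = table[nr].native64;
 *	if (thread_flags & _TIF_32BIT) {
 *		handler = table[nr].compat32;
 *		// args r3-r8 truncated to 32 bits by the clrldi's
 *	}
 *	r3 = handler(r3, r4, r5, r6, r7, r8);
 */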
146syscall_exit:
147#ifdef SHOW_SYSCALLS
148 std r3,GPR3(r1)
149 bl .do_show_syscall_exit
150 ld r3,GPR3(r1)
151#endif
152 std r3,RESULT(r1)
153 ld r5,_CCR(r1)
154 li r10,-_LAST_ERRNO
155 cmpld r3,r10
156 clrrdi r12,r1,THREAD_SHIFT
157 bge- syscall_error
158syscall_error_cont:
159
160 /* check for syscall tracing or audit */
161 ld r9,TI_FLAGS(r12)
162 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
163 bne- syscall_exit_trace
164syscall_exit_trace_cont:
165
166 /* disable interrupts so current_thread_info()->flags can't change,
167 and so that we don't get interrupted after loading SRR0/1. */
168 ld r8,_MSR(r1)
169 andi. r10,r8,MSR_RI
170 beq- unrecov_restore
171 mfmsr r10
172 rldicl r10,r10,48,1
173 rotldi r10,r10,16
174 mtmsrd r10,1
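	/* The rldicl/rotldi pair above is a branch-free r10 &= ~MSR_EE:
	 * rotating left by 48 brings bit 48 (MSR_EE in IBM numbering)
	 * to the top, rldicl's mask clears it, and the further rotate
	 * by 16 (48 + 16 = 64) restores the original alignment. */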
175 ld r9,TI_FLAGS(r12)
176 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
177 bne- syscall_exit_work
178 ld r7,_NIP(r1)
179 stdcx. r0,0,r1 /* to clear the reservation */
180 andi. r6,r8,MSR_PR
181 ld r4,_LINK(r1)
182 beq- 1f /* only restore r13 if */
183 ld r13,GPR13(r1) /* returning to usermode */
1841: ld r2,GPR2(r1)
185 li r12,MSR_RI
186 andc r10,r10,r12
187 mtmsrd r10,1 /* clear MSR.RI */
188 ld r1,GPR1(r1)
189 mtlr r4
190 mtcr r5
191 mtspr SPRN_SRR0,r7
192 mtspr SPRN_SRR1,r8
193 rfid
194 b . /* prevent speculative execution */
195
196syscall_enosys:
197 li r3,-ENOSYS
198 std r3,RESULT(r1)
199 clrrdi r12,r1,THREAD_SHIFT
200 ld r5,_CCR(r1)
201
202syscall_error:
203 lbz r11,TI_SC_NOERR(r12)
204 cmpwi 0,r11,0
205 bne- syscall_error_cont
206 neg r3,r3
207 oris r5,r5,0x1000 /* Set SO bit in CR */
208 std r5,_CCR(r1)
209 b syscall_error_cont
210
211/* Traced system call support */
212syscall_dotrace:
213 bl .save_nvgprs
214 addi r3,r1,STACK_FRAME_OVERHEAD
215 bl .do_syscall_trace_enter
216 ld r0,GPR0(r1) /* Restore original registers */
217 ld r3,GPR3(r1)
218 ld r4,GPR4(r1)
219 ld r5,GPR5(r1)
220 ld r6,GPR6(r1)
221 ld r7,GPR7(r1)
222 ld r8,GPR8(r1)
223 addi r9,r1,STACK_FRAME_OVERHEAD
224 clrrdi r10,r1,THREAD_SHIFT
225 ld r10,TI_FLAGS(r10)
226 b syscall_dotrace_cont
227
228syscall_exit_trace:
229 std r3,GPR3(r1)
230 bl .save_nvgprs
231 addi r3,r1,STACK_FRAME_OVERHEAD
232 bl .do_syscall_trace_leave
233 REST_NVGPRS(r1)
234 ld r3,GPR3(r1)
235 ld r5,_CCR(r1)
236 clrrdi r12,r1,THREAD_SHIFT
237 b syscall_exit_trace_cont
238
239/* Stuff to do on exit from a system call. */
240syscall_exit_work:
241 std r3,GPR3(r1)
242 std r5,_CCR(r1)
243 b .ret_from_except_lite
244
245/* Save non-volatile GPRs, if not already saved. */
246_GLOBAL(save_nvgprs)
247 ld r11,_TRAP(r1)
248 andi. r0,r11,1
249 beqlr-
250 SAVE_NVGPRS(r1)
251 clrrdi r0,r11,1
252 std r0,_TRAP(r1)
253 blr
254
255/*
256 * The sigsuspend and rt_sigsuspend system calls can call do_signal
257 * and thus put the process into the stopped state where we might
258 * want to examine its user state with ptrace. Therefore we need
259 * to save all the nonvolatile registers (r14 - r31) before calling
260 * the C code. Similarly, fork, vfork and clone need the full
261 * register state on the stack so that it can be copied to the child.
262 */
263_GLOBAL(ppc32_sigsuspend)
264 bl .save_nvgprs
265 bl .compat_sys_sigsuspend
266 b 70f
267
268_GLOBAL(ppc64_rt_sigsuspend)
269 bl .save_nvgprs
270 bl .sys_rt_sigsuspend
271 b 70f
272
273_GLOBAL(ppc32_rt_sigsuspend)
274 bl .save_nvgprs
275 bl .compat_sys_rt_sigsuspend
27670: cmpdi 0,r3,0
277 /* If it returned an error, we need to return via syscall_exit to set
278 the SO bit in cr0 and potentially stop for ptrace. */
279 bne syscall_exit
280 /* If sigsuspend() returns zero, we are going into a signal handler. We
281 may need to call audit_syscall_exit() to mark the exit from sigsuspend() */
282#ifdef CONFIG_AUDIT
283 ld r3,PACACURRENT(r13)
284 ld r4,AUDITCONTEXT(r3)
285 cmpdi 0,r4,0
286 beq .ret_from_except /* No audit_context: Leave immediately. */
287 li r4, 2 /* AUDITSC_FAILURE */
288 li r5,-4 /* It's always -EINTR */
289 bl .audit_syscall_exit
290#endif
291 b .ret_from_except
292
293_GLOBAL(ppc_fork)
294 bl .save_nvgprs
295 bl .sys_fork
296 b syscall_exit
297
298_GLOBAL(ppc_vfork)
299 bl .save_nvgprs
300 bl .sys_vfork
301 b syscall_exit
302
303_GLOBAL(ppc_clone)
304 bl .save_nvgprs
305 bl .sys_clone
306 b syscall_exit
307
308_GLOBAL(ppc32_swapcontext)
309 bl .save_nvgprs
310 bl .compat_sys_swapcontext
311 b 80f
312
313_GLOBAL(ppc64_swapcontext)
314 bl .save_nvgprs
315 bl .sys_swapcontext
316 b 80f
317
318_GLOBAL(ppc32_sigreturn)
319 bl .compat_sys_sigreturn
320 b 80f
321
322_GLOBAL(ppc32_rt_sigreturn)
323 bl .compat_sys_rt_sigreturn
324 b 80f
325
326_GLOBAL(ppc64_rt_sigreturn)
327 bl .sys_rt_sigreturn
328
32980: cmpdi 0,r3,0
330 blt syscall_exit
331 clrrdi r4,r1,THREAD_SHIFT
332 ld r4,TI_FLAGS(r4)
333 andi. r4,r4,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
334 beq+ 81f
335 addi r3,r1,STACK_FRAME_OVERHEAD
336 bl .do_syscall_trace_leave
33781: b .ret_from_except
338
339_GLOBAL(ret_from_fork)
340 bl .schedule_tail
341 REST_NVGPRS(r1)
342 li r3,0
343 b syscall_exit
344
345/*
346 * This routine switches between two different tasks. The process
347 * state of one is saved on its kernel stack. Then the state
348 * of the other is restored from its kernel stack. The memory
349 * management hardware is updated to the second process's state.
350 * Finally, we can return to the second process, via ret_from_except.
351 * On entry, r3 points to the THREAD for the current task, r4
352 * points to the THREAD for the new task.
353 *
354 * Note: there are two ways to get to the "going out" portion
355 * of this code; either by coming in via the entry (_switch)
356 * or via "fork" which must set up an environment equivalent
357 * to the "_switch" path. If you change this you'll have to change
358 * the fork code also.
359 *
360 * The code which creates the new task context is in 'copy_thread'
361 * in arch/ppc64/kernel/process.c
362 */
363 .align 7
364_GLOBAL(_switch)
365 mflr r0
366 std r0,16(r1)
367 stdu r1,-SWITCH_FRAME_SIZE(r1)
368 /* r3-r13 are caller saved -- Cort */
369 SAVE_8GPRS(14, r1)
370 SAVE_10GPRS(22, r1)
371 mflr r20 /* Return to switch caller */
372 mfmsr r22
373 li r0, MSR_FP
374#ifdef CONFIG_ALTIVEC
375BEGIN_FTR_SECTION
376 oris r0,r0,MSR_VEC@h /* Disable altivec */
377 mfspr r24,SPRN_VRSAVE /* save vrsave register value */
378 std r24,THREAD_VRSAVE(r3)
379END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
380#endif /* CONFIG_ALTIVEC */
381 and. r0,r0,r22
382 beq+ 1f
383 andc r22,r22,r0
384 mtmsrd r22
385 isync
3861: std r20,_NIP(r1)
387 mfcr r23
388 std r23,_CCR(r1)
389 std r1,KSP(r3) /* Set old stack pointer */
390
391#ifdef CONFIG_SMP
392 /* We need a sync somewhere here to make sure that if the
393 * previous task gets rescheduled on another CPU, it sees all
394 * stores it has performed on this one.
395 */
396 sync
397#endif /* CONFIG_SMP */
398
399 addi r6,r4,-THREAD /* Convert THREAD to 'current' */
400 std r6,PACACURRENT(r13) /* Set new 'current' */
401
402 ld r8,KSP(r4) /* new stack pointer */
403BEGIN_FTR_SECTION
404 clrrdi r6,r8,28 /* get its ESID */
405 clrrdi r9,r1,28 /* get current sp ESID */
406 clrldi. r0,r6,2 /* is new ESID c00000000? */
407 cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */
408 cror eq,4*cr1+eq,eq
409 beq 2f /* if yes, don't slbie it */
410
411 /* Bolt in the new stack SLB entry */
412 ld r7,KSP_VSID(r4) /* Get new stack's VSID */
413 oris r0,r6,(SLB_ESID_V)@h
414 ori r0,r0,(SLB_NUM_BOLTED-1)@l
415 slbie r6
416 slbie r6 /* Workaround POWER5 < DD2.1 issue */
417 slbmte r7,r0
418 isync
419
4202:
421END_FTR_SECTION_IFSET(CPU_FTR_SLB)
422 clrrdi r7,r8,THREAD_SHIFT /* base of new stack */
423 /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
424 because we don't need to leave the 288-byte ABI gap at the
425 top of the kernel stack. */
426 addi r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE
427
428 mr r1,r8 /* start using new stack pointer */
429 std r7,PACAKSAVE(r13)
430
431 ld r6,_CCR(r1)
432 mtcrf 0xFF,r6
433
434#ifdef CONFIG_ALTIVEC
435BEGIN_FTR_SECTION
436 ld r0,THREAD_VRSAVE(r4)
437 mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
438END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
439#endif /* CONFIG_ALTIVEC */
440
441 /* r3-r13 are destroyed -- Cort */
442 REST_8GPRS(14, r1)
443 REST_10GPRS(22, r1)
444
445 /* convert old thread to its task_struct for return value */
446 addi r3,r3,-THREAD
447 ld r7,_NIP(r1) /* Return to _switch caller in new task */
448 mtlr r7
449 addi r1,r1,SWITCH_FRAME_SIZE
450 blr
451
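/*
 * Condensed C-level sketch of _switch (illustrative):
 *
 *	// on prev's stack: nvgprs, CR and the caller's LR are saved
 *	prev->thread.ksp = r1;
 *	paca->current = next;			// r4 - THREAD
 *	if (SLB && next's stack segment isn't already mapped)
 *		slbie + slbmte the new kernel-stack SLB entry;
 *	paca->kstack = top of next's stack - SWITCH_FRAME_SIZE;
 *	r1 = next->thread.ksp;
 *	restore CR and nvgprs; return to next's saved _switch caller
 *	with prev's task_struct (r3 - THREAD) as the return value;
 */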
452 .align 7
453_GLOBAL(ret_from_except)
454 ld r11,_TRAP(r1)
455 andi. r0,r11,1
456 bne .ret_from_except_lite
457 REST_NVGPRS(r1)
458
459_GLOBAL(ret_from_except_lite)
460 /*
461 * Disable interrupts so that current_thread_info()->flags
462 * can't change between when we test it and when we return
463 * from the interrupt.
464 */
465 mfmsr r10 /* Get current interrupt state */
466 rldicl r9,r10,48,1 /* clear MSR_EE */
467 rotldi r9,r9,16
468 mtmsrd r9,1 /* Update machine state */
469
470#ifdef CONFIG_PREEMPT
471 clrrdi r9,r1,THREAD_SHIFT /* current_thread_info() */
472 li r0,_TIF_NEED_RESCHED /* bits to check */
473 ld r3,_MSR(r1)
474 ld r4,TI_FLAGS(r9)
475 /* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
476 rlwimi r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
477 and. r0,r4,r0 /* check NEED_RESCHED and maybe SIGPENDING */
478 bne do_work
479
480#else /* !CONFIG_PREEMPT */
481 ld r3,_MSR(r1) /* Returning to user mode? */
482 andi. r3,r3,MSR_PR
483 beq restore /* if not, just restore regs and return */
484
485 /* Check current_thread_info()->flags */
486 clrrdi r9,r1,THREAD_SHIFT
487 ld r4,TI_FLAGS(r9)
488 andi. r0,r4,_TIF_USER_WORK_MASK
489 bne do_work
490#endif
491
492restore:
493#ifdef CONFIG_PPC_ISERIES
494 ld r5,SOFTE(r1)
495 cmpdi 0,r5,0
496 beq 4f
497 /* Check for pending interrupts (iSeries) */
498 ld r3,PACALPPACA+LPPACAANYINT(r13)
499 cmpdi r3,0
500 beq+ 4f /* skip do_IRQ if no interrupts */
501
502 li r3,0
503 stb r3,PACAPROCENABLED(r13) /* ensure we are soft-disabled */
504 ori r10,r10,MSR_EE
505 mtmsrd r10 /* hard-enable again */
506 addi r3,r1,STACK_FRAME_OVERHEAD
507 bl .do_IRQ
508 b .ret_from_except_lite /* loop back and handle more */
509
5104: stb r5,PACAPROCENABLED(r13)
511#endif
512
513 ld r3,_MSR(r1)
514 andi. r0,r3,MSR_RI
515 beq- unrecov_restore
516
517 andi. r0,r3,MSR_PR
518
519 /*
520	 * r13 is our per-CPU area; only restore it if we are returning to
521	 * userspace
522 */
523 beq 1f
524 REST_GPR(13, r1)
5251:
526 ld r3,_CTR(r1)
527 ld r0,_LINK(r1)
528 mtctr r3
529 mtlr r0
530 ld r3,_XER(r1)
531 mtspr SPRN_XER,r3
532
533 REST_8GPRS(5, r1)
534
535 stdcx. r0,0,r1 /* to clear the reservation */
536
537 mfmsr r0
538 li r2, MSR_RI
539 andc r0,r0,r2
540 mtmsrd r0,1
541
542 ld r0,_MSR(r1)
543 mtspr SPRN_SRR1,r0
544
545 ld r2,_CCR(r1)
546 mtcrf 0xFF,r2
547 ld r2,_NIP(r1)
548 mtspr SPRN_SRR0,r2
549
550 ld r0,GPR0(r1)
551 ld r2,GPR2(r1)
552 ld r3,GPR3(r1)
553 ld r4,GPR4(r1)
554 ld r1,GPR1(r1)
555
556 rfid
557 b . /* prevent speculative execution */
558
559/* Note: this must change if we start using the TIF_NOTIFY_RESUME bit */
560do_work:
561#ifdef CONFIG_PREEMPT
562 andi. r0,r3,MSR_PR /* Returning to user mode? */
563 bne user_work
564 /* Check that preempt_count() == 0 and interrupts are enabled */
565 lwz r8,TI_PREEMPT(r9)
566 cmpwi cr1,r8,0
567#ifdef CONFIG_PPC_ISERIES
568 ld r0,SOFTE(r1)
569 cmpdi r0,0
570#else
571 andi. r0,r3,MSR_EE
572#endif
573 crandc eq,cr1*4+eq,eq
574 bne restore
575 /* here we are preempting the current task */
5761:
577#ifdef CONFIG_PPC_ISERIES
578 li r0,1
579 stb r0,PACAPROCENABLED(r13)
580#endif
581 ori r10,r10,MSR_EE
582 mtmsrd r10,1 /* reenable interrupts */
583 bl .preempt_schedule
584 mfmsr r10
585 clrrdi r9,r1,THREAD_SHIFT
586 rldicl r10,r10,48,1 /* disable interrupts again */
587 rotldi r10,r10,16
588 mtmsrd r10,1
589 ld r4,TI_FLAGS(r9)
590 andi. r0,r4,_TIF_NEED_RESCHED
591 bne 1b
592 b restore
593
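/* Kernel-preemption sketch (illustrative):
 *
 *	if (preempt_count() || !irqs_were_enabled)
 *		goto restore;
 *	do {
 *		local_irq_enable();
 *		preempt_schedule();
 *		local_irq_disable();
 *	} while (ti->flags & _TIF_NEED_RESCHED);
 *
 * where irqs_were_enabled comes from SOFTE on iSeries and from the
 * saved MSR_EE otherwise. */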
594user_work:
595#endif
596 /* Enable interrupts */
597 ori r10,r10,MSR_EE
598 mtmsrd r10,1
599
600 andi. r0,r4,_TIF_NEED_RESCHED
601 beq 1f
602 bl .schedule
603 b .ret_from_except_lite
604
6051: bl .save_nvgprs
606 li r3,0
607 addi r4,r1,STACK_FRAME_OVERHEAD
608 bl .do_signal
609 b .ret_from_except
610
611unrecov_restore:
612 addi r3,r1,STACK_FRAME_OVERHEAD
613 bl .unrecoverable_exception
614 b unrecov_restore
615
616#ifdef CONFIG_PPC_RTAS
617/*
618 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
619 * called with the MMU off.
620 *
621 * In addition, we need to be in 32b mode, at least for now.
622 *
623 * Note: r3 is an input parameter to rtas, so don't trash it...
624 */
625_GLOBAL(enter_rtas)
626 mflr r0
627 std r0,16(r1)
628 stdu r1,-RTAS_FRAME_SIZE(r1) /* Save SP and create stack space. */
629
630 /* Because RTAS is running in 32b mode, it clobbers the high order half
631 * of all registers that it saves. We therefore save those registers
632 * RTAS might touch to the stack. (r0, r3-r13 are caller saved)
633 */
634 SAVE_GPR(2, r1) /* Save the TOC */
635 SAVE_GPR(13, r1) /* Save paca */
636 SAVE_8GPRS(14, r1) /* Save the non-volatiles */
637 SAVE_10GPRS(22, r1) /* ditto */
638
639 mfcr r4
640 std r4,_CCR(r1)
641 mfctr r5
642 std r5,_CTR(r1)
643 mfspr r6,SPRN_XER
644 std r6,_XER(r1)
645 mfdar r7
646 std r7,_DAR(r1)
647 mfdsisr r8
648 std r8,_DSISR(r1)
649 mfsrr0 r9
650 std r9,_SRR0(r1)
651 mfsrr1 r10
652 std r10,_SRR1(r1)
653
654	/* It is never acceptable to get here with interrupts enabled;
655	 * check that with the asm equivalent of WARN_ON
656 */
657 mfmsr r6
658 andi. r0,r6,MSR_EE
6591: tdnei r0,0
660.section __bug_table,"a"
661 .llong 1b,__LINE__ + 0x1000000, 1f, 2f
662.previous
663.section .rodata,"a"
6641: .asciz __FILE__
6652: .asciz "enter_rtas"
666.previous
667
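	/* The tdnei above is a hand-expanded WARN_ON(msr & MSR_EE): it
	 * traps iff r0 != 0, and the __bug_table entry (trap address,
	 * line number plus a warning flag, file, function) lets the trap
	 * handler print a warning and continue (illustrative reading). */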
668 /* Unfortunately, the stack pointer and the MSR are also clobbered,
669 * so they are saved in the PACA which allows us to restore
670 * our original state after RTAS returns.
671 */
672 std r1,PACAR1(r13)
673 std r6,PACASAVEDMSR(r13)
674
675 /* Setup our real return addr */
676 SET_REG_TO_LABEL(r4,.rtas_return_loc)
677 SET_REG_TO_CONST(r9,KERNELBASE)
678 sub r4,r4,r9
679 mtlr r4
680
681 li r0,0
682 ori r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
683 andc r0,r6,r0
684
685 li r9,1
686 rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
687 ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP
688 andc r6,r0,r9
689 ori r6,r6,MSR_RI
690 sync /* disable interrupts so SRR0/1 */
691 mtmsrd r0 /* don't get trashed */
692
693 SET_REG_TO_LABEL(r4,rtas)
694 ld r5,RTASENTRY(r4) /* get the rtas->entry value */
695 ld r4,RTASBASE(r4) /* get the rtas->base value */
696
697 mtspr SPRN_SRR0,r5
698 mtspr SPRN_SRR1,r6
699 rfid
700 b . /* prevent speculative execution */
701
702_STATIC(rtas_return_loc)
703 /* relocation is off at this point */
704 mfspr r4,SPRN_SPRG3 /* Get PACA */
705 SET_REG_TO_CONST(r5, KERNELBASE)
706 sub r4,r4,r5 /* RELOC the PACA base pointer */
707
708 mfmsr r6
709 li r0,MSR_RI
710 andc r6,r6,r0
711 sync
712 mtmsrd r6
713
714 ld r1,PACAR1(r4) /* Restore our SP */
715 LOADADDR(r3,.rtas_restore_regs)
716 ld r4,PACASAVEDMSR(r4) /* Restore our MSR */
717
718 mtspr SPRN_SRR0,r3
719 mtspr SPRN_SRR1,r4
720 rfid
721 b . /* prevent speculative execution */
722
723_STATIC(rtas_restore_regs)
724 /* relocation is on at this point */
725 REST_GPR(2, r1) /* Restore the TOC */
726 REST_GPR(13, r1) /* Restore paca */
727 REST_8GPRS(14, r1) /* Restore the non-volatiles */
728 REST_10GPRS(22, r1) /* ditto */
729
730 mfspr r13,SPRN_SPRG3
731
732 ld r4,_CCR(r1)
733 mtcr r4
734 ld r5,_CTR(r1)
735 mtctr r5
736 ld r6,_XER(r1)
737 mtspr SPRN_XER,r6
738 ld r7,_DAR(r1)
739 mtdar r7
740 ld r8,_DSISR(r1)
741 mtdsisr r8
742 ld r9,_SRR0(r1)
743 mtsrr0 r9
744 ld r10,_SRR1(r1)
745 mtsrr1 r10
746
747 addi r1,r1,RTAS_FRAME_SIZE /* Unstack our frame */
748 ld r0,16(r1) /* get return address */
749
750 mtlr r0
751 blr /* return to caller */
752
753#endif /* CONFIG_PPC_RTAS */
754
755#ifdef CONFIG_PPC_MULTIPLATFORM
756
757_GLOBAL(enter_prom)
758 mflr r0
759 std r0,16(r1)
760 stdu r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */
761
762 /* Because PROM is running in 32b mode, it clobbers the high order half
763 * of all registers that it saves. We therefore save those registers
764 * PROM might touch to the stack. (r0, r3-r13 are caller saved)
765 */
766 SAVE_8GPRS(2, r1)
767 SAVE_GPR(13, r1)
768 SAVE_8GPRS(14, r1)
769 SAVE_10GPRS(22, r1)
770 mfcr r4
771 std r4,_CCR(r1)
772 mfctr r5
773 std r5,_CTR(r1)
774 mfspr r6,SPRN_XER
775 std r6,_XER(r1)
776 mfdar r7
777 std r7,_DAR(r1)
778 mfdsisr r8
779 std r8,_DSISR(r1)
780 mfsrr0 r9
781 std r9,_SRR0(r1)
782 mfsrr1 r10
783 std r10,_SRR1(r1)
784 mfmsr r11
785 std r11,_MSR(r1)
786
787 /* Get the PROM entrypoint */
788 ld r0,GPR4(r1)
789 mtlr r0
790
791 /* Switch MSR to 32 bits mode
792 */
793 mfmsr r11
794 li r12,1
795 rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
796 andc r11,r11,r12
797 li r12,1
798 rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
799 andc r11,r11,r12
800 mtmsrd r11
801 isync
802
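	/* i.e. msr &= ~(MSR_SF | MSR_ISF): run Open Firmware in 32-bit
	 * mode for both execution and interrupts, since the PROM client
	 * interface is 32-bit (a restatement of the two rldicr/andc
	 * pairs above). */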
803 /* Restore arguments & enter PROM here... */
804 ld r3,GPR3(r1)
805 blrl
806
807	/* Just make sure that r1's top 32 bits didn't get
808	 * corrupted by OF
809 */
810 rldicl r1,r1,0,32
811
812 /* Restore the MSR (back to 64 bits) */
813 ld r0,_MSR(r1)
814 mtmsrd r0
815 isync
816
817 /* Restore other registers */
818 REST_GPR(2, r1)
819 REST_GPR(13, r1)
820 REST_8GPRS(14, r1)
821 REST_10GPRS(22, r1)
822 ld r4,_CCR(r1)
823 mtcr r4
824 ld r5,_CTR(r1)
825 mtctr r5
826 ld r6,_XER(r1)
827 mtspr SPRN_XER,r6
828 ld r7,_DAR(r1)
829 mtdar r7
830 ld r8,_DSISR(r1)
831 mtdsisr r8
832 ld r9,_SRR0(r1)
833 mtsrr0 r9
834 ld r10,_SRR1(r1)
835 mtsrr1 r10
836
837 addi r1,r1,PROM_FRAME_SIZE
838 ld r0,16(r1)
839 mtlr r0
840 blr
841
842#endif /* CONFIG_PPC_MULTIPLATFORM */
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
new file mode 100644
index 000000000000..4d6001fa1cf2
--- /dev/null
+++ b/arch/powerpc/kernel/fpu.S
@@ -0,0 +1,144 @@
1/*
2 * FPU support code, moved here from head.S so that it can be used
3 * by chips which use other head-whatever.S files.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 *
10 */
11
12#include <linux/config.h>
13#include <asm/reg.h>
14#include <asm/page.h>
15#include <asm/mmu.h>
16#include <asm/pgtable.h>
17#include <asm/cputable.h>
18#include <asm/cache.h>
19#include <asm/thread_info.h>
20#include <asm/ppc_asm.h>
21#include <asm/asm-offsets.h>
22
23/*
24 * This task wants to use the FPU now.
25 * On UP, disable FP for the task which had the FPU previously,
26 * and save its floating-point registers in its thread_struct.
27 * Load up this task's FP registers from its thread_struct,
28 * enable the FPU for the current task and return to the task.
29 */
30_GLOBAL(load_up_fpu)
31 mfmsr r5
32 ori r5,r5,MSR_FP
33 SYNC
34 MTMSRD(r5) /* enable use of fpu now */
35 isync
36/*
37 * For SMP, we don't do lazy FPU switching because it just gets too
38 * horrendously complex, especially when a task switches from one CPU
39 * to another. Instead we call giveup_fpu in switch_to.
40 */
41#ifndef CONFIG_SMP
42 LOADBASE(r3, last_task_used_math)
43 toreal(r3)
44 LDL r4,OFF(last_task_used_math)(r3)
45 CMPI 0,r4,0
46 beq 1f
47 toreal(r4)
48 addi r4,r4,THREAD /* want last_task_used_math->thread */
49 SAVE_32FPRS(0, r4)
50 mffs fr0
51 stfd fr0,THREAD_FPSCR(r4)
52 LDL r5,PT_REGS(r4)
53 toreal(r5)
54 LDL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
55 li r10,MSR_FP|MSR_FE0|MSR_FE1
56 andc r4,r4,r10 /* disable FP for previous task */
57 STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
581:
59#endif /* CONFIG_SMP */
60 /* enable use of FP after return */
61#ifdef CONFIG_PPC32
62 mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */
63 lwz r4,THREAD_FPEXC_MODE(r5)
64 ori r9,r9,MSR_FP /* enable FP for current */
65 or r9,r9,r4
66#else
67 ld r4,PACACURRENT(r13)
68 addi r5,r4,THREAD /* Get THREAD */
69 ld r4,THREAD_FPEXC_MODE(r5)
70 ori r12,r12,MSR_FP
71 or r12,r12,r4
72 std r12,_MSR(r1)
73#endif
74 lfd fr0,THREAD_FPSCR(r5)
75 mtfsf 0xff,fr0
76 REST_32FPRS(0, r5)
77#ifndef CONFIG_SMP
78 subi r4,r5,THREAD
79 fromreal(r4)
80 STL r4,OFF(last_task_used_math)(r3)
81#endif /* CONFIG_SMP */
82 /* restore registers and return */
83 /* we haven't used ctr or xer or lr */
84 b fast_exception_return
85
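/*
 * UP lazy-FPU sketch of load_up_fpu (illustrative C; last_task_used_math
 * is the variable actually used above):
 *
 *	if (last_task_used_math) {
 *		// save 32 FPRs + FPSCR into its thread_struct
 *		last_task_used_math->thread.regs->msr &=
 *			~(MSR_FP | MSR_FE0 | MSR_FE1);
 *	}
 *	// restore FPSCR + 32 FPRs from current->thread
 *	return_msr |= MSR_FP | current->thread.fpexc_mode;
 *	last_task_used_math = current;	// skipped entirely on SMP
 */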
86/*
87 * giveup_fpu(tsk)
88 * Disable FP for the task given as the argument,
89 * and save the floating-point registers in its thread_struct.
90 * Enables the FPU for use in the kernel on return.
91 */
92_GLOBAL(giveup_fpu)
93 mfmsr r5
94 ori r5,r5,MSR_FP
95 SYNC_601
96 ISYNC_601
97 MTMSRD(r5) /* enable use of fpu now */
98 SYNC_601
99 isync
100 CMPI 0,r3,0
101 beqlr- /* if no previous owner, done */
102 addi r3,r3,THREAD /* want THREAD of task */
103 LDL r5,PT_REGS(r3)
104 CMPI 0,r5,0
105 SAVE_32FPRS(0, r3)
106 mffs fr0
107 stfd fr0,THREAD_FPSCR(r3)
108 beq 1f
109 LDL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
110 li r3,MSR_FP|MSR_FE0|MSR_FE1
111 andc r4,r4,r3 /* disable FP for previous task */
112 STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1131:
114#ifndef CONFIG_SMP
115 li r5,0
116 LOADBASE(r4,last_task_used_math)
117 STL r5,OFF(last_task_used_math)(r4)
118#endif /* CONFIG_SMP */
119 blr
120
121/*
122 * These are used in the alignment trap handler when emulating
123 * single-precision loads and stores.
124 * We restore and save the fpscr so the task gets the same result
125 * and exceptions as if the cpu had performed the load or store.
126 */
127
128_GLOBAL(cvt_fd)
129 lfd 0,THREAD_FPSCR(r5) /* load up fpscr value */
130 mtfsf 0xff,0
131 lfs 0,0(r3)
132 stfd 0,0(r4)
133 mffs 0
134 stfd 0,THREAD_FPSCR(r5) /* save new fpscr value */
135 blr
136
137_GLOBAL(cvt_df)
138 lfd 0,THREAD_FPSCR(r5) /* load up fpscr value */
139 mtfsf 0xff,0
140 lfd 0,0(r3)
141 stfs 0,0(r4)
142 mffs 0
143 stfd 0,THREAD_FPSCR(r5) /* save new fpscr value */
144 blr
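/*
 * Presumed C-level prototypes, matching the r3/r4/r5 usage above:
 *
 *	void cvt_fd(float *from, double *to, struct thread_struct *t);
 *	void cvt_df(double *from, float *to, struct thread_struct *t);
 *
 * Each runs with the task's saved FPSCR live so the rounding mode and
 * exception bits match what a hardware load or store would produce.
 */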
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
new file mode 100644
index 000000000000..b102e3a2415e
--- /dev/null
+++ b/arch/powerpc/kernel/head_32.S
@@ -0,0 +1,1381 @@
1/*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
6 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
7 * Adapted for Power Macintosh by Paul Mackerras.
8 * Low-level exception handlers and MMU support
9 * rewritten by Paul Mackerras.
10 * Copyright (C) 1996 Paul Mackerras.
11 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
12 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
13 *
14 * This file contains the low-level support and setup for the
15 * PowerPC platform, including trap and interrupt dispatch.
16 * (The PPC 8xx embedded CPUs use head_8xx.S instead.)
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
22 *
23 */
24
25#include <linux/config.h>
26#include <asm/reg.h>
27#include <asm/page.h>
28#include <asm/mmu.h>
29#include <asm/pgtable.h>
30#include <asm/cputable.h>
31#include <asm/cache.h>
32#include <asm/thread_info.h>
33#include <asm/ppc_asm.h>
34#include <asm/asm-offsets.h>
35
36#ifdef CONFIG_APUS
37#include <asm/amigappc.h>
38#endif
39
40/* The 601 only has IBATs; cr0.eq is set on 601 when using this macro */
41#define LOAD_BAT(n, reg, RA, RB) \
42 /* see the comment for clear_bats() -- Cort */ \
43 li RA,0; \
44 mtspr SPRN_IBAT##n##U,RA; \
45 mtspr SPRN_DBAT##n##U,RA; \
46 lwz RA,(n*16)+0(reg); \
47 lwz RB,(n*16)+4(reg); \
48 mtspr SPRN_IBAT##n##U,RA; \
49 mtspr SPRN_IBAT##n##L,RB; \
50 beq 1f; \
51 lwz RA,(n*16)+8(reg); \
52 lwz RB,(n*16)+12(reg); \
53 mtspr SPRN_DBAT##n##U,RA; \
54 mtspr SPRN_DBAT##n##L,RB; \
551:
56
57 .text
58 .stabs "arch/powerpc/kernel/",N_SO,0,0,0f
59 .stabs "head_32.S",N_SO,0,0,0f
600:
61 .globl _stext
62_stext:
63
64/*
65 * _start is defined this way because the XCOFF loader in the OpenFirmware
66 * on the powermac expects the entry point to be a procedure descriptor.
67 */
68 .text
69 .globl _start
70_start:
71 /*
72	 * These are here for legacy reasons; the kernel used to
73	 * need to look like a COFF function entry for the pmac,
74	 * but we're always started by some kind of bootloader now.
75 * -- Cort
76 */
77 nop /* used by __secondary_hold on prep (mtx) and chrp smp */
78 nop /* used by __secondary_hold on prep (mtx) and chrp smp */
79 nop
80
81/* PMAC
82 * Enter here with the kernel text, data and bss loaded starting at
83 * 0, running with virtual == physical mapping.
84 * r5 points to the prom entry point (the client interface handler
85 * address). Address translation is turned on, with the prom
86 * managing the hash table. Interrupts are disabled. The stack
87 * pointer (r1) points to just below the end of the half-meg region
88 * from 0x380000 - 0x400000, which is mapped in already.
89 *
90 * If we are booted from MacOS via BootX, we enter with the kernel
91 * image loaded somewhere, and the following values in registers:
92 * r3: 'BooX' (0x426f6f58)
93 * r4: virtual address of boot_infos_t
94 * r5: 0
95 *
96 * APUS
97 * r3: 'APUS'
98 * r4: physical address of memory base
99 * Linux/m68k style BootInfo structure at &_end.
100 *
101 * PREP
102 * This is jumped to on prep systems right after the kernel is relocated
103 * to its proper place in memory by the boot loader. The expected layout
104 * of the regs is:
105 * r3: ptr to residual data
106 * r4: initrd_start or if no initrd then 0
107 * r5: initrd_end - unused if r4 is 0
108 * r6: Start of command line string
109 * r7: End of command line string
110 *
111 * This just gets a minimal mmu environment setup so we can call
112 * start_here() to do the real work.
113 * -- Cort
114 */
115
116 .globl __start
117__start:
118/*
119 * We have to do any OF calls before we map ourselves to KERNELBASE,
120 * because OF may have I/O devices mapped into that area
121 * (particularly on CHRP).
122 */
123 cmpwi 0,r5,0
124 beq 1f
125 bl prom_init
126 trap
127
1281: mr r31,r3 /* save parameters */
129 mr r30,r4
130 li r24,0 /* cpu # */
131
132/*
133 * early_init() does the early machine identification and does
134 * the necessary low-level setup and clears the BSS
135 * -- Cort <cort@fsmlabs.com>
136 */
137 bl early_init
138
139#ifdef CONFIG_APUS
140/* On APUS the __va/__pa constants need to be set to the correct
141 * values before continuing.
142 */
143 mr r4,r30
144 bl fix_mem_constants
145#endif /* CONFIG_APUS */
146
147/* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
148 * the physical address we are running at, returned by early_init()
149 */
150 bl mmu_off
151__after_mmu_off:
152 bl clear_bats
153 bl flush_tlbs
154
155 bl initial_bats
156
157/*
158 * Call setup_cpu for CPU 0 and initialize 6xx Idle
159 */
160 bl reloc_offset
161 li r24,0 /* cpu# */
162 bl call_setup_cpu /* Call setup_cpu for this CPU */
163#ifdef CONFIG_6xx
164 bl reloc_offset
165 bl init_idle_6xx
166#endif /* CONFIG_6xx */
167
168
169#ifndef CONFIG_APUS
170/*
171 * We need to run with _start at physical address 0.
172 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
173 * the exception vectors at 0 (and therefore this copy
174 * overwrites OF's exception vectors with our own).
175 * The MMU is off at this point.
176 */
177 bl reloc_offset
178 mr r26,r3
179 addis r4,r3,KERNELBASE@h /* current address of _start */
180 cmpwi 0,r4,0 /* are we already running at 0? */
181 bne relocate_kernel
182#endif /* CONFIG_APUS */
183/*
184 * we now have the 1st 16M of ram mapped with the bats.
185 * prep needs the mmu to be turned on here, but pmac already has it on.
186 * this shouldn't bother the pmac since it just gets turned on again
187 * as we jump to our code at KERNELBASE. -- Cort
188 * Actually no, pmac doesn't have it on any more. BootX enters with MMU
189 * off, and in other cases, we now turn it off before changing BATs above.
190 */
191turn_on_mmu:
192 mfmsr r0
193 ori r0,r0,MSR_DR|MSR_IR
194 mtspr SPRN_SRR1,r0
195 lis r0,start_here@h
196 ori r0,r0,start_here@l
197 mtspr SPRN_SRR0,r0
198 SYNC
199 RFI /* enables MMU */
200
201/*
202 * We need __secondary_hold as a place to hold the other cpus on
203 * an SMP machine, even when we are running a UP kernel.
204 */
205 . = 0xc0 /* for prep bootloader */
206 li r3,1 /* MTX only has 1 cpu */
207 .globl __secondary_hold
208__secondary_hold:
209 /* tell the master we're here */
210 stw r3,__secondary_hold_acknowledge@l(0)
211#ifdef CONFIG_SMP
212100: lwz r4,0(0)
213 /* wait until we're told to start */
214 cmpw 0,r4,r3
215 bne 100b
216 /* our cpu # was at addr 0 - go */
217 mr r24,r3 /* cpu # */
218 b __secondary_start
219#else
220 b .
221#endif /* CONFIG_SMP */
222
223 .globl __secondary_hold_spinloop
224__secondary_hold_spinloop:
225 .long 0
226 .globl __secondary_hold_acknowledge
227__secondary_hold_acknowledge:
228 .long -1
229
230/*
231 * Exception entry code. This code runs with address translation
232 * turned off, i.e. using physical addresses.
233 * We assume sprg3 has the physical address of the current
234 * task's thread_struct.
235 */
236#define EXCEPTION_PROLOG \
237 mtspr SPRN_SPRG0,r10; \
238 mtspr SPRN_SPRG1,r11; \
239 mfcr r10; \
240 EXCEPTION_PROLOG_1; \
241 EXCEPTION_PROLOG_2
242
243#define EXCEPTION_PROLOG_1 \
244 mfspr r11,SPRN_SRR1; /* check whether user or kernel */ \
245 andi. r11,r11,MSR_PR; \
246 tophys(r11,r1); /* use tophys(r1) if kernel */ \
247 beq 1f; \
248 mfspr r11,SPRN_SPRG3; \
249 lwz r11,THREAD_INFO-THREAD(r11); \
250 addi r11,r11,THREAD_SIZE; \
251 tophys(r11,r11); \
2521: subi r11,r11,INT_FRAME_SIZE /* alloc exc. frame */
253
254
255#define EXCEPTION_PROLOG_2 \
256 CLR_TOP32(r11); \
257 stw r10,_CCR(r11); /* save registers */ \
258 stw r12,GPR12(r11); \
259 stw r9,GPR9(r11); \
260 mfspr r10,SPRN_SPRG0; \
261 stw r10,GPR10(r11); \
262 mfspr r12,SPRN_SPRG1; \
263 stw r12,GPR11(r11); \
264 mflr r10; \
265 stw r10,_LINK(r11); \
266 mfspr r12,SPRN_SRR0; \
267 mfspr r9,SPRN_SRR1; \
268 stw r1,GPR1(r11); \
269 stw r1,0(r11); \
270 tovirt(r1,r11); /* set new kernel sp */ \
271 li r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \
272 MTMSRD(r10); /* (except for mach check in rtas) */ \
273 stw r0,GPR0(r11); \
274 lis r10,0x7265; /* put exception frame marker */ \
275 addi r10,r10,0x6773; \
276 stw r10,8(r11); \
277 SAVE_4GPRS(3, r11); \
278 SAVE_2GPRS(7, r11)
279
280/*
281 * Note: code which follows this uses cr0.eq (set if from kernel),
282 * r11, r12 (SRR0), and r9 (SRR1).
283 *
284 * Note2: once we have set r1 we are in a position to take exceptions
285 * again, and we could thus set MSR:RI at that point.
286 */
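/*
 * Frame-layout summary (illustrative): after EXCEPTION_PROLOG, r11
 * holds the physical address of a fresh INT_FRAME on the kernel stack
 * (taken from the task's thread_info when coming from user mode), with
 * CR, LR, SRR0/SRR1, r0, the old r1 and r3-r12 saved and the ASCII
 * "regs" marker (0x72656773) at offset 8; r1 is the frame's virtual
 * address.
 */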
287
288/*
289 * Exception vectors.
290 */
291#define EXCEPTION(n, label, hdlr, xfer) \
292 . = n; \
293label: \
294 EXCEPTION_PROLOG; \
295 addi r3,r1,STACK_FRAME_OVERHEAD; \
296 xfer(n, hdlr)
297
298#define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret) \
299 li r10,trap; \
300 stw r10,_TRAP(r11); \
301 li r10,MSR_KERNEL; \
302 copyee(r10, r9); \
303 bl tfer; \
304i##n: \
305 .long hdlr; \
306 .long ret
307
308#define COPY_EE(d, s) rlwimi d,s,0,16,16
309#define NOCOPY(d, s)
310
311#define EXC_XFER_STD(n, hdlr) \
312 EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full, \
313 ret_from_except_full)
314
315#define EXC_XFER_LITE(n, hdlr) \
316 EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \
317 ret_from_except)
318
319#define EXC_XFER_EE(n, hdlr) \
320 EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \
321 ret_from_except_full)
322
323#define EXC_XFER_EE_LITE(n, hdlr) \
324 EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \
325 ret_from_except)
326
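/*
 * The EXC_XFER_* variants choose a transfer routine and return path:
 * _STD/_EE use transfer_to_handler_full with ret_from_except_full,
 * while _LITE/_EE_LITE use the lighter pair and record the trap number
 * as n+1 -- the low bit of _TRAP meaning "nvgprs not yet saved", the
 * convention tested elsewhere in this file.  COPY_EE copies the
 * interrupted context's MSR_EE into the handler's MSR so the handler
 * runs with the same interrupt-enable state.
 */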
327/* System reset */
328/* core99 pmac starts the secondary here by changing the vector, and
329 putting it back to what it was (unknown_exception) when done. */
330#if defined(CONFIG_GEMINI) && defined(CONFIG_SMP)
331 . = 0x100
332 b __secondary_start_gemini
333#else
334 EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)
335#endif
336
337/* Machine check */
338/*
339 * On CHRP, this is complicated by the fact that we could get a
340 * machine check inside RTAS, and we have no guarantee that certain
341 * critical registers will have the values we expect. The set of
342 * registers that might have bad values includes all the GPRs
343 * and all the BATs. We indicate that we are in RTAS by putting
344 * a non-zero value, the address of the exception frame to use,
345 * in SPRG2. The machine check handler checks SPRG2 and uses its
346 * value if it is non-zero. If we ever needed to free up SPRG2,
347 * we could use a field in the thread_info or thread_struct instead.
348 * (Other exception handlers assume that r1 is a valid kernel stack
349 * pointer when we take an exception from supervisor mode.)
350 * -- paulus.
351 */
352 . = 0x200
353 mtspr SPRN_SPRG0,r10
354 mtspr SPRN_SPRG1,r11
355 mfcr r10
356#ifdef CONFIG_PPC_CHRP
357 mfspr r11,SPRN_SPRG2
358 cmpwi 0,r11,0
359 bne 7f
360#endif /* CONFIG_PPC_CHRP */
361 EXCEPTION_PROLOG_1
3627: EXCEPTION_PROLOG_2
363 addi r3,r1,STACK_FRAME_OVERHEAD
364#ifdef CONFIG_PPC_CHRP
365 mfspr r4,SPRN_SPRG2
366 cmpwi cr1,r4,0
367 bne cr1,1f
368#endif
369 EXC_XFER_STD(0x200, machine_check_exception)
370#ifdef CONFIG_PPC_CHRP
3711: b machine_check_in_rtas
372#endif
373
374/* Data access exception. */
375 . = 0x300
376DataAccess:
377 EXCEPTION_PROLOG
378 mfspr r10,SPRN_DSISR
379 andis. r0,r10,0xa470 /* weird error? */
380 bne 1f /* if not, try to put a PTE */
381 mfspr r4,SPRN_DAR /* into the hash table */
382 rlwinm r3,r10,32-15,21,21 /* DSISR_STORE -> _PAGE_RW */
383 bl hash_page
3841: stw r10,_DSISR(r11)
385 mr r5,r10
386 mfspr r4,SPRN_DAR
387 EXC_XFER_EE_LITE(0x300, handle_page_fault)
388
389
390/* Instruction access exception. */
391 . = 0x400
392InstructionAccess:
393 EXCEPTION_PROLOG
394 andis. r0,r9,0x4000 /* no pte found? */
395 beq 1f /* if so, try to put a PTE */
396 li r3,0 /* into the hash table */
397 mr r4,r12 /* SRR0 is fault address */
398 bl hash_page
3991: mr r4,r12
400 mr r5,r9
401 EXC_XFER_EE_LITE(0x400, handle_page_fault)
402
403/* External interrupt */
404 EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
405
406/* Alignment exception */
407 . = 0x600
408Alignment:
409 EXCEPTION_PROLOG
410 mfspr r4,SPRN_DAR
411 stw r4,_DAR(r11)
412 mfspr r5,SPRN_DSISR
413 stw r5,_DSISR(r11)
414 addi r3,r1,STACK_FRAME_OVERHEAD
415 EXC_XFER_EE(0x600, alignment_exception)
416
417/* Program check exception */
418 EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
419
420/* Floating-point unavailable */
421 . = 0x800
422FPUnavailable:
423 EXCEPTION_PROLOG
424 bne load_up_fpu /* if from user, just load it up */
425 addi r3,r1,STACK_FRAME_OVERHEAD
426 EXC_XFER_EE_LITE(0x800, kernel_fp_unavailable_exception)
427
428/* Decrementer */
429 EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
430
431 EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_EE)
432 EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_EE)
433
434/* System call */
435 . = 0xc00
436SystemCall:
437 EXCEPTION_PROLOG
438 EXC_XFER_EE_LITE(0xc00, DoSyscall)
439
440/* Single step - not used on 601 */
441 EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
442 EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_EE)
443
444/*
445 * The Altivec unavailable trap is at 0x0f20. Foo.
446 * We effectively remap it to 0x3000.
447 * We include an altivec unavailable exception vector even if
448 * not configured for Altivec, so that you can't panic a
449 * non-altivec kernel running on a machine with altivec just
450 * by executing an altivec instruction.
451 */
452 . = 0xf00
453 b Trap_0f
454
455 . = 0xf20
456 b AltiVecUnavailable
457
458Trap_0f:
459 EXCEPTION_PROLOG
460 addi r3,r1,STACK_FRAME_OVERHEAD
461 EXC_XFER_EE(0xf00, unknown_exception)
462
463/*
464 * Handle TLB miss for instruction on 603/603e.
465 * Note: we get an alternate set of r0 - r3 to use automatically.
466 */
467 . = 0x1000
468InstructionTLBMiss:
469/*
470 * r0: stored ctr
471 * r1: linux style pte ( later becomes ppc hardware pte )
472 * r2: ptr to linux-style pte
473 * r3: scratch
474 */
475 mfctr r0
476 /* Get PTE (linux-style) and check access */
477 mfspr r3,SPRN_IMISS
478 lis r1,KERNELBASE@h /* check if kernel address */
479 cmplw 0,r3,r1
480 mfspr r2,SPRN_SPRG3
481 li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
482 lwz r2,PGDIR(r2)
483 blt+ 112f
484 lis r2,swapper_pg_dir@ha /* if kernel address, use */
485 addi r2,r2,swapper_pg_dir@l /* kernel page table */
486 mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */
487 rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */
488112: tophys(r2,r2)
489 rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
490 lwz r2,0(r2) /* get pmd entry */
491 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
492 beq- InstructionAddressInvalid /* return if no mapping */
493 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
494 lwz r3,0(r2) /* get linux-style pte */
495 andc. r1,r1,r3 /* check access & ~permission */
496 bne- InstructionAddressInvalid /* return if access not permitted */
497 ori r3,r3,_PAGE_ACCESSED /* set _PAGE_ACCESSED in pte */
498 /*
499	 * NOTE! We are assuming this is not an SMP system; otherwise
500 * we would need to update the pte atomically with lwarx/stwcx.
501 */
502 stw r3,0(r2) /* update PTE (accessed bit) */
503 /* Convert linux-style PTE to low word of PPC-style PTE */
504 rlwinm r1,r3,32-10,31,31 /* _PAGE_RW -> PP lsb */
505 rlwinm r2,r3,32-7,31,31 /* _PAGE_DIRTY -> PP lsb */
506 and r1,r1,r2 /* writable if _RW and _DIRTY */
507 rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */
508 rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */
509 ori r1,r1,0xe14 /* clear out reserved bits and M */
510 andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */
511 mtspr SPRN_RPA,r1
512 mfspr r3,SPRN_IMISS
513 tlbli r3
514 mfspr r3,SPRN_SRR1 /* Need to restore CR0 */
515 mtcrf 0x80,r3
516 rfi
517InstructionAddressInvalid:
518 mfspr r3,SPRN_SRR1
519 rlwinm r1,r3,9,6,6 /* Get load/store bit */
520
521 addis r1,r1,0x2000
522 mtspr SPRN_DSISR,r1 /* (shouldn't be needed) */
523 mtctr r0 /* Restore CTR */
524 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
525 or r2,r2,r1
526 mtspr SPRN_SRR1,r2
527 mfspr r1,SPRN_IMISS /* Get failing address */
528 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
529 rlwimi r2,r2,1,30,30 /* change 1 -> 3 */
530 xor r1,r1,r2
531 mtspr SPRN_DAR,r1 /* Set fault address */
532 mfmsr r0 /* Restore "normal" registers */
533 xoris r0,r0,MSR_TGPR>>16
534 mtcrf 0x80,r3 /* Restore CR0 */
535 mtmsr r0
536 b InstructionAccess
537
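/*
 * Software TLB-reload sketch for the three 603/603e miss handlers
 * (this one and the two data-miss handlers below), in illustrative C;
 * pgdir comes from SPRG3 for user addresses and from swapper_pg_dir
 * for kernel addresses:
 *
 *	pmd = pgdir[ea >> 22];
 *	if (!(pmd & ~0xfff))
 *		goto invalid;				// no pte page
 *	pte = ((u32 *)(pmd & ~0xfff))[(ea >> 12) & 0x3ff];
 *	if (required_access & ~pte)
 *		goto invalid;				// permission
 *	pte |= _PAGE_ACCESSED;				// UP only: no lwarx
 *	mtspr(SPRN_RPA, linux_pte_to_hw_pte(pte));
 *	tlbli(ea);					// tlbld for data
 *	rfi();
 */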
538/*
539 * Handle TLB miss for DATA Load operation on 603/603e
540 */
541 . = 0x1100
542DataLoadTLBMiss:
543/*
544 * r0: stored ctr
545 * r1: linux style pte ( later becomes ppc hardware pte )
546 * r2: ptr to linux-style pte
547 * r3: scratch
548 */
549 mfctr r0
550 /* Get PTE (linux-style) and check access */
551 mfspr r3,SPRN_DMISS
552 lis r1,KERNELBASE@h /* check if kernel address */
553 cmplw 0,r3,r1
554 mfspr r2,SPRN_SPRG3
555 li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
556 lwz r2,PGDIR(r2)
557 blt+ 112f
558 lis r2,swapper_pg_dir@ha /* if kernel address, use */
559 addi r2,r2,swapper_pg_dir@l /* kernel page table */
560 mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */
561 rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */
562112: tophys(r2,r2)
563 rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
564 lwz r2,0(r2) /* get pmd entry */
565 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
566 beq- DataAddressInvalid /* return if no mapping */
567 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
568 lwz r3,0(r2) /* get linux-style pte */
569 andc. r1,r1,r3 /* check access & ~permission */
570 bne- DataAddressInvalid /* return if access not permitted */
571 ori r3,r3,_PAGE_ACCESSED /* set _PAGE_ACCESSED in pte */
572 /*
573	 * NOTE! We are assuming this is not an SMP system; otherwise
574 * we would need to update the pte atomically with lwarx/stwcx.
575 */
576 stw r3,0(r2) /* update PTE (accessed bit) */
577 /* Convert linux-style PTE to low word of PPC-style PTE */
578 rlwinm r1,r3,32-10,31,31 /* _PAGE_RW -> PP lsb */
579 rlwinm r2,r3,32-7,31,31 /* _PAGE_DIRTY -> PP lsb */
580 and r1,r1,r2 /* writable if _RW and _DIRTY */
581 rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */
582 rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */
583 ori r1,r1,0xe14 /* clear out reserved bits and M */
584 andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */
585 mtspr SPRN_RPA,r1
586 mfspr r3,SPRN_DMISS
587 tlbld r3
588 mfspr r3,SPRN_SRR1 /* Need to restore CR0 */
589 mtcrf 0x80,r3
590 rfi
591DataAddressInvalid:
592 mfspr r3,SPRN_SRR1
593 rlwinm r1,r3,9,6,6 /* Get load/store bit */
594 addis r1,r1,0x2000
595 mtspr SPRN_DSISR,r1
596 mtctr r0 /* Restore CTR */
597 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
598 mtspr SPRN_SRR1,r2
599 mfspr r1,SPRN_DMISS /* Get failing address */
600 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
601 beq 20f /* Jump if big endian */
602 xori r1,r1,3
60320: mtspr SPRN_DAR,r1 /* Set fault address */
604 mfmsr r0 /* Restore "normal" registers */
605 xoris r0,r0,MSR_TGPR>>16
606 mtcrf 0x80,r3 /* Restore CR0 */
607 mtmsr r0
608 b DataAccess
609
610/*
611 * Handle TLB miss for DATA Store on 603/603e
612 */
613 . = 0x1200
614DataStoreTLBMiss:
615/*
616 * r0: stored ctr
617 * r1: linux style pte ( later becomes ppc hardware pte )
618 * r2: ptr to linux-style pte
619 * r3: scratch
620 */
621 mfctr r0
622 /* Get PTE (linux-style) and check access */
623 mfspr r3,SPRN_DMISS
624 lis r1,KERNELBASE@h /* check if kernel address */
625 cmplw 0,r3,r1
626 mfspr r2,SPRN_SPRG3
627 li r1,_PAGE_RW|_PAGE_USER|_PAGE_PRESENT /* access flags */
628 lwz r2,PGDIR(r2)
629 blt+ 112f
630 lis r2,swapper_pg_dir@ha /* if kernel address, use */
631 addi r2,r2,swapper_pg_dir@l /* kernel page table */
632 mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */
633 rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */
634112: tophys(r2,r2)
635 rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
636 lwz r2,0(r2) /* get pmd entry */
637 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
638 beq- DataAddressInvalid /* return if no mapping */
639 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
640 lwz r3,0(r2) /* get linux-style pte */
641 andc. r1,r1,r3 /* check access & ~permission */
642 bne- DataAddressInvalid /* return if access not permitted */
643 ori r3,r3,_PAGE_ACCESSED|_PAGE_DIRTY
644 /*
645	 * NOTE! We are assuming this is not an SMP system; otherwise
646 * we would need to update the pte atomically with lwarx/stwcx.
647 */
648 stw r3,0(r2) /* update PTE (accessed/dirty bits) */
649 /* Convert linux-style PTE to low word of PPC-style PTE */
650 rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */
651 li r1,0xe15 /* clear out reserved bits and M */
652 andc r1,r3,r1 /* PP = user? 2: 0 */
653 mtspr SPRN_RPA,r1
654 mfspr r3,SPRN_DMISS
655 tlbld r3
656 mfspr r3,SPRN_SRR1 /* Need to restore CR0 */
657 mtcrf 0x80,r3
658 rfi
659
660#ifndef CONFIG_ALTIVEC
661#define altivec_assist_exception unknown_exception
662#endif
663
664 EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_EE)
665 EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_EE)
666 EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
667 EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_EE)
668 EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
669 EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
670 EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
671 EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE)
672 EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE)
673 EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE)
674 EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE)
675 EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE)
676 EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE)
677 EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_EE)
678 EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_EE)
679 EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_EE)
680 EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_EE)
681 EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_EE)
682 EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_EE)
683 EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_EE)
684 EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_EE)
685 EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_EE)
686 EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_EE)
687 EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_EE)
688 EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_EE)
689 EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_EE)
690 EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_EE)
691 EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_EE)
692 EXCEPTION(0x2f00, MOLTrampoline, unknown_exception, EXC_XFER_EE_LITE)
693
694 .globl mol_trampoline
695 .set mol_trampoline, i0x2f00
696
697 . = 0x3000
698
699AltiVecUnavailable:
700 EXCEPTION_PROLOG
701#ifdef CONFIG_ALTIVEC
702 bne load_up_altivec /* if from user, just load it up */
703#endif /* CONFIG_ALTIVEC */
704 EXC_XFER_EE_LITE(0xf20, altivec_unavailable_exception)
705
706#ifdef CONFIG_ALTIVEC
707/* Note that the AltiVec support is closely modeled after the FP
708 * support. Changes to one are likely to be applicable to the
709 * other! */
710load_up_altivec:
711/*
712 * Disable AltiVec for the task which had AltiVec previously,
713 * and save its AltiVec registers in its thread_struct.
714 * Enables AltiVec for use in the kernel on return.
715 * On SMP we know the AltiVec units are free, since we give them up on
716 * every switch. -- Kumar
717 */
718 mfmsr r5
719 oris r5,r5,MSR_VEC@h
720 MTMSRD(r5) /* enable use of AltiVec now */
721 isync
722/*
723 * For SMP, we don't do lazy AltiVec switching because it just gets too
724 * horrendously complex, especially when a task switches from one CPU
725 * to another. Instead we call giveup_altivec in switch_to.
726 */
727#ifndef CONFIG_SMP
728 tophys(r6,0)
729 addis r3,r6,last_task_used_altivec@ha
730 lwz r4,last_task_used_altivec@l(r3)
731 cmpwi 0,r4,0
732 beq 1f
733 add r4,r4,r6
734 addi r4,r4,THREAD /* want THREAD of last_task_used_altivec */
735 SAVE_32VRS(0,r10,r4)
736 mfvscr vr0
737 li r10,THREAD_VSCR
738 stvx vr0,r10,r4
739 lwz r5,PT_REGS(r4)
740 add r5,r5,r6
741 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
742 lis r10,MSR_VEC@h
743 andc r4,r4,r10 /* disable altivec for previous task */
744 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
7451:
746#endif /* CONFIG_SMP */
747 /* enable use of AltiVec after return */
748 oris r9,r9,MSR_VEC@h
749 mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */
750 li r4,1
751 li r10,THREAD_VSCR
752 stw r4,THREAD_USED_VR(r5)
753 lvx vr0,r10,r5
754 mtvscr vr0
755 REST_32VRS(0,r10,r5)
756#ifndef CONFIG_SMP
757 subi r4,r5,THREAD
758 sub r4,r4,r6
759 stw r4,last_task_used_altivec@l(r3)
760#endif /* CONFIG_SMP */
761 /* restore registers and return */
762 /* we haven't used ctr or xer or lr */
763 b fast_exception_return
764
765/*
766 * AltiVec unavailable trap from kernel - print a message, but let
767 * the task use AltiVec in the kernel until it returns to user mode.
768 */
769KernelAltiVec:
770 lwz r3,_MSR(r1)
771 oris r3,r3,MSR_VEC@h
772 stw r3,_MSR(r1) /* enable use of AltiVec after return */
773 lis r3,87f@h
774 ori r3,r3,87f@l
775 mr r4,r2 /* current */
776 lwz r5,_NIP(r1)
777 bl printk
778 b ret_from_except
77987: .string "AltiVec used in kernel (task=%p, pc=%x) \n"
780 .align 4,0
781
782/*
783 * giveup_altivec(tsk)
784 * Disable AltiVec for the task given as the argument,
785 * and save the AltiVec registers in its thread_struct.
786 * Enables AltiVec for use in the kernel on return.
787 */
788
789 .globl giveup_altivec
790giveup_altivec:
791 mfmsr r5
792 oris r5,r5,MSR_VEC@h
793 SYNC
794 MTMSRD(r5) /* enable use of AltiVec now */
795 isync
796 cmpwi 0,r3,0
797 beqlr- /* if no previous owner, done */
798 addi r3,r3,THREAD /* want THREAD of task */
799 lwz r5,PT_REGS(r3)
800 cmpwi 0,r5,0
801 SAVE_32VRS(0, r4, r3)
802 mfvscr vr0
803 li r4,THREAD_VSCR
804 stvx vr0,r4,r3
805 beq 1f
806 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
807 lis r3,MSR_VEC@h
808 andc r4,r4,r3 /* disable AltiVec for previous task */
809 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
8101:
811#ifndef CONFIG_SMP
812 li r5,0
813 lis r4,last_task_used_altivec@ha
814 stw r5,last_task_used_altivec@l(r4)
815#endif /* CONFIG_SMP */
816 blr
817#endif /* CONFIG_ALTIVEC */
818
819/*
820 * This code is jumped to from the startup code to copy
821 * the kernel image to physical address 0.
822 */
823relocate_kernel:
824 addis r9,r26,klimit@ha /* fetch klimit */
825 lwz r25,klimit@l(r9)
826 addis r25,r25,-KERNELBASE@h
827 li r3,0 /* Destination base address */
828 li r6,0 /* Destination offset */
829 li r5,0x4000 /* # bytes of memory to copy */
830 bl copy_and_flush /* copy the first 0x4000 bytes */
831 addi r0,r3,4f@l /* jump to the address of 4f */
832 mtctr r0 /* in copy and do the rest. */
833 bctr /* jump to the copy */
8344: mr r5,r25
835 bl copy_and_flush /* copy the rest */
836 b turn_on_mmu
837
838/*
839 * Copy routine used to copy the kernel to start at physical address 0
840 * and flush and invalidate the caches as needed.
841 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
842 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
843 */
844_GLOBAL(copy_and_flush)
845 addi r5,r5,-4
846 addi r6,r6,-4
8474: li r0,L1_CACHE_BYTES/4
848 mtctr r0
8493: addi r6,r6,4 /* copy a cache line */
850 lwzx r0,r6,r4
851 stwx r0,r6,r3
852 bdnz 3b
853 dcbst r6,r3 /* write it to memory */
854 sync
855 icbi r6,r3 /* flush the icache line */
856 cmplw 0,r6,r5
857 blt 4b
858 sync /* additional sync needed on g4 */
859 isync
860 addi r5,r5,4
861 addi r6,r6,4
862 blr
863
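/*
 * copy_and_flush in C terms (illustrative): copy [src+off, src+limit)
 * to dest+off one L1 cache line at a time, issuing dcbst/sync/icbi per
 * line so the relocated kernel is coherent in the instruction cache;
 * r6 (the offset) is left >= the limit, as the header comment states.
 */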
864#ifdef CONFIG_APUS
865/*
866 * On APUS the physical base address of the kernel is not known at compile
867 * time, which means the __pa/__va constants used are incorrect. In the
868 * __init section is recorded the virtual addresses of instructions using
869 * these constants, so all that has to be done is fix these before
870 * continuing the kernel boot.
871 *
872 * r4 = The physical address of the kernel base.
873 */
874fix_mem_constants:
875 mr r10,r4
876 addis r10,r10,-KERNELBASE@h /* virt_to_phys constant */
877 neg r11,r10 /* phys_to_virt constant */
878
879 lis r12,__vtop_table_begin@h
880 ori r12,r12,__vtop_table_begin@l
881 add r12,r12,r10 /* table begin phys address */
882 lis r13,__vtop_table_end@h
883 ori r13,r13,__vtop_table_end@l
884 add r13,r13,r10 /* table end phys address */
885 subi r12,r12,4
886 subi r13,r13,4
8871: lwzu r14,4(r12) /* virt address of instruction */
888 add r14,r14,r10 /* phys address of instruction */
889 lwz r15,0(r14) /* instruction, now insert top */
890 rlwimi r15,r10,16,16,31 /* half of vp const in low half */
891 stw r15,0(r14) /* of instruction and restore. */
892 dcbst r0,r14 /* write it to memory */
893 sync
894 icbi r0,r14 /* flush the icache line */
895 cmpw r12,r13
896 bne 1b
897 sync /* additional sync needed on g4 */
898 isync
899
900/*
901 * Map the memory where the exception handlers will
902 * be copied to when hash constants have been patched.
903 */
904#ifdef CONFIG_APUS_FAST_EXCEPT
905 lis r8,0xfff0
906#else
907 lis r8,0
908#endif
909 ori r8,r8,0x2 /* 128KB, supervisor */
910 mtspr SPRN_DBAT3U,r8
911 mtspr SPRN_DBAT3L,r8
912
913 lis r12,__ptov_table_begin@h
914 ori r12,r12,__ptov_table_begin@l
915 add r12,r12,r10 /* table begin phys address */
916 lis r13,__ptov_table_end@h
917 ori r13,r13,__ptov_table_end@l
918 add r13,r13,r10 /* table end phys address */
919 subi r12,r12,4
920 subi r13,r13,4
9211: lwzu r14,4(r12) /* virt address of instruction */
922 add r14,r14,r10 /* phys address of instruction */
923 lwz r15,0(r14) /* instruction, now insert top */
924 rlwimi r15,r11,16,16,31 /* half of pv const in low half*/
925 stw r15,0(r14) /* of instruction and restore. */
926 dcbst r0,r14 /* write it to memory */
927 sync
928 icbi r0,r14 /* flush the icache line */
929 cmpw r12,r13
930 bne 1b
931
932 sync /* additional sync needed on g4 */
933 isync /* No speculative loading until now */
934 blr
935
936/***********************************************************************
937 * Please note that on APUS the exception handlers are located at the
938 * physical address 0xfff0000. For this reason, the exception handlers
939 * cannot use relative branches to access the code below.
940 ***********************************************************************/
941#endif /* CONFIG_APUS */
942
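The two patch loops above are the same operation on two tables: each table entry is the virtual address of an instruction whose 16-bit immediate encodes half of a __pa/__va constant, and rlwimi splices the high half of the corrected constant into that immediate. In C, the fixup is roughly as below (helper and signature are hypothetical):

    /* Sketch of the __vtop/__ptov fixup: rewrite each recorded
     * instruction's immediate field with the new offset's high half,
     * then make the change visible to the instruction stream. */
    static void flush_insn_line(void *p) { (void)p; } /* stand-in: dcbst;sync;icbi */

    static void patch_offset_table_sketch(unsigned long *begin, unsigned long *end,
                                          unsigned long v2p,   /* virt-to-phys delta */
                                          unsigned long konst) /* corrected constant */
    {
        for (unsigned long *p = begin; p < end; p++) {
            unsigned int *insn = (unsigned int *)(*p + v2p);  /* virt -> phys */
            /* rlwimi rI,rK,16,16,31: high half of konst -> low 16 bits */
            *insn = (*insn & 0xffff0000u) | (unsigned int)(konst >> 16);
            flush_insn_line(insn);
        }
    }

The __vtop table gets the virt_to_phys constant (r10) and the __ptov table the phys_to_virt one (r11), which is why the loop appears twice with a different rlwimi source register.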
943#ifdef CONFIG_SMP
944#ifdef CONFIG_GEMINI
945 .globl __secondary_start_gemini
946__secondary_start_gemini:
947 mfspr r4,SPRN_HID0
948 ori r4,r4,HID0_ICFI
949 li r3,0
950 ori r3,r3,HID0_ICE
951 andc r4,r4,r3
952 mtspr SPRN_HID0,r4
953 sync
954 b __secondary_start
955#endif /* CONFIG_GEMINI */
956
957 .globl __secondary_start_pmac_0
958__secondary_start_pmac_0:
959 /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
960 li r24,0
961 b 1f
962 li r24,1
963 b 1f
964 li r24,2
965 b 1f
966 li r24,3
9671:
968 /* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
969 set to map the 0xf0000000 - 0xffffffff region */
970 mfmsr r0
971 rlwinm r0,r0,0,28,26 /* clear DR (0x10) */
972 SYNC
973 mtmsr r0
974 isync
975
976 .globl __secondary_start
977__secondary_start:
978 /* Copy some CPU settings from CPU 0 */
979 bl __restore_cpu_setup
980
981 lis r3,-KERNELBASE@h
982 mr r4,r24
983 bl call_setup_cpu /* Call setup_cpu for this CPU */
984#ifdef CONFIG_6xx
985 lis r3,-KERNELBASE@h
986 bl init_idle_6xx
987#endif /* CONFIG_6xx */
988
989 /* get current_thread_info and current */
990 lis r1,secondary_ti@ha
991 tophys(r1,r1)
992 lwz r1,secondary_ti@l(r1)
993 tophys(r2,r1)
994 lwz r2,TI_TASK(r2)
995
996 /* stack */
997 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
998 li r0,0
999 tophys(r3,r1)
1000 stw r0,0(r3)
1001
1002 /* load up the MMU */
1003 bl load_up_mmu
1004
1005 /* ptr to phys current thread */
1006 tophys(r4,r2)
1007 addi r4,r4,THREAD /* phys address of our thread_struct */
1008 CLR_TOP32(r4)
1009 mtspr SPRN_SPRG3,r4
1010 li r3,0
1011 mtspr SPRN_SPRG2,r3 /* 0 => not in RTAS */
1012
1013 /* enable MMU and jump to start_secondary */
1014 li r4,MSR_KERNEL
1015 FIX_SRR1(r4,r5)
1016 lis r3,start_secondary@h
1017 ori r3,r3,start_secondary@l
1018 mtspr SPRN_SRR0,r3
1019 mtspr SPRN_SRR1,r4
1020 SYNC
1021 RFI
1022#endif /* CONFIG_SMP */
1023
1024/*
1025 * These generic dummy functions are kept for CPUs not
1026 * included in CONFIG_6xx
1027 */
1028#if !defined(CONFIG_6xx)
1029_GLOBAL(__save_cpu_setup)
1030 blr
1031_GLOBAL(__restore_cpu_setup)
1032 blr
1033#endif /* !defined(CONFIG_6xx) */
1034
1035
1036/*
1037 * Load stuff into the MMU. Intended to be called with
1038 * IR=0 and DR=0.
1039 */
1040load_up_mmu:
1041 sync /* Force all PTE updates to finish */
1042 isync
1043 tlbia /* Clear all TLB entries */
1044 sync /* wait for tlbia/tlbie to finish */
1045 TLBSYNC /* ... on all CPUs */
1046 /* Load the SDR1 register (hash table base & size) */
1047 lis r6,_SDR1@ha
1048 tophys(r6,r6)
1049 lwz r6,_SDR1@l(r6)
1050 mtspr SPRN_SDR1,r6
1051 li r0,16 /* load up segment register values */
1052 mtctr r0 /* for context 0 */
1053 lis r3,0x2000 /* Ku = 1, VSID = 0 */
1054 li r4,0
10553: mtsrin r3,r4
1056 addi r3,r3,0x111 /* increment VSID */
1057 addis r4,r4,0x1000 /* address of next segment */
1058 bdnz 3b
1059
1060/* Load the BAT registers with the values set up by MMU_init.
1061 MMU_init takes care of whether we're on a 601 or not. */
1062 mfpvr r3
1063 srwi r3,r3,16
1064 cmpwi r3,1
1065 lis r3,BATS@ha
1066 addi r3,r3,BATS@l
1067 tophys(r3,r3)
1068 LOAD_BAT(0,r3,r4,r5)
1069 LOAD_BAT(1,r3,r4,r5)
1070 LOAD_BAT(2,r3,r4,r5)
1071 LOAD_BAT(3,r3,r4,r5)
1072
1073 blr
1074
1075/*
1076 * This is where the main kernel code starts.
1077 */
1078start_here:
1079 /* ptr to current */
1080 lis r2,init_task@h
1081 ori r2,r2,init_task@l
1082 /* Set up for using our exception vectors */
1083 /* ptr to phys current thread */
1084 tophys(r4,r2)
1085 addi r4,r4,THREAD /* init task's THREAD */
1086 CLR_TOP32(r4)
1087 mtspr SPRN_SPRG3,r4
1088 li r3,0
1089 mtspr SPRN_SPRG2,r3 /* 0 => not in RTAS */
1090
1091 /* stack */
1092 lis r1,init_thread_union@ha
1093 addi r1,r1,init_thread_union@l
1094 li r0,0
1095 stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
1096/*
1097 * Do early platform-specific initialization,
1098 * and set up the MMU.
1099 */
1100 mr r3,r31
1101 mr r4,r30
1102 bl machine_init
1103 bl MMU_init
1104
1105#ifdef CONFIG_APUS
1106 /* Copy exception code to exception vector base on APUS. */
1107 lis r4,KERNELBASE@h
1108#ifdef CONFIG_APUS_FAST_EXCEPT
1109 lis r3,0xfff0 /* Copy to 0xfff00000 */
1110#else
1111 lis r3,0 /* Copy to 0x00000000 */
1112#endif
1113 li r5,0x4000 /* # bytes of memory to copy */
1114 li r6,0
1115 bl copy_and_flush /* copy the first 0x4000 bytes */
1116#endif /* CONFIG_APUS */
1117
1118/*
1119 * Go back to running unmapped so we can load up new values
1120 * for SDR1 (hash table pointer) and the segment registers
1121 * and change to using our exception vectors.
1122 */
1123 lis r4,2f@h
1124 ori r4,r4,2f@l
1125 tophys(r4,r4)
1126 li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1127 FIX_SRR1(r3,r5)
1128 mtspr SPRN_SRR0,r4
1129 mtspr SPRN_SRR1,r3
1130 SYNC
1131 RFI
1132/* Load up the kernel context */
11332: bl load_up_mmu
1134
1135#ifdef CONFIG_BDI_SWITCH
1136 /* Add helper information for the Abatron bdiGDB debugger.
1137 * We do this here because we know the mmu is disabled, and
1138 * will be enabled for real in just a few instructions.
1139 */
1140 lis r5, abatron_pteptrs@h
1141 ori r5, r5, abatron_pteptrs@l
1142	stw	r5, 0xf0(r0)	/* This must match your Abatron config */
1143 lis r6, swapper_pg_dir@h
1144 ori r6, r6, swapper_pg_dir@l
1145 tophys(r5, r5)
1146 stw r6, 0(r5)
1147#endif /* CONFIG_BDI_SWITCH */
1148
1149/* Now turn on the MMU for real! */
1150 li r4,MSR_KERNEL
1151 FIX_SRR1(r4,r5)
1152 lis r3,start_kernel@h
1153 ori r3,r3,start_kernel@l
1154 mtspr SPRN_SRR0,r3
1155 mtspr SPRN_SRR1,r4
1156 SYNC
1157 RFI
1158
1159/*
1160 * Set up the segment registers for a new context.
1161 */
1162_GLOBAL(set_context)
1163 mulli r3,r3,897 /* multiply context by skew factor */
1164 rlwinm r3,r3,4,8,27 /* VSID = (context & 0xfffff) << 4 */
1165 addis r3,r3,0x6000 /* Set Ks, Ku bits */
1166 li r0,NUM_USER_SEGMENTS
1167 mtctr r0
1168
1169#ifdef CONFIG_BDI_SWITCH
1170 /* Context switch the PTE pointer for the Abatron BDI2000.
1171 * The PGDIR is passed as second argument.
1172 */
1173 lis r5, KERNELBASE@h
1174 lwz r5, 0xf0(r5)
1175 stw r4, 0x4(r5)
1176#endif
1177 li r4,0
1178 isync
11793:
1180 mtsrin r3,r4
1181 addi r3,r3,0x111 /* next VSID */
1182 rlwinm r3,r3,0,8,3 /* clear out any overflow from VSID field */
1183 addis r4,r4,0x1000 /* address of next segment */
1184 bdnz 3b
1185 sync
1186 isync
1187 blr
1188
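The arithmetic in set_context is compact enough to restate: the context number is skewed by 897 so adjacent contexts do not collide in the hash table, shifted into the VSID field, then stepped by 0x111 per segment with the overflow bits masked back out. A C sketch of the values fed to mtsrin, taking NUM_USER_SEGMENTS as 16 here to match the full loop in load_up_mmu:

    #define NUM_USER_SEGMENTS 16    /* assumption for the sketch */

    static void set_context_sketch(unsigned long ctx, unsigned int sr[16])
    {
        /* mulli ...,897 ; rlwinm ...,4,8,27: VSID = (ctx * 897) << 4 */
        unsigned int v = (unsigned int)((ctx * 897) << 4) & 0x00fffff0u;
        v |= 0x60000000u;           /* addis ...,0x6000: set Ks/Ku key bits */
        for (int seg = 0; seg < NUM_USER_SEGMENTS; seg++) {
            sr[seg] = v;            /* the value mtsrin loads for this segment */
            /* addi ...,0x111 ; rlwinm ...,0,8,3: next VSID, drop overflow */
            v = (v + 0x111u) & 0xf0ffffffu;
        }
    }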
1189/*
1190 * An undocumented "feature" of 604e requires that the v bit
1191 * be cleared before changing BAT values.
1192 *
1193 * Also, newer IBM firmware does not clear bat3 and 4 so
1194 * this makes sure it's done.
1195 * -- Cort
1196 */
1197clear_bats:
1198 li r10,0
1199 mfspr r9,SPRN_PVR
1200 rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */
1201 cmpwi r9, 1
1202 beq 1f
1203
1204 mtspr SPRN_DBAT0U,r10
1205 mtspr SPRN_DBAT0L,r10
1206 mtspr SPRN_DBAT1U,r10
1207 mtspr SPRN_DBAT1L,r10
1208 mtspr SPRN_DBAT2U,r10
1209 mtspr SPRN_DBAT2L,r10
1210 mtspr SPRN_DBAT3U,r10
1211 mtspr SPRN_DBAT3L,r10
12121:
1213 mtspr SPRN_IBAT0U,r10
1214 mtspr SPRN_IBAT0L,r10
1215 mtspr SPRN_IBAT1U,r10
1216 mtspr SPRN_IBAT1L,r10
1217 mtspr SPRN_IBAT2U,r10
1218 mtspr SPRN_IBAT2L,r10
1219 mtspr SPRN_IBAT3U,r10
1220 mtspr SPRN_IBAT3L,r10
1221BEGIN_FTR_SECTION
1222	/* Here's a tweak: at this point, CPU setup has
1223 * not been called yet, so HIGH_BAT_EN may not be
1224 * set in HID0 for the 745x processors. However, it
1225 * seems that doesn't affect our ability to actually
1226 * write to these SPRs.
1227 */
1228 mtspr SPRN_DBAT4U,r10
1229 mtspr SPRN_DBAT4L,r10
1230 mtspr SPRN_DBAT5U,r10
1231 mtspr SPRN_DBAT5L,r10
1232 mtspr SPRN_DBAT6U,r10
1233 mtspr SPRN_DBAT6L,r10
1234 mtspr SPRN_DBAT7U,r10
1235 mtspr SPRN_DBAT7L,r10
1236 mtspr SPRN_IBAT4U,r10
1237 mtspr SPRN_IBAT4L,r10
1238 mtspr SPRN_IBAT5U,r10
1239 mtspr SPRN_IBAT5L,r10
1240 mtspr SPRN_IBAT6U,r10
1241 mtspr SPRN_IBAT6L,r10
1242 mtspr SPRN_IBAT7U,r10
1243 mtspr SPRN_IBAT7L,r10
1244END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
1245 blr
1246
1247flush_tlbs:
1248 lis r10, 0x40
12491: addic. r10, r10, -0x1000
1250 tlbie r10
1251 blt 1b
1252 sync
1253 blr
1254
1255mmu_off:
1256 addi r4, r3, __after_mmu_off - _start
1257 mfmsr r3
1258 andi. r0,r3,MSR_DR|MSR_IR /* MMU enabled? */
1259 beqlr
1260 andc r3,r3,r0
1261 mtspr SPRN_SRR0,r4
1262 mtspr SPRN_SRR1,r3
1263 sync
1264 RFI
1265
1266/*
1267 * Use the first pair of BAT registers to map the 1st 16MB
1268 * of RAM to KERNELBASE. From this point on we can't safely
1269 * call OF any more.
1270 */
1271initial_bats:
1272 lis r11,KERNELBASE@h
1273 mfspr r9,SPRN_PVR
1274 rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */
1275 cmpwi 0,r9,1
1276 bne 4f
1277 ori r11,r11,4 /* set up BAT registers for 601 */
1278 li r8,0x7f /* valid, block length = 8MB */
1279 oris r9,r11,0x800000@h /* set up BAT reg for 2nd 8M */
1280 oris r10,r8,0x800000@h /* set up BAT reg for 2nd 8M */
1281 mtspr SPRN_IBAT0U,r11 /* N.B. 601 has valid bit in */
1282 mtspr SPRN_IBAT0L,r8 /* lower BAT register */
1283 mtspr SPRN_IBAT1U,r9
1284 mtspr SPRN_IBAT1L,r10
1285 isync
1286 blr
1287
12884: tophys(r8,r11)
1289#ifdef CONFIG_SMP
1290 ori r8,r8,0x12 /* R/W access, M=1 */
1291#else
1292 ori r8,r8,2 /* R/W access */
1293#endif /* CONFIG_SMP */
1294#ifdef CONFIG_APUS
1295 ori r11,r11,BL_8M<<2|0x2 /* set up 8MB BAT registers for 604 */
1296#else
1297 ori r11,r11,BL_256M<<2|0x2 /* set up BAT registers for 604 */
1298#endif /* CONFIG_APUS */
1299
1300 mtspr SPRN_DBAT0L,r8 /* N.B. 6xx (not 601) have valid */
1301 mtspr SPRN_DBAT0U,r11 /* bit in upper BAT register */
1302 mtspr SPRN_IBAT0L,r8
1303 mtspr SPRN_IBAT0U,r11
1304 isync
1305 blr
1306
1307
1308#ifdef CONFIG_8260
1309/* Jump into the system reset for the rom.
1310 * We first disable the MMU, and then jump to the ROM reset address.
1311 *
1312 * r3 is the board info structure, r4 is the location for starting.
1313 * I use this for building a small kernel that can load other kernels,
1314 * rather than trying to write or rely on a rom monitor that can tftp load.
1315 */
1316 .globl m8260_gorom
1317m8260_gorom:
1318 mfmsr r0
1319 rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */
1320 sync
1321 mtmsr r0
1322 sync
1323 mfspr r11, SPRN_HID0
1324 lis r10, 0
1325 ori r10,r10,HID0_ICE|HID0_DCE
1326 andc r11, r11, r10
1327 mtspr SPRN_HID0, r11
1328 isync
1329 li r5, MSR_ME|MSR_RI
1330 lis r6,2f@h
1331 addis r6,r6,-KERNELBASE@h
1332 ori r6,r6,2f@l
1333 mtspr SPRN_SRR0,r6
1334 mtspr SPRN_SRR1,r5
1335 isync
1336 sync
1337 rfi
13382:
1339 mtlr r4
1340 blr
1341#endif
1342
1343
1344/*
1345 * We put a few things here that have to be page-aligned.
1346 * This stuff goes at the beginning of the data segment,
1347 * which is page-aligned.
1348 */
1349 .data
1350 .globl sdata
1351sdata:
1352 .globl empty_zero_page
1353empty_zero_page:
1354 .space 4096
1355
1356 .globl swapper_pg_dir
1357swapper_pg_dir:
1358 .space 4096
1359
1360/*
1361 * This space gets a copy of optional info passed to us by the bootstrap
1362 * Used to pass parameters into the kernel like root=/dev/sda1, etc.
1363 */
1364 .globl cmd_line
1365cmd_line:
1366 .space 512
1367
1368 .globl intercept_table
1369intercept_table:
1370 .long 0, 0, i0x200, i0x300, i0x400, 0, i0x600, i0x700
1371 .long i0x800, 0, 0, 0, 0, i0xd00, 0, 0
1372 .long 0, 0, 0, i0x1300, 0, 0, 0, 0
1373 .long 0, 0, 0, 0, 0, 0, 0, 0
1374 .long 0, 0, 0, 0, 0, 0, 0, 0
1375 .long 0, 0, 0, 0, 0, 0, 0, 0
1376
1377/* Room for two PTE pointers, usually the kernel and current user pointers
1378 * to their respective root page table.
1379 */
1380abatron_pteptrs:
1381 .space 8
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
new file mode 100644
index 000000000000..8b49679fad54
--- /dev/null
+++ b/arch/powerpc/kernel/head_44x.S
@@ -0,0 +1,782 @@
1/*
2 * arch/ppc/kernel/head_44x.S
3 *
4 * Kernel execution entry point code.
5 *
6 * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
7 * Initial PowerPC version.
8 * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
9 * Rewritten for PReP
10 * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
11 * Low-level exception handlers, MMU support, and rewrite.
12 * Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
13 * PowerPC 8xx modifications.
14 * Copyright (c) 1998-1999 TiVo, Inc.
15 * PowerPC 403GCX modifications.
16 * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
17 * PowerPC 403GCX/405GP modifications.
18 * Copyright 2000 MontaVista Software Inc.
19 * PPC405 modifications
20 * PowerPC 403GCX/405GP modifications.
21 * Author: MontaVista Software, Inc.
22 * frank_rowand@mvista.com or source@mvista.com
23 * debbie_chu@mvista.com
24 * Copyright 2002-2005 MontaVista Software, Inc.
25 * PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
26 *
27 * This program is free software; you can redistribute it and/or modify it
28 * under the terms of the GNU General Public License as published by the
29 * Free Software Foundation; either version 2 of the License, or (at your
30 * option) any later version.
31 */
32
33#include <linux/config.h>
34#include <asm/processor.h>
35#include <asm/page.h>
36#include <asm/mmu.h>
37#include <asm/pgtable.h>
38#include <asm/ibm4xx.h>
39#include <asm/ibm44x.h>
40#include <asm/cputable.h>
41#include <asm/thread_info.h>
42#include <asm/ppc_asm.h>
43#include <asm/asm-offsets.h>
44#include "head_booke.h"
45
46
47/* As with the other PowerPC ports, it is expected that when code
48 * execution begins here, the following registers contain valid, yet
49 * optional, information:
50 *
51 * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
52 * r4 - Starting address of the init RAM disk
53 * r5 - Ending address of the init RAM disk
54 * r6 - Start of kernel command line string (e.g. "mem=128")
55 * r7 - End of kernel command line string
56 *
57 */
58 .text
59_GLOBAL(_stext)
60_GLOBAL(_start)
61 /*
62 * Reserve a word at a fixed location to store the address
63 * of abatron_pteptrs
64 */
65 nop
66/*
67 * Save parameters we are passed
68 */
69 mr r31,r3
70 mr r30,r4
71 mr r29,r5
72 mr r28,r6
73 mr r27,r7
74 li r24,0 /* CPU number */
75
76/*
77 * Set up the initial MMU state
78 *
79 * We are still executing code at the virtual address
80 * mappings set by the firmware for the base of RAM.
81 *
82 * We first invalidate all TLB entries but the one
83 * we are running from. We then load the KERNELBASE
84 * mappings so we can begin to use kernel addresses
85 * natively and so the interrupt vector locations are
86 * permanently pinned (necessary since Book E
87 * implementations always have translation enabled).
88 *
89 * TODO: Use the known TLB entry we are running from to
90 * determine which physical region we are located
91 * in. This can be used to determine where in RAM
92 * (on a shared CPU system) or PCI memory space
93 * (on a DRAMless system) we are located.
94 * For now, we assume a perfect world which means
95 * we are located at the base of DRAM (physical 0).
96 */
97
98/*
99 * Search TLB for entry that we are currently using.
100 * Invalidate all entries but the one we are using.
101 */
102 /* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
103 mfspr r3,SPRN_PID /* Get PID */
104 mfmsr r4 /* Get MSR */
105 andi. r4,r4,MSR_IS@l /* TS=1? */
106 beq wmmucr /* If not, leave STS=0 */
107 oris r3,r3,PPC44x_MMUCR_STS@h /* Set STS=1 */
108wmmucr: mtspr SPRN_MMUCR,r3 /* Put MMUCR */
109 sync
110
111 bl invstr /* Find our address */
112invstr: mflr r5 /* Make it accessible */
113 tlbsx r23,0,r5 /* Find entry we are in */
114 li r4,0 /* Start at TLB entry 0 */
115 li r3,0 /* Set PAGEID inval value */
1161: cmpw r23,r4 /* Is this our entry? */
117 beq skpinv /* If so, skip the inval */
118 tlbwe r3,r4,PPC44x_TLB_PAGEID /* If not, inval the entry */
119skpinv: addi r4,r4,1 /* Increment */
120 cmpwi r4,64 /* Are we done? */
121 bne 1b /* If not, repeat */
122 isync /* If so, context change */
123
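Because a Book E core never runs untranslated, the entry mapping the currently executing code must first be found (tlbsx on our own PC) and preserved while the other 63 entries are invalidated. The loop above, restated as C (the tlbsx/tlbwe wrappers are stubs standing in for the instructions):

    #define PPC44x_TLB_ENTRIES 64

    static int  tlbsx_slot_of(unsigned long ea) { (void)ea; return 63; }        /* stub */
    static void tlbwe_pageid(int slot, unsigned int w) { (void)slot; (void)w; } /* stub */

    static void invalidate_all_but_self_sketch(unsigned long cur_pc)
    {
        int self = tlbsx_slot_of(cur_pc);   /* slot mapping our own code */
        for (int slot = 0; slot < PPC44x_TLB_ENTRIES; slot++) {
            if (slot == self)
                continue;                   /* keep the entry we run from */
            tlbwe_pageid(slot, 0);          /* zero PAGEID word: V=0, invalid */
        }
        /* isync afterwards: context-synchronize the TLB change */
    }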
124/*
125 * Configure and load pinned entry into TLB slot 63.
126 */
127
128 lis r3,KERNELBASE@h /* Load the kernel virtual address */
129 ori r3,r3,KERNELBASE@l
130
131 /* Kernel is at the base of RAM */
132 li r4, 0 /* Load the kernel physical address */
133
134 /* Load the kernel PID = 0 */
135 li r0,0
136 mtspr SPRN_PID,r0
137 sync
138
139 /* Initialize MMUCR */
140 li r5,0
141 mtspr SPRN_MMUCR,r5
142 sync
143
144 /* pageid fields */
145 clrrwi r3,r3,10 /* Mask off the effective page number */
146 ori r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M
147
148 /* xlat fields */
149 clrrwi r4,r4,10 /* Mask off the real page number */
150 /* ERPN is 0 for first 4GB page */
151
152 /* attrib fields */
153 /* Added guarded bit to protect against speculative loads/stores */
154 li r5,0
155 ori r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)
156
157 li r0,63 /* TLB slot 63 */
158
159 tlbwe r3,r0,PPC44x_TLB_PAGEID /* Load the pageid fields */
160 tlbwe r4,r0,PPC44x_TLB_XLAT /* Load the translation fields */
161 tlbwe r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */
162
163 /* Force context change */
164 mfmsr r0
165 mtspr SPRN_SRR1, r0
166 lis r0,3f@h
167 ori r0,r0,3f@l
168 mtspr SPRN_SRR0,r0
169 sync
170 rfi
171
172 /* If necessary, invalidate original entry we used */
1733: cmpwi r23,63
174 beq 4f
175 li r6,0
176 tlbwe r6,r23,PPC44x_TLB_PAGEID
177 isync
178
1794:
180#ifdef CONFIG_SERIAL_TEXT_DEBUG
181 /*
182 * Add temporary UART mapping for early debug.
183 * We can map UART registers wherever we want as long as they don't
184 * interfere with other system mappings (e.g. with pinned entries).
185 * For an example of how we handle this - see ocotea.h. --ebs
186 */
187 /* pageid fields */
188 lis r3,UART0_IO_BASE@h
189 ori r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_4K
190
191 /* xlat fields */
192 lis r4,UART0_PHYS_IO_BASE@h /* RPN depends on SoC */
193#ifndef CONFIG_440EP
194 ori r4,r4,0x0001 /* ERPN is 1 for second 4GB page */
195#endif
196
197 /* attrib fields */
198 li r5,0
199 ori r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_I | PPC44x_TLB_G)
200
201 li r0,0 /* TLB slot 0 */
202
203 tlbwe r3,r0,PPC44x_TLB_PAGEID /* Load the pageid fields */
204 tlbwe r4,r0,PPC44x_TLB_XLAT /* Load the translation fields */
205 tlbwe r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */
206
207 /* Force context change */
208 isync
209#endif /* CONFIG_SERIAL_TEXT_DEBUG */
210
211 /* Establish the interrupt vector offsets */
212 SET_IVOR(0, CriticalInput);
213 SET_IVOR(1, MachineCheck);
214 SET_IVOR(2, DataStorage);
215 SET_IVOR(3, InstructionStorage);
216 SET_IVOR(4, ExternalInput);
217 SET_IVOR(5, Alignment);
218 SET_IVOR(6, Program);
219 SET_IVOR(7, FloatingPointUnavailable);
220 SET_IVOR(8, SystemCall);
221 SET_IVOR(9, AuxillaryProcessorUnavailable);
222 SET_IVOR(10, Decrementer);
223 SET_IVOR(11, FixedIntervalTimer);
224 SET_IVOR(12, WatchdogTimer);
225 SET_IVOR(13, DataTLBError);
226 SET_IVOR(14, InstructionTLBError);
227 SET_IVOR(15, Debug);
228
229 /* Establish the interrupt vector base */
230 lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */
231 mtspr SPRN_IVPR,r4
232
233#ifdef CONFIG_440EP
234 /* Clear DAPUIB flag in CCR0 (enable APU between CPU and FPU) */
235 mfspr r2,SPRN_CCR0
236 lis r3,0xffef
237 ori r3,r3,0xffff
238 and r2,r2,r3
239 mtspr SPRN_CCR0,r2
240 isync
241#endif
242
243 /*
244 * This is where the main kernel code starts.
245 */
246
247 /* ptr to current */
248 lis r2,init_task@h
249 ori r2,r2,init_task@l
250
251 /* ptr to current thread */
252 addi r4,r2,THREAD /* init task's THREAD */
253 mtspr SPRN_SPRG3,r4
254
255 /* stack */
256 lis r1,init_thread_union@h
257 ori r1,r1,init_thread_union@l
258 li r0,0
259 stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
260
261 bl early_init
262
263/*
264 * Decide what sort of machine this is and initialize the MMU.
265 */
266 mr r3,r31
267 mr r4,r30
268 mr r5,r29
269 mr r6,r28
270 mr r7,r27
271 bl machine_init
272 bl MMU_init
273
274 /* Setup PTE pointers for the Abatron bdiGDB */
275 lis r6, swapper_pg_dir@h
276 ori r6, r6, swapper_pg_dir@l
277 lis r5, abatron_pteptrs@h
278 ori r5, r5, abatron_pteptrs@l
279 lis r4, KERNELBASE@h
280 ori r4, r4, KERNELBASE@l
281 stw r5, 0(r4) /* Save abatron_pteptrs at a fixed location */
282 stw r6, 0(r5)
283
284 /* Let's move on */
285 lis r4,start_kernel@h
286 ori r4,r4,start_kernel@l
287 lis r3,MSR_KERNEL@h
288 ori r3,r3,MSR_KERNEL@l
289 mtspr SPRN_SRR0,r4
290 mtspr SPRN_SRR1,r3
291 rfi /* change context and jump to start_kernel */
292
293/*
294 * Interrupt vector entry code
295 *
296 * The Book E MMUs are always on so we don't need to handle
297 * interrupts in real mode as with previous PPC processors. In
298 * this case we handle interrupts in the kernel virtual address
299 * space.
300 *
301 * Interrupt vectors are dynamically placed relative to the
302 * interrupt prefix as determined by the address of interrupt_base.
303 * The interrupt vectors offsets are programmed using the labels
304 * for each interrupt vector entry.
305 *
306 * Interrupt vectors must be aligned on a 16 byte boundary.
307 * We align on a 32 byte cache line boundary for good measure.
308 */
309
310interrupt_base:
311 /* Critical Input Interrupt */
312 CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)
313
314 /* Machine Check Interrupt */
315#ifdef CONFIG_440A
316 MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
317#else
318 CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
319#endif
320
321 /* Data Storage Interrupt */
322 START_EXCEPTION(DataStorage)
323 mtspr SPRN_SPRG0, r10 /* Save some working registers */
324 mtspr SPRN_SPRG1, r11
325 mtspr SPRN_SPRG4W, r12
326 mtspr SPRN_SPRG5W, r13
327 mfcr r11
328 mtspr SPRN_SPRG7W, r11
329
330 /*
331	 * Check if it was a store fault; if not, bail
332 * because a user tried to access a kernel or
333 * read-protected page. Otherwise, get the
334 * offending address and handle it.
335 */
336 mfspr r10, SPRN_ESR
337 andis. r10, r10, ESR_ST@h
338 beq 2f
339
340 mfspr r10, SPRN_DEAR /* Get faulting address */
341
342 /* If we are faulting a kernel address, we have to use the
343 * kernel page tables.
344 */
345 lis r11, TASK_SIZE@h
346 cmplw r10, r11
347 blt+ 3f
348 lis r11, swapper_pg_dir@h
349 ori r11, r11, swapper_pg_dir@l
350
351 mfspr r12,SPRN_MMUCR
352 rlwinm r12,r12,0,0,23 /* Clear TID */
353
354 b 4f
355
356 /* Get the PGD for the current thread */
3573:
358 mfspr r11,SPRN_SPRG3
359 lwz r11,PGDIR(r11)
360
361 /* Load PID into MMUCR TID */
362 mfspr r12,SPRN_MMUCR /* Get MMUCR */
363 mfspr r13,SPRN_PID /* Get PID */
364 rlwimi r12,r13,0,24,31 /* Set TID */
365
3664:
367 mtspr SPRN_MMUCR,r12
368
369 rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */
370 lwzx r11, r12, r11 /* Get pgd/pmd entry */
371 rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */
372 beq 2f /* Bail if no table */
373
374 rlwimi r12, r10, 23, 20, 28 /* Compute pte address */
375 lwz r11, 4(r12) /* Get pte entry */
376
377 andi. r13, r11, _PAGE_RW /* Is it writeable? */
378 beq 2f /* Bail if not */
379
380 /* Update 'changed'.
381 */
382 ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
383 stw r11, 4(r12) /* Update Linux page table */
384
385 li r13, PPC44x_TLB_SR@l /* Set SR */
386 rlwimi r13, r11, 29, 29, 29 /* SX = _PAGE_HWEXEC */
387 rlwimi r13, r11, 0, 30, 30 /* SW = _PAGE_RW */
388 rlwimi r13, r11, 29, 28, 28 /* UR = _PAGE_USER */
389 rlwimi r12, r11, 31, 26, 26 /* (_PAGE_USER>>1)->r12 */
390 rlwimi r12, r11, 29, 30, 30 /* (_PAGE_USER>>3)->r12 */
391 and r12, r12, r11 /* HWEXEC/RW & USER */
392 rlwimi r13, r12, 0, 26, 26 /* UX = HWEXEC & USER */
393 rlwimi r13, r12, 3, 27, 27 /* UW = RW & USER */
394
395 rlwimi r11,r13,0,26,31 /* Insert static perms */
396
397 rlwinm r11,r11,0,20,15 /* Clear U0-U3 */
398
399 /* find the TLB index that caused the fault. It has to be here. */
400 tlbsx r10, 0, r10
401
402 tlbwe r11, r10, PPC44x_TLB_ATTRIB /* Write ATTRIB */
403
404 /* Done...restore registers and get out of here.
405 */
406 mfspr r11, SPRN_SPRG7R
407 mtcr r11
408 mfspr r13, SPRN_SPRG5R
409 mfspr r12, SPRN_SPRG4R
410
411 mfspr r11, SPRN_SPRG1
412 mfspr r10, SPRN_SPRG0
413 rfi /* Force context change */
414
4152:
416 /*
417 * The bailout. Restore registers to pre-exception conditions
418 * and call the heavyweights to help us out.
419 */
420 mfspr r11, SPRN_SPRG7R
421 mtcr r11
422 mfspr r13, SPRN_SPRG5R
423 mfspr r12, SPRN_SPRG4R
424
425 mfspr r11, SPRN_SPRG1
426 mfspr r10, SPRN_SPRG0
427 b data_access
428
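The rlwimi sequence above distils to a simple rule: supervisor read is always granted, SW/SX come straight from _PAGE_RW/_PAGE_HWEXEC, and each user bit is the matching supervisor bit ANDed with _PAGE_USER. Written out in C, with illustrative placeholder bit values (the kernel's headers define the real _PAGE_* and PPC44x_TLB_* masks):

    #define _PAGE_USER    0x001   /* placeholder values */
    #define _PAGE_RW      0x002
    #define _PAGE_HWEXEC  0x004
    #define PPC44x_TLB_SR 0x01
    #define PPC44x_TLB_SW 0x02
    #define PPC44x_TLB_SX 0x04
    #define PPC44x_TLB_UR 0x08
    #define PPC44x_TLB_UW 0x10
    #define PPC44x_TLB_UX 0x20

    static unsigned int tlb_perms_sketch(unsigned int pte)
    {
        unsigned int perms = PPC44x_TLB_SR;             /* supervisor read: always */
        if (pte & _PAGE_RW)     perms |= PPC44x_TLB_SW;
        if (pte & _PAGE_HWEXEC) perms |= PPC44x_TLB_SX;
        if (pte & _PAGE_USER) {
            perms |= PPC44x_TLB_UR;
            if (pte & _PAGE_RW)     perms |= PPC44x_TLB_UW;
            if (pte & _PAGE_HWEXEC) perms |= PPC44x_TLB_UX;
        }
        return perms;
    }

finish_tlb_load below computes the same static permissions when it loads a fresh entry.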
429 /* Instruction Storage Interrupt */
430 INSTRUCTION_STORAGE_EXCEPTION
431
432 /* External Input Interrupt */
433 EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)
434
435 /* Alignment Interrupt */
436 ALIGNMENT_EXCEPTION
437
438 /* Program Interrupt */
439 PROGRAM_EXCEPTION
440
441 /* Floating Point Unavailable Interrupt */
442#ifdef CONFIG_PPC_FPU
443 FP_UNAVAILABLE_EXCEPTION
444#else
445 EXCEPTION(0x2010, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
446#endif
447
448 /* System Call Interrupt */
449 START_EXCEPTION(SystemCall)
450 NORMAL_EXCEPTION_PROLOG
451 EXC_XFER_EE_LITE(0x0c00, DoSyscall)
452
453	/* Auxiliary Processor Unavailable Interrupt */
454 EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
455
456 /* Decrementer Interrupt */
457 DECREMENTER_EXCEPTION
458
459	/* Fixed Interval Timer Interrupt */
460 /* TODO: Add FIT support */
461 EXCEPTION(0x1010, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)
462
463 /* Watchdog Timer Interrupt */
464 /* TODO: Add watchdog support */
465#ifdef CONFIG_BOOKE_WDT
466 CRITICAL_EXCEPTION(0x1020, WatchdogTimer, WatchdogException)
467#else
468 CRITICAL_EXCEPTION(0x1020, WatchdogTimer, unknown_exception)
469#endif
470
471 /* Data TLB Error Interrupt */
472 START_EXCEPTION(DataTLBError)
473 mtspr SPRN_SPRG0, r10 /* Save some working registers */
474 mtspr SPRN_SPRG1, r11
475 mtspr SPRN_SPRG4W, r12
476 mtspr SPRN_SPRG5W, r13
477 mfcr r11
478 mtspr SPRN_SPRG7W, r11
479 mfspr r10, SPRN_DEAR /* Get faulting address */
480
481 /* If we are faulting a kernel address, we have to use the
482 * kernel page tables.
483 */
484 lis r11, TASK_SIZE@h
485 cmplw r10, r11
486 blt+ 3f
487 lis r11, swapper_pg_dir@h
488 ori r11, r11, swapper_pg_dir@l
489
490 mfspr r12,SPRN_MMUCR
491 rlwinm r12,r12,0,0,23 /* Clear TID */
492
493 b 4f
494
495 /* Get the PGD for the current thread */
4963:
497 mfspr r11,SPRN_SPRG3
498 lwz r11,PGDIR(r11)
499
500 /* Load PID into MMUCR TID */
501 mfspr r12,SPRN_MMUCR
502 mfspr r13,SPRN_PID /* Get PID */
503 rlwimi r12,r13,0,24,31 /* Set TID */
504
5054:
506 mtspr SPRN_MMUCR,r12
507
508 rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */
509 lwzx r11, r12, r11 /* Get pgd/pmd entry */
510 rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */
511 beq 2f /* Bail if no table */
512
513 rlwimi r12, r10, 23, 20, 28 /* Compute pte address */
514 lwz r11, 4(r12) /* Get pte entry */
515 andi. r13, r11, _PAGE_PRESENT /* Is the page present? */
516 beq 2f /* Bail if not present */
517
518 ori r11, r11, _PAGE_ACCESSED
519 stw r11, 4(r12)
520
521 /* Jump to common tlb load */
522 b finish_tlb_load
523
5242:
525 /* The bailout. Restore registers to pre-exception conditions
526 * and call the heavyweights to help us out.
527 */
528 mfspr r11, SPRN_SPRG7R
529 mtcr r11
530 mfspr r13, SPRN_SPRG5R
531 mfspr r12, SPRN_SPRG4R
532 mfspr r11, SPRN_SPRG1
533 mfspr r10, SPRN_SPRG0
534 b data_access
535
536 /* Instruction TLB Error Interrupt */
537 /*
538 * Nearly the same as above, except we get our
539 * information from different registers and bailout
540 * to a different point.
541 */
542 START_EXCEPTION(InstructionTLBError)
543 mtspr SPRN_SPRG0, r10 /* Save some working registers */
544 mtspr SPRN_SPRG1, r11
545 mtspr SPRN_SPRG4W, r12
546 mtspr SPRN_SPRG5W, r13
547 mfcr r11
548 mtspr SPRN_SPRG7W, r11
549 mfspr r10, SPRN_SRR0 /* Get faulting address */
550
551 /* If we are faulting a kernel address, we have to use the
552 * kernel page tables.
553 */
554 lis r11, TASK_SIZE@h
555 cmplw r10, r11
556 blt+ 3f
557 lis r11, swapper_pg_dir@h
558 ori r11, r11, swapper_pg_dir@l
559
560 mfspr r12,SPRN_MMUCR
561 rlwinm r12,r12,0,0,23 /* Clear TID */
562
563 b 4f
564
565 /* Get the PGD for the current thread */
5663:
567 mfspr r11,SPRN_SPRG3
568 lwz r11,PGDIR(r11)
569
570 /* Load PID into MMUCR TID */
571 mfspr r12,SPRN_MMUCR
572 mfspr r13,SPRN_PID /* Get PID */
573 rlwimi r12,r13,0,24,31 /* Set TID */
574
5754:
576 mtspr SPRN_MMUCR,r12
577
578 rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */
579 lwzx r11, r12, r11 /* Get pgd/pmd entry */
580 rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */
581 beq 2f /* Bail if no table */
582
583 rlwimi r12, r10, 23, 20, 28 /* Compute pte address */
584 lwz r11, 4(r12) /* Get pte entry */
585 andi. r13, r11, _PAGE_PRESENT /* Is the page present? */
586 beq 2f /* Bail if not present */
587
588 ori r11, r11, _PAGE_ACCESSED
589 stw r11, 4(r12)
590
591 /* Jump to common TLB load point */
592 b finish_tlb_load
593
5942:
595 /* The bailout. Restore registers to pre-exception conditions
596 * and call the heavyweights to help us out.
597 */
598 mfspr r11, SPRN_SPRG7R
599 mtcr r11
600 mfspr r13, SPRN_SPRG5R
601 mfspr r12, SPRN_SPRG4R
602 mfspr r11, SPRN_SPRG1
603 mfspr r10, SPRN_SPRG0
604 b InstructionStorage
605
606 /* Debug Interrupt */
607 DEBUG_EXCEPTION
608
609/*
610 * Local functions
611 */
612 /*
613 * Data TLB exceptions will bail out to this point
614 * if they can't resolve the lightweight TLB fault.
615 */
616data_access:
617 NORMAL_EXCEPTION_PROLOG
618 mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */
619 stw r5,_ESR(r11)
620 mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
621 EXC_XFER_EE_LITE(0x0300, handle_page_fault)
622
623/*
624 *
625 * Both the instruction and data TLB miss get to this
626 * point to load the TLB.
627 * r10 - EA of fault
628 * r11 - available to use
629 * r12 - Pointer to the 64-bit PTE
630 * r13 - available to use
631 * MMUCR - loaded with proper value when we get here
632 * Upon exit, we reload everything and RFI.
633 */
634finish_tlb_load:
635 /*
636 * We set execute, because we don't have the granularity to
637 * properly set this at the page level (Linux problem).
638 * If shared is set, we cause a zero PID->TID load.
639 * Many of these bits are software only. Bits we don't set
640	 * here are assumed to already have the appropriate value.
641 */
642
643 /* Load the next available TLB index */
644 lis r13, tlb_44x_index@ha
645 lwz r13, tlb_44x_index@l(r13)
646 /* Load the TLB high watermark */
647 lis r11, tlb_44x_hwater@ha
648 lwz r11, tlb_44x_hwater@l(r11)
649
650 /* Increment, rollover, and store TLB index */
651 addi r13, r13, 1
652 cmpw 0, r13, r11 /* reserve entries */
653 ble 7f
654 li r13, 0
6557:
656 /* Store the next available TLB index */
657 lis r11, tlb_44x_index@ha
658 stw r13, tlb_44x_index@l(r11)
659
660 lwz r11, 0(r12) /* Get MS word of PTE */
661 lwz r12, 4(r12) /* Get LS word of PTE */
662 rlwimi r11, r12, 0, 0 , 19 /* Insert RPN */
663 tlbwe r11, r13, PPC44x_TLB_XLAT /* Write XLAT */
664
665 /*
666 * Create PAGEID. This is the faulting address,
667 * page size, and valid flag.
668 */
669 li r11, PPC44x_TLB_VALID | PPC44x_TLB_4K
670 rlwimi r10, r11, 0, 20, 31 /* Insert valid and page size */
671 tlbwe r10, r13, PPC44x_TLB_PAGEID /* Write PAGEID */
672
673 li r10, PPC44x_TLB_SR@l /* Set SR */
674 rlwimi r10, r12, 0, 30, 30 /* Set SW = _PAGE_RW */
675 rlwimi r10, r12, 29, 29, 29 /* SX = _PAGE_HWEXEC */
676 rlwimi r10, r12, 29, 28, 28 /* UR = _PAGE_USER */
677 rlwimi r11, r12, 31, 26, 26 /* (_PAGE_USER>>1)->r12 */
678 and r11, r12, r11 /* HWEXEC & USER */
679 rlwimi r10, r11, 0, 26, 26 /* UX = HWEXEC & USER */
680
681 rlwimi r12, r10, 0, 26, 31 /* Insert static perms */
682 rlwinm r12, r12, 0, 20, 15 /* Clear U0-U3 */
683 tlbwe r12, r13, PPC44x_TLB_ATTRIB /* Write ATTRIB */
684
685 /* Done...restore registers and get out of here.
686 */
687 mfspr r11, SPRN_SPRG7R
688 mtcr r11
689 mfspr r13, SPRN_SPRG5R
690 mfspr r12, SPRN_SPRG4R
691 mfspr r11, SPRN_SPRG1
692 mfspr r10, SPRN_SPRG0
693 rfi /* Force context change */
694
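Victim selection in finish_tlb_load is a simple round-robin over the replaceable part of the TLB: tlb_44x_index advances by one per load and wraps to 0 once it passes tlb_44x_hwater, so the pinned entries above the watermark (such as the 256M kernel mapping in slot 63) are never evicted. In C:

    /* Sketch of the slot allocator; the two variables carry the same
     * names as the kernel symbols referenced in the asm, but are
     * declared locally so the sketch stands alone. */
    static unsigned int tlb_44x_index;   /* last slot we loaded */
    static unsigned int tlb_44x_hwater;  /* highest replaceable slot */

    static unsigned int next_tlb_slot_sketch(void)
    {
        unsigned int slot = tlb_44x_index + 1;
        if (slot > tlb_44x_hwater)       /* past the watermark: reserved slots */
            slot = 0;                    /* wrap around */
        tlb_44x_index = slot;
        return slot;                     /* caller issues the three tlbwe writes */
    }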
695/*
696 * Global functions
697 */
698
699/*
700 * extern void giveup_altivec(struct task_struct *prev)
701 *
702 * The 44x core does not have an AltiVec unit.
703 */
704_GLOBAL(giveup_altivec)
705 blr
706
707/*
708 * extern void giveup_fpu(struct task_struct *prev)
709 *
710 * The 44x core does not have an FPU.
711 */
712#ifndef CONFIG_PPC_FPU
713_GLOBAL(giveup_fpu)
714 blr
715#endif
716
717/*
718 * extern void abort(void)
719 *
720 * At present, this routine just applies a system reset.
721 */
722_GLOBAL(abort)
723 mfspr r13,SPRN_DBCR0
724 oris r13,r13,DBCR0_RST_SYSTEM@h
725 mtspr SPRN_DBCR0,r13
726
727_GLOBAL(set_context)
728
729#ifdef CONFIG_BDI_SWITCH
730 /* Context switch the PTE pointer for the Abatron BDI2000.
731 * The PGDIR is the second parameter.
732 */
733 lis r5, abatron_pteptrs@h
734 ori r5, r5, abatron_pteptrs@l
735 stw r4, 0x4(r5)
736#endif
737 mtspr SPRN_PID,r3
738 isync /* Force context change */
739 blr
740
741/*
742 * We put a few things here that have to be page-aligned. This stuff
743 * goes at the beginning of the data segment, which is page-aligned.
744 */
745 .data
746 .align 12
747 .globl sdata
748sdata:
749 .globl empty_zero_page
750empty_zero_page:
751 .space 4096
752
753/*
754 * To support >32-bit physical addresses, we use an 8KB pgdir.
755 */
756 .globl swapper_pg_dir
757swapper_pg_dir:
758 .space 8192
759
760/* Reserved 4k for the critical exception stack & 4k for the machine
761 * check stack per CPU for kernel mode exceptions */
762 .section .bss
763 .align 12
764exception_stack_bottom:
765 .space BOOKE_EXCEPTION_STACK_SIZE
766 .globl exception_stack_top
767exception_stack_top:
768
769/*
770 * This space gets a copy of optional info passed to us by the bootstrap
771 * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
772 */
773 .globl cmd_line
774cmd_line:
775 .space 512
776
777/*
778 * Room for two PTE pointers, usually the kernel and current user pointers
779 * to their respective root page table.
780 */
781abatron_pteptrs:
782 .space 8
diff --git a/arch/powerpc/kernel/head_4xx.S b/arch/powerpc/kernel/head_4xx.S
new file mode 100644
index 000000000000..2590e97f5539
--- /dev/null
+++ b/arch/powerpc/kernel/head_4xx.S
@@ -0,0 +1,1022 @@
1/*
2 * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
3 * Initial PowerPC version.
4 * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
5 * Rewritten for PReP
6 * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
7 * Low-level exception handlers, MMU support, and rewrite.
8 * Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
9 * PowerPC 8xx modifications.
10 * Copyright (c) 1998-1999 TiVo, Inc.
11 * PowerPC 403GCX modifications.
12 * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
13 * PowerPC 403GCX/405GP modifications.
14 * Copyright 2000 MontaVista Software Inc.
15 * PPC405 modifications
16 * PowerPC 403GCX/405GP modifications.
17 * Author: MontaVista Software, Inc.
18 * frank_rowand@mvista.com or source@mvista.com
19 * debbie_chu@mvista.com
20 *
21 *
22 * Module name: head_4xx.S
23 *
24 * Description:
25 * Kernel execution entry point code.
26 *
27 * This program is free software; you can redistribute it and/or
28 * modify it under the terms of the GNU General Public License
29 * as published by the Free Software Foundation; either version
30 * 2 of the License, or (at your option) any later version.
31 *
32 */
33
34#include <linux/config.h>
35#include <asm/processor.h>
36#include <asm/page.h>
37#include <asm/mmu.h>
38#include <asm/pgtable.h>
39#include <asm/ibm4xx.h>
40#include <asm/cputable.h>
41#include <asm/thread_info.h>
42#include <asm/ppc_asm.h>
43#include <asm/asm-offsets.h>
44
45/* As with the other PowerPC ports, it is expected that when code
46 * execution begins here, the following registers contain valid, yet
47 * optional, information:
48 *
49 * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
50 * r4 - Starting address of the init RAM disk
51 * r5 - Ending address of the init RAM disk
52 * r6 - Start of kernel command line string (e.g. "mem=96m")
53 * r7 - End of kernel command line string
54 *
55 * This is all going to change RSN when we add bi_recs....... -- Dan
56 */
57 .text
58_GLOBAL(_stext)
59_GLOBAL(_start)
60
61 /* Save parameters we are passed.
62 */
63 mr r31,r3
64 mr r30,r4
65 mr r29,r5
66 mr r28,r6
67 mr r27,r7
68
69 /* We have to turn on the MMU right away so we get cache modes
70 * set correctly.
71 */
72 bl initial_mmu
73
74/* We now have the lower 16 Meg mapped into TLB entries, and the caches
75 * ready to work.
76 */
77turn_on_mmu:
78 lis r0,MSR_KERNEL@h
79 ori r0,r0,MSR_KERNEL@l
80 mtspr SPRN_SRR1,r0
81 lis r0,start_here@h
82 ori r0,r0,start_here@l
83 mtspr SPRN_SRR0,r0
84 SYNC
85 rfi /* enables MMU */
86 b . /* prevent prefetch past rfi */
87
88/*
89 * This area is used for temporarily saving registers during the
90 * critical exception prolog.
91 */
92 . = 0xc0
93crit_save:
94_GLOBAL(crit_r10)
95 .space 4
96_GLOBAL(crit_r11)
97 .space 4
98
99/*
100 * Exception vector entry code. This code runs with address translation
101 * turned off (i.e. using physical addresses). We assume SPRG3 has the
102 * physical address of the current task thread_struct.
103 * Note that we have to have decremented r1 before we write to any fields
104 * of the exception frame, since a critical interrupt could occur at any
105 * time, and it will write to the area immediately below the current r1.
106 */
107#define NORMAL_EXCEPTION_PROLOG \
108 mtspr SPRN_SPRG0,r10; /* save two registers to work with */\
109 mtspr SPRN_SPRG1,r11; \
110 mtspr SPRN_SPRG2,r1; \
111 mfcr r10; /* save CR in r10 for now */\
112 mfspr r11,SPRN_SRR1; /* check whether user or kernel */\
113 andi. r11,r11,MSR_PR; \
114 beq 1f; \
115 mfspr r1,SPRN_SPRG3; /* if from user, start at top of */\
116 lwz r1,THREAD_INFO-THREAD(r1); /* this thread's kernel stack */\
117 addi r1,r1,THREAD_SIZE; \
1181: subi r1,r1,INT_FRAME_SIZE; /* Allocate an exception frame */\
119 tophys(r11,r1); \
120 stw r10,_CCR(r11); /* save various registers */\
121 stw r12,GPR12(r11); \
122 stw r9,GPR9(r11); \
123 mfspr r10,SPRN_SPRG0; \
124 stw r10,GPR10(r11); \
125 mfspr r12,SPRN_SPRG1; \
126 stw r12,GPR11(r11); \
127 mflr r10; \
128 stw r10,_LINK(r11); \
129 mfspr r10,SPRN_SPRG2; \
130 mfspr r12,SPRN_SRR0; \
131 stw r10,GPR1(r11); \
132 mfspr r9,SPRN_SRR1; \
133 stw r10,0(r11); \
134 rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
135 stw r0,GPR0(r11); \
136 SAVE_4GPRS(3, r11); \
137 SAVE_2GPRS(7, r11)
138
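The first half of NORMAL_EXCEPTION_PROLOG is a stack decision: an exception from user mode must move onto the top of the task's kernel stack (reached through the thread_struct address kept in SPRG3), while an exception from kernel mode keeps the interrupted r1; either way an INT_FRAME_SIZE frame is carved out below the chosen top. As a C sketch, with illustrative constants and a stubbed thread_info lookup:

    #define MSR_PR          0x4000  /* SRR1 bit: exception came from user mode */
    #define THREAD_SIZE     8192    /* assumption: two-page kernel stacks */
    #define INT_FRAME_SIZE  192     /* illustrative exception-frame size */

    /* stand-in for the THREAD_INFO-THREAD chase done off SPRG3 */
    static unsigned long thread_info_of(unsigned long sprg3) { return sprg3; }

    static unsigned long pick_exception_stack_sketch(unsigned long r1,
                                                     unsigned long srr1,
                                                     unsigned long sprg3)
    {
        if (srr1 & MSR_PR)          /* from user: switch to the kernel stack */
            r1 = thread_info_of(sprg3) + THREAD_SIZE;
        return r1 - INT_FRAME_SIZE; /* allocate the exception frame */
    }

The critical prolog just below makes the same decision, but stashes its scratch registers in low memory first, since a critical exception may arrive while the SPRGs are already in use by the normal prolog.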
139/*
140 * Exception prolog for critical exceptions. This is a little different
141 * from the normal exception prolog above since a critical exception
142 * can potentially occur at any point during normal exception processing.
143 * Thus we cannot use the same SPRG registers as the normal prolog above.
144 * Instead we use a couple of words of memory at low physical addresses.
145 * This is OK since we don't support SMP on these processors.
146 */
147#define CRITICAL_EXCEPTION_PROLOG \
148 stw r10,crit_r10@l(0); /* save two registers to work with */\
149 stw r11,crit_r11@l(0); \
150 mfcr r10; /* save CR in r10 for now */\
151 mfspr r11,SPRN_SRR3; /* check whether user or kernel */\
152 andi. r11,r11,MSR_PR; \
153 lis r11,critical_stack_top@h; \
154 ori r11,r11,critical_stack_top@l; \
155 beq 1f; \
156 /* COMING FROM USER MODE */ \
157 mfspr r11,SPRN_SPRG3; /* if from user, start at top of */\
158 lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
159 addi r11,r11,THREAD_SIZE; \
1601: subi r11,r11,INT_FRAME_SIZE; /* Allocate an exception frame */\
161 tophys(r11,r11); \
162 stw r10,_CCR(r11); /* save various registers */\
163 stw r12,GPR12(r11); \
164 stw r9,GPR9(r11); \
165 mflr r10; \
166 stw r10,_LINK(r11); \
167 mfspr r12,SPRN_DEAR; /* save DEAR and ESR in the frame */\
168 stw r12,_DEAR(r11); /* since they may have had stuff */\
169 mfspr r9,SPRN_ESR; /* in them at the point where the */\
170 stw r9,_ESR(r11); /* exception was taken */\
171 mfspr r12,SPRN_SRR2; \
172 stw r1,GPR1(r11); \
173 mfspr r9,SPRN_SRR3; \
174 stw r1,0(r11); \
175 tovirt(r1,r11); \
176 rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
177 stw r0,GPR0(r11); \
178 SAVE_4GPRS(3, r11); \
179 SAVE_2GPRS(7, r11)
180
181 /*
182 * State at this point:
183 * r9 saved in stack frame, now saved SRR3 & ~MSR_WE
184 * r10 saved in crit_r10 and in stack frame, trashed
185 * r11 saved in crit_r11 and in stack frame,
186 * now phys stack/exception frame pointer
187 * r12 saved in stack frame, now saved SRR2
188 * CR saved in stack frame, CR0.EQ = !SRR3.PR
189 * LR, DEAR, ESR in stack frame
190 * r1 saved in stack frame, now virt stack/excframe pointer
191 * r0, r3-r8 saved in stack frame
192 */
193
194/*
195 * Exception vectors.
196 */
197#define START_EXCEPTION(n, label) \
198 . = n; \
199label:
200
201#define EXCEPTION(n, label, hdlr, xfer) \
202 START_EXCEPTION(n, label); \
203 NORMAL_EXCEPTION_PROLOG; \
204 addi r3,r1,STACK_FRAME_OVERHEAD; \
205 xfer(n, hdlr)
206
207#define CRITICAL_EXCEPTION(n, label, hdlr) \
208 START_EXCEPTION(n, label); \
209 CRITICAL_EXCEPTION_PROLOG; \
210 addi r3,r1,STACK_FRAME_OVERHEAD; \
211 EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
212 NOCOPY, crit_transfer_to_handler, \
213 ret_from_crit_exc)
214
215#define EXC_XFER_TEMPLATE(hdlr, trap, msr, copyee, tfer, ret) \
216 li r10,trap; \
217 stw r10,_TRAP(r11); \
218 lis r10,msr@h; \
219 ori r10,r10,msr@l; \
220 copyee(r10, r9); \
221 bl tfer; \
222 .long hdlr; \
223 .long ret
224
225#define COPY_EE(d, s) rlwimi d,s,0,16,16
226#define NOCOPY(d, s)
227
228#define EXC_XFER_STD(n, hdlr) \
229 EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, NOCOPY, transfer_to_handler_full, \
230 ret_from_except_full)
231
232#define EXC_XFER_LITE(n, hdlr) \
233 EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, NOCOPY, transfer_to_handler, \
234 ret_from_except)
235
236#define EXC_XFER_EE(n, hdlr) \
237 EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, COPY_EE, transfer_to_handler_full, \
238 ret_from_except_full)
239
240#define EXC_XFER_EE_LITE(n, hdlr) \
241 EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, COPY_EE, transfer_to_handler, \
242 ret_from_except)
243
244
245/*
246 * 0x0100 - Critical Interrupt Exception
247 */
248 CRITICAL_EXCEPTION(0x0100, CriticalInterrupt, unknown_exception)
249
250/*
251 * 0x0200 - Machine Check Exception
252 */
253 CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
254
255/*
256 * 0x0300 - Data Storage Exception
257 * This happens for just a few reasons. U0 set (but we don't do that),
258 * or zone protection fault (user violation, write to protected page).
259 * If this is just an update of modified status, we do that quickly
260 * and exit. Otherwise, we call heavyweight functions to do the work.
261 */
262 START_EXCEPTION(0x0300, DataStorage)
263 mtspr SPRN_SPRG0, r10 /* Save some working registers */
264 mtspr SPRN_SPRG1, r11
265#ifdef CONFIG_403GCX
266 stw r12, 0(r0)
267 stw r9, 4(r0)
268 mfcr r11
269 mfspr r12, SPRN_PID
270 stw r11, 8(r0)
271 stw r12, 12(r0)
272#else
273 mtspr SPRN_SPRG4, r12
274 mtspr SPRN_SPRG5, r9
275 mfcr r11
276 mfspr r12, SPRN_PID
277 mtspr SPRN_SPRG7, r11
278 mtspr SPRN_SPRG6, r12
279#endif
280
281 /* First, check if it was a zone fault (which means a user
282 * tried to access a kernel or read-protected page - always
283 * a SEGV). All other faults here must be stores, so no
284 * need to check ESR_DST as well. */
285 mfspr r10, SPRN_ESR
286 andis. r10, r10, ESR_DIZ@h
287 bne 2f
288
289 mfspr r10, SPRN_DEAR /* Get faulting address */
290
291 /* If we are faulting a kernel address, we have to use the
292 * kernel page tables.
293 */
294 lis r11, TASK_SIZE@h
295 cmplw r10, r11
296 blt+ 3f
297 lis r11, swapper_pg_dir@h
298 ori r11, r11, swapper_pg_dir@l
299 li r9, 0
300 mtspr SPRN_PID, r9 /* TLB will have 0 TID */
301 b 4f
302
303 /* Get the PGD for the current thread.
304 */
3053:
306 mfspr r11,SPRN_SPRG3
307 lwz r11,PGDIR(r11)
3084:
309 tophys(r11, r11)
310 rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */
311 lwz r11, 0(r11) /* Get L1 entry */
312 rlwinm. r12, r11, 0, 0, 19 /* Extract L2 (pte) base address */
313 beq 2f /* Bail if no table */
314
315 rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */
316 lwz r11, 0(r12) /* Get Linux PTE */
317
318 andi. r9, r11, _PAGE_RW /* Is it writeable? */
319 beq 2f /* Bail if not */
320
321 /* Update 'changed'.
322 */
323 ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
324 stw r11, 0(r12) /* Update Linux page table */
325
326 /* Most of the Linux PTE is ready to load into the TLB LO.
327 * We set ZSEL, where only the LS-bit determines user access.
328 * We set execute, because we don't have the granularity to
329 * properly set this at the page level (Linux problem).
330 * If shared is set, we cause a zero PID->TID load.
331 * Many of these bits are software only. Bits we don't set
332 * here we (properly should) assume have the appropriate value.
333 */
334 li r12, 0x0ce2
335 andc r11, r11, r12 /* Make sure 20, 21 are zero */
336
337 /* find the TLB index that caused the fault. It has to be here.
338 */
339 tlbsx r9, 0, r10
340
341 tlbwe r11, r9, TLB_DATA /* Load TLB LO */
342
343 /* Done...restore registers and get out of here.
344 */
345#ifdef CONFIG_403GCX
346 lwz r12, 12(r0)
347 lwz r11, 8(r0)
348 mtspr SPRN_PID, r12
349 mtcr r11
350 lwz r9, 4(r0)
351 lwz r12, 0(r0)
352#else
353 mfspr r12, SPRN_SPRG6
354 mfspr r11, SPRN_SPRG7
355 mtspr SPRN_PID, r12
356 mtcr r11
357 mfspr r9, SPRN_SPRG5
358 mfspr r12, SPRN_SPRG4
359#endif
360 mfspr r11, SPRN_SPRG1
361 mfspr r10, SPRN_SPRG0
362 PPC405_ERR77_SYNC
363 rfi /* Should sync shadow TLBs */
364 b . /* prevent prefetch past rfi */
365
3662:
367 /* The bailout. Restore registers to pre-exception conditions
368 * and call the heavyweights to help us out.
369 */
370#ifdef CONFIG_403GCX
371 lwz r12, 12(r0)
372 lwz r11, 8(r0)
373 mtspr SPRN_PID, r12
374 mtcr r11
375 lwz r9, 4(r0)
376 lwz r12, 0(r0)
377#else
378 mfspr r12, SPRN_SPRG6
379 mfspr r11, SPRN_SPRG7
380 mtspr SPRN_PID, r12
381 mtcr r11
382 mfspr r9, SPRN_SPRG5
383 mfspr r12, SPRN_SPRG4
384#endif
385 mfspr r11, SPRN_SPRG1
386 mfspr r10, SPRN_SPRG0
387 b DataAccess
388
389/*
390 * 0x0400 - Instruction Storage Exception
391 * This is caused by a fetch from non-execute or guarded pages.
392 */
393 START_EXCEPTION(0x0400, InstructionAccess)
394 NORMAL_EXCEPTION_PROLOG
395 mr r4,r12 /* Pass SRR0 as arg2 */
396 li r5,0 /* Pass zero as arg3 */
397 EXC_XFER_EE_LITE(0x400, handle_page_fault)
398
399/* 0x0500 - External Interrupt Exception */
400 EXCEPTION(0x0500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
401
402/* 0x0600 - Alignment Exception */
403 START_EXCEPTION(0x0600, Alignment)
404 NORMAL_EXCEPTION_PROLOG
405 mfspr r4,SPRN_DEAR /* Grab the DEAR and save it */
406 stw r4,_DEAR(r11)
407 addi r3,r1,STACK_FRAME_OVERHEAD
408 EXC_XFER_EE(0x600, alignment_exception)
409
410/* 0x0700 - Program Exception */
411 START_EXCEPTION(0x0700, ProgramCheck)
412 NORMAL_EXCEPTION_PROLOG
413 mfspr r4,SPRN_ESR /* Grab the ESR and save it */
414 stw r4,_ESR(r11)
415 addi r3,r1,STACK_FRAME_OVERHEAD
416 EXC_XFER_STD(0x700, program_check_exception)
417
418 EXCEPTION(0x0800, Trap_08, unknown_exception, EXC_XFER_EE)
419 EXCEPTION(0x0900, Trap_09, unknown_exception, EXC_XFER_EE)
420 EXCEPTION(0x0A00, Trap_0A, unknown_exception, EXC_XFER_EE)
421 EXCEPTION(0x0B00, Trap_0B, unknown_exception, EXC_XFER_EE)
422
423/* 0x0C00 - System Call Exception */
424 START_EXCEPTION(0x0C00, SystemCall)
425 NORMAL_EXCEPTION_PROLOG
426 EXC_XFER_EE_LITE(0xc00, DoSyscall)
427
428 EXCEPTION(0x0D00, Trap_0D, unknown_exception, EXC_XFER_EE)
429 EXCEPTION(0x0E00, Trap_0E, unknown_exception, EXC_XFER_EE)
430 EXCEPTION(0x0F00, Trap_0F, unknown_exception, EXC_XFER_EE)
431
432/* 0x1000 - Programmable Interval Timer (PIT) Exception */
433 START_EXCEPTION(0x1000, Decrementer)
434 NORMAL_EXCEPTION_PROLOG
435 lis r0,TSR_PIS@h
436 mtspr SPRN_TSR,r0 /* Clear the PIT exception */
437 addi r3,r1,STACK_FRAME_OVERHEAD
438 EXC_XFER_LITE(0x1000, timer_interrupt)
439
440#if 0
441/* NOTE:
442 * FIT and WDT handlers are not implemented yet.
443 */
444
445/* 0x1010 - Fixed Interval Timer (FIT) Exception
446*/
447 STND_EXCEPTION(0x1010, FITException, unknown_exception)
448
449/* 0x1020 - Watchdog Timer (WDT) Exception
450*/
451#ifdef CONFIG_BOOKE_WDT
452 CRITICAL_EXCEPTION(0x1020, WDTException, WatchdogException)
453#else
454 CRITICAL_EXCEPTION(0x1020, WDTException, unknown_exception)
455#endif
456#endif
457
458/* 0x1100 - Data TLB Miss Exception
459 * As the name implies, translation is not in the MMU, so search the
460 * page tables and fix it. The only purpose of this function is to
461 * load TLB entries from the page table if they exist.
462 */
463 START_EXCEPTION(0x1100, DTLBMiss)
464 mtspr SPRN_SPRG0, r10 /* Save some working registers */
465 mtspr SPRN_SPRG1, r11
466#ifdef CONFIG_403GCX
467 stw r12, 0(r0)
468 stw r9, 4(r0)
469 mfcr r11
470 mfspr r12, SPRN_PID
471 stw r11, 8(r0)
472 stw r12, 12(r0)
473#else
474 mtspr SPRN_SPRG4, r12
475 mtspr SPRN_SPRG5, r9
476 mfcr r11
477 mfspr r12, SPRN_PID
478 mtspr SPRN_SPRG7, r11
479 mtspr SPRN_SPRG6, r12
480#endif
481 mfspr r10, SPRN_DEAR /* Get faulting address */
482
483 /* If we are faulting a kernel address, we have to use the
484 * kernel page tables.
485 */
486 lis r11, TASK_SIZE@h
487 cmplw r10, r11
488 blt+ 3f
489 lis r11, swapper_pg_dir@h
490 ori r11, r11, swapper_pg_dir@l
491 li r9, 0
492 mtspr SPRN_PID, r9 /* TLB will have 0 TID */
493 b 4f
494
495 /* Get the PGD for the current thread.
496 */
4973:
498 mfspr r11,SPRN_SPRG3
499 lwz r11,PGDIR(r11)
5004:
501 tophys(r11, r11)
502 rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */
503 lwz r12, 0(r11) /* Get L1 entry */
504 andi. r9, r12, _PMD_PRESENT /* Check if it points to a PTE page */
505 beq 2f /* Bail if no table */
506
507 rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */
508 lwz r11, 0(r12) /* Get Linux PTE */
509 andi. r9, r11, _PAGE_PRESENT
510 beq 5f
511
512 ori r11, r11, _PAGE_ACCESSED
513 stw r11, 0(r12)
514
515 /* Create TLB tag. This is the faulting address plus a static
516 * set of bits. These are size, valid, E, U0.
517 */
518 li r12, 0x00c0
519 rlwimi r10, r12, 0, 20, 31
520
521 b finish_tlb_load
522
5232: /* Check for possible large-page pmd entry */
524 rlwinm. r9, r12, 2, 22, 24
525 beq 5f
526
527 /* Create TLB tag. This is the faulting address, plus a static
528 * set of bits (valid, E, U0) plus the size from the PMD.
529 */
530 ori r9, r9, 0x40
531 rlwimi r10, r9, 0, 20, 31
532 mr r11, r12
533
534 b finish_tlb_load
535
5365:
537 /* The bailout. Restore registers to pre-exception conditions
538 * and call the heavyweights to help us out.
539 */
540#ifdef CONFIG_403GCX
541 lwz r12, 12(r0)
542 lwz r11, 8(r0)
543 mtspr SPRN_PID, r12
544 mtcr r11
545 lwz r9, 4(r0)
546 lwz r12, 0(r0)
547#else
548 mfspr r12, SPRN_SPRG6
549 mfspr r11, SPRN_SPRG7
550 mtspr SPRN_PID, r12
551 mtcr r11
552 mfspr r9, SPRN_SPRG5
553 mfspr r12, SPRN_SPRG4
554#endif
555 mfspr r11, SPRN_SPRG1
556 mfspr r10, SPRN_SPRG0
557 b DataAccess
558
559/* 0x1200 - Instruction TLB Miss Exception
560 * Nearly the same as above, except we get our information from different
561 * registers and bailout to a different point.
562 */
563 START_EXCEPTION(0x1200, ITLBMiss)
564 mtspr SPRN_SPRG0, r10 /* Save some working registers */
565 mtspr SPRN_SPRG1, r11
566#ifdef CONFIG_403GCX
567 stw r12, 0(r0)
568 stw r9, 4(r0)
569 mfcr r11
570 mfspr r12, SPRN_PID
571 stw r11, 8(r0)
572 stw r12, 12(r0)
573#else
574 mtspr SPRN_SPRG4, r12
575 mtspr SPRN_SPRG5, r9
576 mfcr r11
577 mfspr r12, SPRN_PID
578 mtspr SPRN_SPRG7, r11
579 mtspr SPRN_SPRG6, r12
580#endif
581 mfspr r10, SPRN_SRR0 /* Get faulting address */
582
583 /* If we are faulting a kernel address, we have to use the
584 * kernel page tables.
585 */
586 lis r11, TASK_SIZE@h
587 cmplw r10, r11
588 blt+ 3f
589 lis r11, swapper_pg_dir@h
590 ori r11, r11, swapper_pg_dir@l
591 li r9, 0
592 mtspr SPRN_PID, r9 /* TLB will have 0 TID */
593 b 4f
594
595 /* Get the PGD for the current thread.
596 */
5973:
598 mfspr r11,SPRN_SPRG3
599 lwz r11,PGDIR(r11)
6004:
601 tophys(r11, r11)
602 rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */
603 lwz r12, 0(r11) /* Get L1 entry */
604 andi. r9, r12, _PMD_PRESENT /* Check if it points to a PTE page */
605 beq 2f /* Bail if no table */
606
607 rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */
608 lwz r11, 0(r12) /* Get Linux PTE */
609 andi. r9, r11, _PAGE_PRESENT
610 beq 5f
611
612 ori r11, r11, _PAGE_ACCESSED
613 stw r11, 0(r12)
614
615 /* Create TLB tag. This is the faulting address plus a static
616 * set of bits. These are size, valid, E, U0.
617 */
618 li r12, 0x00c0
619 rlwimi r10, r12, 0, 20, 31
620
621 b finish_tlb_load
622
6232: /* Check for possible large-page pmd entry */
624 rlwinm. r9, r12, 2, 22, 24
625 beq 5f
626
627 /* Create TLB tag. This is the faulting address, plus a static
628 * set of bits (valid, E, U0) plus the size from the PMD.
629 */
630 ori r9, r9, 0x40
631 rlwimi r10, r9, 0, 20, 31
632 mr r11, r12
633
634 b finish_tlb_load
635
6365:
637 /* The bailout. Restore registers to pre-exception conditions
638 * and call the heavyweights to help us out.
639 */
640#ifdef CONFIG_403GCX
641 lwz r12, 12(r0)
642 lwz r11, 8(r0)
643 mtspr SPRN_PID, r12
644 mtcr r11
645 lwz r9, 4(r0)
646 lwz r12, 0(r0)
647#else
648 mfspr r12, SPRN_SPRG6
649 mfspr r11, SPRN_SPRG7
650 mtspr SPRN_PID, r12
651 mtcr r11
652 mfspr r9, SPRN_SPRG5
653 mfspr r12, SPRN_SPRG4
654#endif
655 mfspr r11, SPRN_SPRG1
656 mfspr r10, SPRN_SPRG0
657 b InstructionAccess
658
659 EXCEPTION(0x1300, Trap_13, unknown_exception, EXC_XFER_EE)
660 EXCEPTION(0x1400, Trap_14, unknown_exception, EXC_XFER_EE)
661 EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
662 EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
663#ifdef CONFIG_IBM405_ERR51
664 /* 405GP errata 51 */
665 START_EXCEPTION(0x1700, Trap_17)
666 b DTLBMiss
667#else
668 EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_EE)
669#endif
670 EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
671 EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
672 EXCEPTION(0x1A00, Trap_1A, unknown_exception, EXC_XFER_EE)
673 EXCEPTION(0x1B00, Trap_1B, unknown_exception, EXC_XFER_EE)
674 EXCEPTION(0x1C00, Trap_1C, unknown_exception, EXC_XFER_EE)
675 EXCEPTION(0x1D00, Trap_1D, unknown_exception, EXC_XFER_EE)
676 EXCEPTION(0x1E00, Trap_1E, unknown_exception, EXC_XFER_EE)
677 EXCEPTION(0x1F00, Trap_1F, unknown_exception, EXC_XFER_EE)
678
679/* Check for a single step debug exception while in an exception
680 * handler before state has been saved. This is to catch the case
681 * where an instruction that we are trying to single step causes
682 * an exception (eg ITLB/DTLB miss) and thus the first instruction of
683 * the exception handler generates a single step debug exception.
684 *
685 * If we get a debug trap on the first instruction of an exception handler,
686 * we reset the MSR_DE in the _exception handler's_ MSR (the debug trap is
687 * a critical exception, so we are using SPRN_CSRR1 to manipulate the MSR).
688 * The exception handler was handling a non-critical interrupt, so it will
689 * save (and later restore) the MSR via SPRN_SRR1, which will still have
690 * the MSR_DE bit set.
691 */
692 /* 0x2000 - Debug Exception */
693 START_EXCEPTION(0x2000, DebugTrap)
694 CRITICAL_EXCEPTION_PROLOG
695
696 /*
697 * If this is a single step or branch-taken exception in an
698 * exception entry sequence, it was probably meant to apply to
699 * the code where the exception occurred (since exception entry
700 * doesn't turn off DE automatically). We simulate the effect
701 * of turning off DE on entry to an exception handler by turning
702 * off DE in the SRR3 value and clearing the debug status.
703 */
704 mfspr r10,SPRN_DBSR /* check single-step/branch taken */
705 andis. r10,r10,DBSR_IC@h
706 beq+ 2f
707
708 andi. r10,r9,MSR_IR|MSR_PR /* check supervisor + MMU off */
709 beq 1f /* branch and fix it up */
710
711 mfspr r10,SPRN_SRR2 /* Faulting instruction address */
712 cmplwi r10,0x2100
713 bgt+ 2f /* address above exception vectors */
714
715 /* here it looks like we got an inappropriate debug exception. */
7161: rlwinm r9,r9,0,~MSR_DE /* clear DE in the SRR3 value */
717 lis r10,DBSR_IC@h /* clear the IC event */
718 mtspr SPRN_DBSR,r10
719 /* restore state and get out */
720 lwz r10,_CCR(r11)
721 lwz r0,GPR0(r11)
722 lwz r1,GPR1(r11)
723 mtcrf 0x80,r10
724 mtspr SPRN_SRR2,r12
725 mtspr SPRN_SRR3,r9
726 lwz r9,GPR9(r11)
727 lwz r12,GPR12(r11)
728 lwz r10,crit_r10@l(0)
729 lwz r11,crit_r11@l(0)
730 PPC405_ERR77_SYNC
731 rfci
732 b .
733
734 /* continue normal handling for a critical exception... */
7352: mfspr r4,SPRN_DBSR
736 addi r3,r1,STACK_FRAME_OVERHEAD
737 EXC_XFER_TEMPLATE(DebugException, 0x2002, \
738 (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
739 NOCOPY, crit_transfer_to_handler, ret_from_crit_exc)
740
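In C terms, the filter implemented by the debug handler above is roughly the following sketch (illustrative only; dbsr, srr2 and srr3 mirror the registers the assembly reads, and the constants are the ones used above):

    /* Sketch: should this debug trap be discarded because it hit the
     * entry sequence of another exception handler? */
    static int debug_trap_is_spurious(unsigned long dbsr, unsigned long srr2,
                                      unsigned long srr3)
    {
            if (!(dbsr & DBSR_IC))          /* not a single-step event */
                    return 0;
            /* kernel mode with MMU off, or faulting inside the vectors */
            return !(srr3 & (MSR_IR | MSR_PR)) || srr2 <= 0x2100;
    }
    /* If spurious: clear MSR_DE in the saved SRR3, write DBSR_IC back to
     * DBSR to acknowledge just this event, then rfci without saving state. */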
741/*
742 * The other Data TLB exceptions bail out to this point
743 * if they can't resolve the lightweight TLB fault.
744 */
745DataAccess:
746 NORMAL_EXCEPTION_PROLOG
747 mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */
748 stw r5,_ESR(r11)
749 mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
750 EXC_XFER_EE_LITE(0x300, handle_page_fault)
751
752/* Other PowerPC processors, namely those derived from the 6xx-series,
753 * have vectors from 0x2100 through 0x2F00 defined, but marked as reserved.
754 * However, for the 4xx-series processors these are neither defined nor
755 * reserved.
756 */
757
758 /* Damn, I came up one instruction too many to fit into the
759 * exception space :-). Both the instruction and data TLB
760 * miss get to this point to load the TLB.
761 * r10 - TLB_TAG value
762 * r11 - Linux PTE
763 * r12, r9 - available to use
764 * PID - loaded with proper value when we get here
765 * Upon exit, we reload everything and RFI.
766 * Actually, it will fit now, but oh well.....a common place
767 * to load the TLB.
768 */
769tlb_4xx_index:
770 .long 0
771finish_tlb_load:
772 /* load the next available TLB index.
773 */
774 lwz r9, tlb_4xx_index@l(0)
775 addi r9, r9, 1
776 andi. r9, r9, (PPC4XX_TLB_SIZE-1)
777 stw r9, tlb_4xx_index@l(0)
778
7796:
780 /*
781 * Clear out the software-only bits in the PTE to generate the
782 * TLB_DATA value. These are the bottom 2 bits of the RPN, the
783 * top 3 bits of the zone field, and M.
784 */
785 li r12, 0x0ce2
786 andc r11, r11, r12
787
788 tlbwe r11, r9, TLB_DATA /* Load TLB LO */
789 tlbwe r10, r9, TLB_TAG /* Load TLB HI */
790
791 /* Done...restore registers and get out of here.
792 */
793#ifdef CONFIG_403GCX
794 lwz r12, 12(r0)
795 lwz r11, 8(r0)
796 mtspr SPRN_PID, r12
797 mtcr r11
798 lwz r9, 4(r0)
799 lwz r12, 0(r0)
800#else
801 mfspr r12, SPRN_SPRG6
802 mfspr r11, SPRN_SPRG7
803 mtspr SPRN_PID, r12
804 mtcr r11
805 mfspr r9, SPRN_SPRG5
806 mfspr r12, SPRN_SPRG4
807#endif
808 mfspr r11, SPRN_SPRG1
809 mfspr r10, SPRN_SPRG0
810 PPC405_ERR77_SYNC
811 rfi /* Should sync shadow TLBs */
812 b . /* prevent prefetch past rfi */
813
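For reference, the slot selection and PTE masking that finish_tlb_load performs correspond roughly to this C sketch (illustrative; tlb_4xx_index and PPC4XX_TLB_SIZE are the symbols used above, and the tlbwe step itself has no C equivalent):

    static unsigned int tlb_4xx_index;

    static void finish_tlb_load_sketch(unsigned long tag, unsigned long pte)
    {
            unsigned long data;

            /* Round-robin replacement; PPC4XX_TLB_SIZE is a power of two. */
            tlb_4xx_index = (tlb_4xx_index + 1) & (PPC4XX_TLB_SIZE - 1);

            /* Strip the software-only PTE bits (bottom 2 bits of the RPN,
             * top 3 zone bits, and M) to form the TLB_DATA word. */
            data = pte & ~0x0ce2UL;

            /* tlbwe data, tlb_4xx_index, TLB_DATA;
             * tlbwe tag,  tlb_4xx_index, TLB_TAG */
    }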
814/* extern void giveup_fpu(struct task_struct *prev)
815 *
816 * The PowerPC 4xx family of processors do not have an FPU, so this just
817 * returns.
818 */
819_GLOBAL(giveup_fpu)
820 blr
821
822/* This is where the main kernel code starts.
823 */
824start_here:
825
826 /* ptr to current */
827 lis r2,init_task@h
828 ori r2,r2,init_task@l
829
830 /* ptr to phys current thread */
831 tophys(r4,r2)
832 addi r4,r4,THREAD /* init task's THREAD */
833 mtspr SPRN_SPRG3,r4
834
835 /* stack */
836 lis r1,init_thread_union@ha
837 addi r1,r1,init_thread_union@l
838 li r0,0
839 stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
840
841 bl early_init /* We have to do this with MMU on */
842
843/*
844 * Decide what sort of machine this is and initialize the MMU.
845 */
846 mr r3,r31
847 mr r4,r30
848 mr r5,r29
849 mr r6,r28
850 mr r7,r27
851 bl machine_init
852 bl MMU_init
853
854/* Go back to running unmapped so we can load up new values
855 * and change to using our exception vectors.
856 * On the 4xx, all we have to do is invalidate the TLB to clear
857 * the old 16M byte TLB mappings.
858 */
859 lis r4,2f@h
860 ori r4,r4,2f@l
861 tophys(r4,r4)
862 lis r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@h
863 ori r3,r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@l
864 mtspr SPRN_SRR0,r4
865 mtspr SPRN_SRR1,r3
866 rfi
867 b . /* prevent prefetch past rfi */
868
869/* Load up the kernel context */
8702:
871 sync /* Flush to memory before changing TLB */
872 tlbia
873 isync /* Flush shadow TLBs */
874
875 /* set up the PTE pointers for the Abatron bdiGDB.
876 */
877 lis r6, swapper_pg_dir@h
878 ori r6, r6, swapper_pg_dir@l
879 lis r5, abatron_pteptrs@h
880 ori r5, r5, abatron_pteptrs@l
881 stw r5, 0xf0(r0) /* Must match your Abatron config file */
882 tophys(r5,r5)
883 stw r6, 0(r5)
884
885/* Now turn on the MMU for real! */
886 lis r4,MSR_KERNEL@h
887 ori r4,r4,MSR_KERNEL@l
888 lis r3,start_kernel@h
889 ori r3,r3,start_kernel@l
890 mtspr SPRN_SRR0,r3
891 mtspr SPRN_SRR1,r4
892 rfi /* enable MMU and jump to start_kernel */
893 b . /* prevent prefetch past rfi */
894
895/* Set up the initial MMU state so we can do the first level of
896 * kernel initialization. This maps the first 16 MBytes of memory 1:1
897 * virtual to physical and more importantly sets the cache mode.
898 */
899initial_mmu:
900 tlbia /* Invalidate all TLB entries */
901 isync
902
903 /* We should still be executing code at physical address 0x0000xxxx
904 * at this point. However, start_here is at virtual address
905 * 0xC000xxxx. So, set up a TLB mapping to cover this once
906 * translation is enabled.
907 */
908
909 lis r3,KERNELBASE@h /* Load the kernel virtual address */
910 ori r3,r3,KERNELBASE@l
911 tophys(r4,r3) /* Load the kernel physical address */
912
913 iccci r0,r3 /* Invalidate the i-cache before use */
914
915 /* Load the kernel PID.
916 */
917 li r0,0
918 mtspr SPRN_PID,r0
919 sync
920
921	/* Configure and load a pinned entry into TLB slot 63. When
922	 * pinning TLBs, slots 62 and 63 are reserved by the
923	 * other TLB functions. If not reserving, then it doesn't
924	 * matter where it is loaded.
925 */
926 clrrwi r4,r4,10 /* Mask off the real page number */
927 ori r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */
928
929 clrrwi r3,r3,10 /* Mask off the effective page number */
930 ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_16M))
931
932 li r0,63 /* TLB slot 63 */
933
934 tlbwe r4,r0,TLB_DATA /* Load the data portion of the entry */
935 tlbwe r3,r0,TLB_TAG /* Load the tag portion of the entry */
936
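The tlbwe pair above composes the pinned entry like this (a sketch; kernel_virt and kernel_phys stand for the values in r3 and r4, the 0x3ff mask is the 10 low bits cleared by clrrwi, and the TLB_* macros are those used in the assembly):

    /* Pinned 1:1 kernel mapping loaded into TLB slot 63. */
    unsigned long data = (kernel_phys & ~0x3ffUL) | TLB_WR | TLB_EX;
    unsigned long tag  = (kernel_virt & ~0x3ffUL) | TLB_VALID
                         | TLB_PAGESZ(PAGESZ_16M);
    /* tlbwe data, 63, TLB_DATA ; tlbwe tag, 63, TLB_TAG */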
937#if defined(CONFIG_SERIAL_TEXT_DEBUG) && defined(SERIAL_DEBUG_IO_BASE)
938
939 /* Load a TLB entry for the UART, so that ppc4xx_progress() can use
940 * the UARTs nice and early. We use a 4k real==virtual mapping. */
941
942 lis r3,SERIAL_DEBUG_IO_BASE@h
943 ori r3,r3,SERIAL_DEBUG_IO_BASE@l
944 mr r4,r3
945 clrrwi r4,r4,12
946 ori r4,r4,(TLB_WR|TLB_I|TLB_M|TLB_G)
947
948 clrrwi r3,r3,12
949 ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K))
950
951 li r0,0 /* TLB slot 0 */
952 tlbwe r4,r0,TLB_DATA
953 tlbwe r3,r0,TLB_TAG
954#endif /* CONFIG_SERIAL_TEXT_DEBUG && SERIAL_DEBUG_IO_BASE */
955
956 isync
957
958 /* Establish the exception vector base
959 */
960 lis r4,KERNELBASE@h /* EVPR only uses the high 16-bits */
961 tophys(r0,r4) /* Use the physical address */
962 mtspr SPRN_EVPR,r0
963
964 blr
965
966_GLOBAL(abort)
967 mfspr r13,SPRN_DBCR0
968 oris r13,r13,DBCR0_RST_SYSTEM@h
969 mtspr SPRN_DBCR0,r13
970
971_GLOBAL(set_context)
972
973#ifdef CONFIG_BDI_SWITCH
974 /* Context switch the PTE pointer for the Abatron BDI2000.
975 * The PGDIR is the second parameter.
976 */
977 lis r5, KERNELBASE@h
978 lwz r5, 0xf0(r5)
979 stw r4, 0x4(r5)
980#endif
981 sync
982 mtspr SPRN_PID,r3
983 isync /* Need an isync to flush shadow */
984 /* TLBs after changing PID */
985 blr
986
987/* We put a few things here that have to be page-aligned. This stuff
988 * goes at the beginning of the data segment, which is page-aligned.
989 */
990 .data
991 .align 12
992 .globl sdata
993sdata:
994 .globl empty_zero_page
995empty_zero_page:
996 .space 4096
997 .globl swapper_pg_dir
998swapper_pg_dir:
999 .space 4096
1000
1001
1002/* Stack for handling critical exceptions from kernel mode */
1003 .section .bss
1004 .align 12
1005exception_stack_bottom:
1006 .space 4096
1007critical_stack_top:
1008 .globl exception_stack_top
1009exception_stack_top:
1010
1011/* This space gets a copy of optional info passed to us by the bootstrap
1012 * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
1013 */
1014 .globl cmd_line
1015cmd_line:
1016 .space 512
1017
1018/* Room for two PTE pointers, usually the kernel and current user pointers
1019 * to their respective root page table.
1020 */
1021abatron_pteptrs:
1022 .space 8
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
new file mode 100644
index 000000000000..147215a0d6c0
--- /dev/null
+++ b/arch/powerpc/kernel/head_64.S
@@ -0,0 +1,1957 @@
1/*
2 * arch/ppc64/kernel/head.S
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
8 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
9 * Adapted for Power Macintosh by Paul Mackerras.
10 * Low-level exception handlers and MMU support
11 * rewritten by Paul Mackerras.
12 * Copyright (C) 1996 Paul Mackerras.
13 *
14 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
15 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
16 *
17 * This file contains the low-level support and setup for the
18 * PowerPC-64 platform, including trap and interrupt dispatch.
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 */
25
26#include <linux/config.h>
27#include <linux/threads.h>
28#include <asm/reg.h>
29#include <asm/page.h>
30#include <asm/mmu.h>
31#include <asm/systemcfg.h>
32#include <asm/ppc_asm.h>
33#include <asm/asm-offsets.h>
34#include <asm/bug.h>
35#include <asm/cputable.h>
36#include <asm/setup.h>
37#include <asm/hvcall.h>
38#include <asm/iSeries/LparMap.h>
39#include <asm/thread_info.h>
40
41#ifdef CONFIG_PPC_ISERIES
42#define DO_SOFT_DISABLE
43#endif
44
45/*
46 * We lay out physical memory as follows:
47 * 0x0000 - 0x00ff : Secondary processor spin code
48 * 0x0100 - 0x2fff : pSeries Interrupt prologs
49 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
50 * 0x6000 - 0x6fff : Initial (CPU0) segment table
51 * 0x7000 - 0x7fff : FWNMI data area
52 * 0x8000 - : Early init and support code
53 */
54
55/*
56 * SPRG Usage
57 *
58 * Register Definition
59 *
60 * SPRG0 reserved for hypervisor
61 * SPRG1 temp - used to save gpr
62 * SPRG2 temp - used to save gpr
63 * SPRG3 virt addr of paca
64 */
65
66/*
67 * Entering into this code we make the following assumptions:
68 * For pSeries:
69 * 1. The MMU is off & open firmware is running in real mode.
70 * 2. The kernel is entered at __start
71 *
72 * For iSeries:
73 * 1. The MMU is on (as it always is for iSeries)
74 * 2. The kernel is entered at system_reset_iSeries
75 */
76
77 .text
78 .globl _stext
79_stext:
80#ifdef CONFIG_PPC_MULTIPLATFORM
81_GLOBAL(__start)
82 /* NOP this out unconditionally */
83BEGIN_FTR_SECTION
84 b .__start_initialization_multiplatform
85END_FTR_SECTION(0, 1)
86#endif /* CONFIG_PPC_MULTIPLATFORM */
87
88 /* Catch branch to 0 in real mode */
89 trap
90
91#ifdef CONFIG_PPC_ISERIES
92 /*
93 * At offset 0x20, there is a pointer to iSeries LPAR data.
94 * This is required by the hypervisor
95 */
96 . = 0x20
97 .llong hvReleaseData-KERNELBASE
98
99 /*
100 * At offset 0x28 and 0x30 are offsets to the mschunks_map
101 * array (used by the iSeries LPAR debugger to do translation
102 * between physical addresses and absolute addresses) and
103 * to the pidhash table (also used by the debugger)
104 */
105 .llong mschunks_map-KERNELBASE
106 .llong 0 /* pidhash-KERNELBASE SFRXXX */
107
108 /* Offset 0x38 - Pointer to start of embedded System.map */
109 .globl embedded_sysmap_start
110embedded_sysmap_start:
111 .llong 0
112 /* Offset 0x40 - Pointer to end of embedded System.map */
113 .globl embedded_sysmap_end
114embedded_sysmap_end:
115 .llong 0
116
117#endif /* CONFIG_PPC_ISERIES */
118
119 /* Secondary processors spin on this value until it goes to 1. */
120 .globl __secondary_hold_spinloop
121__secondary_hold_spinloop:
122 .llong 0x0
123
124 /* Secondary processors write this value with their cpu # */
125 /* after they enter the spin loop immediately below. */
126 .globl __secondary_hold_acknowledge
127__secondary_hold_acknowledge:
128 .llong 0x0
129
130 . = 0x60
131/*
132 * The following code is used on pSeries to hold secondary processors
133 * in a spin loop after they have been freed from OpenFirmware, but
134 * before the bulk of the kernel has been relocated. This code
135 * is relocated to physical address 0x60 before prom_init is run.
136 * All of it must fit below the first exception vector at 0x100.
137 */
138_GLOBAL(__secondary_hold)
139 mfmsr r24
140 ori r24,r24,MSR_RI
141 mtmsrd r24 /* RI on */
142
143 /* Grab our linux cpu number */
144 mr r24,r3
145
146 /* Tell the master cpu we're here */
147 /* Relocation is off & we are located at an address less */
148 /* than 0x100, so only need to grab low order offset. */
149 std r24,__secondary_hold_acknowledge@l(0)
150 sync
151
152 /* All secondary cpus wait here until told to start. */
153100: ld r4,__secondary_hold_spinloop@l(0)
154 cmpdi 0,r4,1
155 bne 100b
156
157#ifdef CONFIG_HMT
158 b .hmt_init
159#else
160#ifdef CONFIG_SMP
161 mr r3,r24
162 b .pSeries_secondary_smp_init
163#else
164 BUG_OPCODE
165#endif
166#endif
167
168/* This value is used to mark exception frames on the stack. */
169 .section ".toc","aw"
170exception_marker:
171 .tc ID_72656773_68657265[TC],0x7265677368657265
172 .text
173
174/*
175 * The following macros define the code that appears as
176 * the prologue to each of the exception handlers. They
177 * are split into two parts to allow a single kernel binary
178 * to be used for pSeries and iSeries.
179 * LOL. One day... - paulus
180 */
181
182/*
183 * We make as much of the exception code common between native
184 * exception handlers (including pSeries LPAR) and iSeries LPAR
185 * implementations as possible.
186 */
187
188/*
189 * This is the start of the interrupt handlers for pSeries
190 * This code runs with relocation off.
191 */
192#define EX_R9 0
193#define EX_R10 8
194#define EX_R11 16
195#define EX_R12 24
196#define EX_R13 32
197#define EX_SRR0 40
198#define EX_R3 40 /* SLB miss saves R3, but not SRR0 */
199#define EX_DAR 48
200#define EX_LR 48 /* SLB miss saves LR, but not DAR */
201#define EX_DSISR 56
202#define EX_CCR 60
203
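Viewed as a C struct, the per-paca save area these offsets describe would look roughly like this (illustrative only; the kernel derives the offsets in assembly rather than defining such a struct):

    struct exc_save_area {                        /* one per PACA_EX* area */
            unsigned long r9, r10, r11, r12, r13; /* EX_R9..EX_R13 (0..32) */
            unsigned long srr0;           /* EX_SRR0; EX_R3 for SLB miss */
            unsigned long dar;            /* EX_DAR;  EX_LR for SLB miss */
            unsigned int  dsisr;          /* EX_DSISR (56) */
            unsigned int  ccr;            /* EX_CCR   (60) */
    };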
204#define EXCEPTION_PROLOG_PSERIES(area, label) \
205 mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
206 std r9,area+EX_R9(r13); /* save r9 - r12 */ \
207 std r10,area+EX_R10(r13); \
208 std r11,area+EX_R11(r13); \
209 std r12,area+EX_R12(r13); \
210 mfspr r9,SPRN_SPRG1; \
211 std r9,area+EX_R13(r13); \
212 mfcr r9; \
213 clrrdi r12,r13,32; /* get high part of &label */ \
214 mfmsr r10; \
215 mfspr r11,SPRN_SRR0; /* save SRR0 */ \
216 ori r12,r12,(label)@l; /* virt addr of handler */ \
217 ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \
218 mtspr SPRN_SRR0,r12; \
219 mfspr r12,SPRN_SRR1; /* and SRR1 */ \
220 mtspr SPRN_SRR1,r10; \
221 rfid; \
222 b . /* prevent speculative execution */
223
224/*
225 * This is the start of the interrupt handlers for iSeries
226 * This code runs with relocation on.
227 */
228#define EXCEPTION_PROLOG_ISERIES_1(area) \
229 mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
230 std r9,area+EX_R9(r13); /* save r9 - r12 */ \
231 std r10,area+EX_R10(r13); \
232 std r11,area+EX_R11(r13); \
233 std r12,area+EX_R12(r13); \
234 mfspr r9,SPRN_SPRG1; \
235 std r9,area+EX_R13(r13); \
236 mfcr r9
237
238#define EXCEPTION_PROLOG_ISERIES_2 \
239 mfmsr r10; \
240 ld r11,PACALPPACA+LPPACASRR0(r13); \
241 ld r12,PACALPPACA+LPPACASRR1(r13); \
242 ori r10,r10,MSR_RI; \
243 mtmsrd r10,1
244
245/*
246 * The common exception prolog is used for all except a few exceptions
247 * such as a segment miss on a kernel address. We have to be prepared
248 * to take another exception from the point where we first touch the
249 * kernel stack onwards.
250 *
251 * On entry r13 points to the paca, r9-r13 are saved in the paca,
252 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
253 * SRR1, and relocation is on.
254 */
255#define EXCEPTION_PROLOG_COMMON(n, area) \
256 andi. r10,r12,MSR_PR; /* See if coming from user */ \
257 mr r10,r1; /* Save r1 */ \
258 subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
259 beq- 1f; \
260 ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
2611: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \
262 bge- cr1,bad_stack; /* abort if it is */ \
263 std r9,_CCR(r1); /* save CR in stackframe */ \
264 std r11,_NIP(r1); /* save SRR0 in stackframe */ \
265 std r12,_MSR(r1); /* save SRR1 in stackframe */ \
266 std r10,0(r1); /* make stack chain pointer */ \
267 std r0,GPR0(r1); /* save r0 in stackframe */ \
268 std r10,GPR1(r1); /* save r1 in stackframe */ \
269 std r2,GPR2(r1); /* save r2 in stackframe */ \
270 SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
271 SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
272 ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \
273 ld r10,area+EX_R10(r13); \
274 std r9,GPR9(r1); \
275 std r10,GPR10(r1); \
276 ld r9,area+EX_R11(r13); /* move r11 - r13 to stackframe */ \
277 ld r10,area+EX_R12(r13); \
278 ld r11,area+EX_R13(r13); \
279 std r9,GPR11(r1); \
280 std r10,GPR12(r1); \
281 std r11,GPR13(r1); \
282 ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \
283 mflr r9; /* save LR in stackframe */ \
284 std r9,_LINK(r1); \
285 mfctr r10; /* save CTR in stackframe */ \
286 std r10,_CTR(r1); \
287 mfspr r11,SPRN_XER; /* save XER in stackframe */ \
288 std r11,_XER(r1); \
289 li r9,(n)+1; \
290 std r9,_TRAP(r1); /* set trap number */ \
291 li r10,0; \
292 ld r11,exception_marker@toc(r2); \
293 std r10,RESULT(r1); /* clear regs->result */ \
294 std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */
295
296/*
297 * Exception vectors.
298 */
299#define STD_EXCEPTION_PSERIES(n, label) \
300 . = n; \
301 .globl label##_pSeries; \
302label##_pSeries: \
303 HMT_MEDIUM; \
304 mtspr SPRN_SPRG1,r13; /* save r13 */ \
305 RUNLATCH_ON(r13); \
306 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
307
308#define STD_EXCEPTION_ISERIES(n, label, area) \
309 .globl label##_iSeries; \
310label##_iSeries: \
311 HMT_MEDIUM; \
312 mtspr SPRN_SPRG1,r13; /* save r13 */ \
313 RUNLATCH_ON(r13); \
314 EXCEPTION_PROLOG_ISERIES_1(area); \
315 EXCEPTION_PROLOG_ISERIES_2; \
316 b label##_common
317
318#define MASKABLE_EXCEPTION_ISERIES(n, label) \
319 .globl label##_iSeries; \
320label##_iSeries: \
321 HMT_MEDIUM; \
322 mtspr SPRN_SPRG1,r13; /* save r13 */ \
323 RUNLATCH_ON(r13); \
324 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \
325 lbz r10,PACAPROCENABLED(r13); \
326 cmpwi 0,r10,0; \
327 beq- label##_iSeries_masked; \
328 EXCEPTION_PROLOG_ISERIES_2; \
329 b label##_common; \
330
331#ifdef DO_SOFT_DISABLE
332#define DISABLE_INTS \
333 lbz r10,PACAPROCENABLED(r13); \
334 li r11,0; \
335 std r10,SOFTE(r1); \
336 mfmsr r10; \
337 stb r11,PACAPROCENABLED(r13); \
338 ori r10,r10,MSR_EE; \
339 mtmsrd r10,1
340
341#define ENABLE_INTS \
342 lbz r10,PACAPROCENABLED(r13); \
343 mfmsr r11; \
344 std r10,SOFTE(r1); \
345 ori r11,r11,MSR_EE; \
346 mtmsrd r11,1
347
348#else /* hard enable/disable interrupts */
349#define DISABLE_INTS
350
351#define ENABLE_INTS \
352 ld r12,_MSR(r1); \
353 mfmsr r11; \
354 rlwimi r11,r12,0,MSR_EE; \
355 mtmsrd r11,1
356
357#endif
358
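The soft-disable scheme can be summarized in C as follows (a sketch under the names used above; paca stands for the structure SPRG3 points at, and hard_enable_ee() is a hypothetical helper for the ori/mtmsrd pair; MSR_EE is left hard-enabled so the hypervisor path stays cheap):

    /* DISABLE_INTS: remember the old soft state, soft-disable, keep EE on. */
    static void disable_ints_sketch(struct pt_regs *regs)
    {
            regs->softe = paca->proc_enabled; /* SOFTE(r1) <- PACAPROCENABLED */
            paca->proc_enabled = 0;           /* soft-disabled from now on */
            hard_enable_ee();                 /* ori r10,r10,MSR_EE; mtmsrd */
    }

    /* ENABLE_INTS: publish the old soft state and hard-enable. */
    static void enable_ints_sketch(struct pt_regs *regs)
    {
            regs->softe = paca->proc_enabled;
            hard_enable_ee();
    }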
359#define STD_EXCEPTION_COMMON(trap, label, hdlr) \
360 .align 7; \
361 .globl label##_common; \
362label##_common: \
363 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
364 DISABLE_INTS; \
365 bl .save_nvgprs; \
366 addi r3,r1,STACK_FRAME_OVERHEAD; \
367 bl hdlr; \
368 b .ret_from_except
369
370#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr) \
371 .align 7; \
372 .globl label##_common; \
373label##_common: \
374 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
375 DISABLE_INTS; \
376 addi r3,r1,STACK_FRAME_OVERHEAD; \
377 bl hdlr; \
378 b .ret_from_except_lite
379
380/*
381 * Start of pSeries system interrupt routines
382 */
383 . = 0x100
384 .globl __start_interrupts
385__start_interrupts:
386
387 STD_EXCEPTION_PSERIES(0x100, system_reset)
388
389 . = 0x200
390_machine_check_pSeries:
391 HMT_MEDIUM
392 mtspr SPRN_SPRG1,r13 /* save r13 */
393 RUNLATCH_ON(r13)
394 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
395
396 . = 0x300
397 .globl data_access_pSeries
398data_access_pSeries:
399 HMT_MEDIUM
400 mtspr SPRN_SPRG1,r13
401BEGIN_FTR_SECTION
402 mtspr SPRN_SPRG2,r12
403 mfspr r13,SPRN_DAR
404 mfspr r12,SPRN_DSISR
405 srdi r13,r13,60
406 rlwimi r13,r12,16,0x20
407 mfcr r12
408 cmpwi r13,0x2c
409 beq .do_stab_bolted_pSeries
410 mtcrf 0x80,r12
411 mfspr r12,SPRN_SPRG2
412END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
413 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
414
415 . = 0x380
416 .globl data_access_slb_pSeries
417data_access_slb_pSeries:
418 HMT_MEDIUM
419 mtspr SPRN_SPRG1,r13
420 RUNLATCH_ON(r13)
421 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
422 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
423 std r10,PACA_EXSLB+EX_R10(r13)
424 std r11,PACA_EXSLB+EX_R11(r13)
425 std r12,PACA_EXSLB+EX_R12(r13)
426 std r3,PACA_EXSLB+EX_R3(r13)
427 mfspr r9,SPRN_SPRG1
428 std r9,PACA_EXSLB+EX_R13(r13)
429 mfcr r9
430 mfspr r12,SPRN_SRR1 /* and SRR1 */
431 mfspr r3,SPRN_DAR
432 b .do_slb_miss /* Rel. branch works in real mode */
433
434 STD_EXCEPTION_PSERIES(0x400, instruction_access)
435
436 . = 0x480
437 .globl instruction_access_slb_pSeries
438instruction_access_slb_pSeries:
439 HMT_MEDIUM
440 mtspr SPRN_SPRG1,r13
441 RUNLATCH_ON(r13)
442 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
443 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
444 std r10,PACA_EXSLB+EX_R10(r13)
445 std r11,PACA_EXSLB+EX_R11(r13)
446 std r12,PACA_EXSLB+EX_R12(r13)
447 std r3,PACA_EXSLB+EX_R3(r13)
448 mfspr r9,SPRN_SPRG1
449 std r9,PACA_EXSLB+EX_R13(r13)
450 mfcr r9
451 mfspr r12,SPRN_SRR1 /* and SRR1 */
452 mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
453 b .do_slb_miss /* Rel. branch works in real mode */
454
455 STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
456 STD_EXCEPTION_PSERIES(0x600, alignment)
457 STD_EXCEPTION_PSERIES(0x700, program_check)
458 STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
459 STD_EXCEPTION_PSERIES(0x900, decrementer)
460 STD_EXCEPTION_PSERIES(0xa00, trap_0a)
461 STD_EXCEPTION_PSERIES(0xb00, trap_0b)
462
463 . = 0xc00
464 .globl system_call_pSeries
465system_call_pSeries:
466 HMT_MEDIUM
467 RUNLATCH_ON(r9)
468 mr r9,r13
469 mfmsr r10
470 mfspr r13,SPRN_SPRG3
471 mfspr r11,SPRN_SRR0
472 clrrdi r12,r13,32
473 oris r12,r12,system_call_common@h
474 ori r12,r12,system_call_common@l
475 mtspr SPRN_SRR0,r12
476 ori r10,r10,MSR_IR|MSR_DR|MSR_RI
477 mfspr r12,SPRN_SRR1
478 mtspr SPRN_SRR1,r10
479 rfid
480 b . /* prevent speculative execution */
481
482 STD_EXCEPTION_PSERIES(0xd00, single_step)
483 STD_EXCEPTION_PSERIES(0xe00, trap_0e)
484
485 /* We need to deal with the Altivec unavailable exception
486	 * here, which is at 0xf20 and thus lands in the middle of the
487	 * prolog code of the PerformanceMonitor one. A little
488	 * trickery is therefore necessary
489 */
490 . = 0xf00
491 b performance_monitor_pSeries
492
493 STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable)
494
495 STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
496 STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
497
498 . = 0x3000
499
500/*** pSeries interrupt support ***/
501
502 /* moved from 0xf00 */
503 STD_EXCEPTION_PSERIES(., performance_monitor)
504
505 .align 7
506_GLOBAL(do_stab_bolted_pSeries)
507 mtcrf 0x80,r12
508 mfspr r12,SPRN_SPRG2
509 EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
510
511/*
512 * Vectors for the FWNMI option. Share common code.
513 */
514 .globl system_reset_fwnmi
515system_reset_fwnmi:
516 HMT_MEDIUM
517 mtspr SPRN_SPRG1,r13 /* save r13 */
518 RUNLATCH_ON(r13)
519 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
520
521 .globl machine_check_fwnmi
522machine_check_fwnmi:
523 HMT_MEDIUM
524 mtspr SPRN_SPRG1,r13 /* save r13 */
525 RUNLATCH_ON(r13)
526 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
527
528#ifdef CONFIG_PPC_ISERIES
529/*** ISeries-LPAR interrupt handlers ***/
530
531 STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)
532
533 .globl data_access_iSeries
534data_access_iSeries:
535 mtspr SPRN_SPRG1,r13
536BEGIN_FTR_SECTION
537 mtspr SPRN_SPRG2,r12
538 mfspr r13,SPRN_DAR
539 mfspr r12,SPRN_DSISR
540 srdi r13,r13,60
541 rlwimi r13,r12,16,0x20
542 mfcr r12
543 cmpwi r13,0x2c
544 beq .do_stab_bolted_iSeries
545 mtcrf 0x80,r12
546 mfspr r12,SPRN_SPRG2
547END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
548 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
549 EXCEPTION_PROLOG_ISERIES_2
550 b data_access_common
551
552.do_stab_bolted_iSeries:
553 mtcrf 0x80,r12
554 mfspr r12,SPRN_SPRG2
555 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
556 EXCEPTION_PROLOG_ISERIES_2
557 b .do_stab_bolted
558
559 .globl data_access_slb_iSeries
560data_access_slb_iSeries:
561 mtspr SPRN_SPRG1,r13 /* save r13 */
562 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
563 std r3,PACA_EXSLB+EX_R3(r13)
564 ld r12,PACALPPACA+LPPACASRR1(r13)
565 mfspr r3,SPRN_DAR
566 b .do_slb_miss
567
568 STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)
569
570 .globl instruction_access_slb_iSeries
571instruction_access_slb_iSeries:
572 mtspr SPRN_SPRG1,r13 /* save r13 */
573 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
574 std r3,PACA_EXSLB+EX_R3(r13)
575 ld r12,PACALPPACA+LPPACASRR1(r13)
576 ld r3,PACALPPACA+LPPACASRR0(r13)
577 b .do_slb_miss
578
579 MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
580 STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
581 STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN)
582 STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN)
583 MASKABLE_EXCEPTION_ISERIES(0x900, decrementer)
584 STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN)
585 STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN)
586
587 .globl system_call_iSeries
588system_call_iSeries:
589 mr r9,r13
590 mfspr r13,SPRN_SPRG3
591 EXCEPTION_PROLOG_ISERIES_2
592 b system_call_common
593
594 STD_EXCEPTION_ISERIES( 0xd00, single_step, PACA_EXGEN)
595 STD_EXCEPTION_ISERIES( 0xe00, trap_0e, PACA_EXGEN)
596 STD_EXCEPTION_ISERIES( 0xf00, performance_monitor, PACA_EXGEN)
597
598 .globl system_reset_iSeries
599system_reset_iSeries:
600 mfspr r13,SPRN_SPRG3 /* Get paca address */
601 mfmsr r24
602 ori r24,r24,MSR_RI
603 mtmsrd r24 /* RI on */
604 lhz r24,PACAPACAINDEX(r13) /* Get processor # */
605 cmpwi 0,r24,0 /* Are we processor 0? */
606 beq .__start_initialization_iSeries /* Start up the first processor */
607 mfspr r4,SPRN_CTRLF
608 li r5,CTRL_RUNLATCH /* Turn off the run light */
609 andc r4,r4,r5
610 mtspr SPRN_CTRLT,r4
611
6121:
613 HMT_LOW
614#ifdef CONFIG_SMP
615 lbz r23,PACAPROCSTART(r13) /* Test if this processor
616 * should start */
617 sync
618 LOADADDR(r3,current_set)
619 sldi r28,r24,3 /* get current_set[cpu#] */
620 ldx r3,r3,r28
621 addi r1,r3,THREAD_SIZE
622 subi r1,r1,STACK_FRAME_OVERHEAD
623
624 cmpwi 0,r23,0
625 beq iSeries_secondary_smp_loop /* Loop until told to go */
626	bne	.__secondary_start		/* Told to go: start the processor */
627iSeries_secondary_smp_loop:
628 /* Let the Hypervisor know we are alive */
629 /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
630 lis r3,0x8002
631	rldicr	r3,r3,32,15		/* r3 = (r3 << 32) & 0xffff000000000000 */
632#else /* CONFIG_SMP */
633 /* Yield the processor. This is required for non-SMP kernels
634 which are running on multi-threaded machines. */
635 lis r3,0x8000
636 rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */
637 addi r3,r3,18 /* r3 = 0x8000000000000012 which is "yield" */
638 li r4,0 /* "yield timed" */
639 li r5,-1 /* "yield forever" */
640#endif /* CONFIG_SMP */
641 li r0,-1 /* r0=-1 indicates a Hypervisor call */
642 sc /* Invoke the hypervisor via a system call */
643 mfspr r13,SPRN_SPRG3 /* Put r13 back ???? */
644 b 1b /* If SMP not configured, secondaries
645 * loop forever */
646
647 .globl decrementer_iSeries_masked
648decrementer_iSeries_masked:
649 li r11,1
650 stb r11,PACALPPACA+LPPACADECRINT(r13)
651 lwz r12,PACADEFAULTDECR(r13)
652 mtspr SPRN_DEC,r12
653 /* fall through */
654
655 .globl hardware_interrupt_iSeries_masked
656hardware_interrupt_iSeries_masked:
657 mtcrf 0x80,r9 /* Restore regs */
658 ld r11,PACALPPACA+LPPACASRR0(r13)
659 ld r12,PACALPPACA+LPPACASRR1(r13)
660 mtspr SPRN_SRR0,r11
661 mtspr SPRN_SRR1,r12
662 ld r9,PACA_EXGEN+EX_R9(r13)
663 ld r10,PACA_EXGEN+EX_R10(r13)
664 ld r11,PACA_EXGEN+EX_R11(r13)
665 ld r12,PACA_EXGEN+EX_R12(r13)
666 ld r13,PACA_EXGEN+EX_R13(r13)
667 rfid
668 b . /* prevent speculative execution */
669#endif /* CONFIG_PPC_ISERIES */
670
671/*** Common interrupt handlers ***/
672
673 STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
674
675 /*
676 * Machine check is different because we use a different
677 * save area: PACA_EXMC instead of PACA_EXGEN.
678 */
679 .align 7
680 .globl machine_check_common
681machine_check_common:
682 EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
683 DISABLE_INTS
684 bl .save_nvgprs
685 addi r3,r1,STACK_FRAME_OVERHEAD
686 bl .machine_check_exception
687 b .ret_from_except
688
689 STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
690 STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
691 STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
692 STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
693 STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
694 STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception)
695 STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
696#ifdef CONFIG_ALTIVEC
697 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
698#else
699 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
700#endif
701
702/*
703 * Here we have detected that the kernel stack pointer is bad.
704 * R9 contains the saved CR, r13 points to the paca,
705 * r10 contains the (bad) kernel stack pointer,
706 * r11 and r12 contain the saved SRR0 and SRR1.
707 * We switch to using an emergency stack, save the registers there,
708 * and call kernel_bad_stack(), which panics.
709 */
710bad_stack:
711 ld r1,PACAEMERGSP(r13)
712 subi r1,r1,64+INT_FRAME_SIZE
713 std r9,_CCR(r1)
714 std r10,GPR1(r1)
715 std r11,_NIP(r1)
716 std r12,_MSR(r1)
717 mfspr r11,SPRN_DAR
718 mfspr r12,SPRN_DSISR
719 std r11,_DAR(r1)
720 std r12,_DSISR(r1)
721 mflr r10
722 mfctr r11
723 mfxer r12
724 std r10,_LINK(r1)
725 std r11,_CTR(r1)
726 std r12,_XER(r1)
727 SAVE_GPR(0,r1)
728 SAVE_GPR(2,r1)
729 SAVE_4GPRS(3,r1)
730 SAVE_2GPRS(7,r1)
731 SAVE_10GPRS(12,r1)
732 SAVE_10GPRS(22,r1)
733 addi r11,r1,INT_FRAME_SIZE
734 std r11,0(r1)
735 li r12,0
736 std r12,0(r11)
737 ld r2,PACATOC(r13)
7381: addi r3,r1,STACK_FRAME_OVERHEAD
739 bl .kernel_bad_stack
740 b 1b
741
742/*
743 * Return from an exception with minimal checks.
744 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
745 * If interrupts have been enabled, or anything has been
746 * done that might have changed the scheduling status of
747 * any task or sent any task a signal, you should use
748 * ret_from_except or ret_from_except_lite instead of this.
749 */
750 .globl fast_exception_return
751fast_exception_return:
752 ld r12,_MSR(r1)
753 ld r11,_NIP(r1)
754 andi. r3,r12,MSR_RI /* check if RI is set */
755 beq- unrecov_fer
756 ld r3,_CCR(r1)
757 ld r4,_LINK(r1)
758 ld r5,_CTR(r1)
759 ld r6,_XER(r1)
760 mtcr r3
761 mtlr r4
762 mtctr r5
763 mtxer r6
764 REST_GPR(0, r1)
765 REST_8GPRS(2, r1)
766
767 mfmsr r10
768 clrrdi r10,r10,2 /* clear RI (LE is 0 already) */
769 mtmsrd r10,1
770
771 mtspr SPRN_SRR1,r12
772 mtspr SPRN_SRR0,r11
773 REST_4GPRS(10, r1)
774 ld r1,GPR1(r1)
775 rfid
776 b . /* prevent speculative execution */
777
778unrecov_fer:
779 bl .save_nvgprs
7801: addi r3,r1,STACK_FRAME_OVERHEAD
781 bl .unrecoverable_exception
782 b 1b
783
784/*
785 * Here r13 points to the paca, r9 contains the saved CR,
786 * SRR0 and SRR1 are saved in r11 and r12,
787 * r9 - r13 are saved in paca->exgen.
788 */
789 .align 7
790 .globl data_access_common
791data_access_common:
792	RUNLATCH_ON(r10)		/* It won't fit in the 0x300 handler */
793 mfspr r10,SPRN_DAR
794 std r10,PACA_EXGEN+EX_DAR(r13)
795 mfspr r10,SPRN_DSISR
796 stw r10,PACA_EXGEN+EX_DSISR(r13)
797 EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
798 ld r3,PACA_EXGEN+EX_DAR(r13)
799 lwz r4,PACA_EXGEN+EX_DSISR(r13)
800 li r5,0x300
801 b .do_hash_page /* Try to handle as hpte fault */
802
803 .align 7
804 .globl instruction_access_common
805instruction_access_common:
806 EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
807 ld r3,_NIP(r1)
808 andis. r4,r12,0x5820
809 li r5,0x400
810 b .do_hash_page /* Try to handle as hpte fault */
811
812 .align 7
813 .globl hardware_interrupt_common
814 .globl hardware_interrupt_entry
815hardware_interrupt_common:
816 EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
817hardware_interrupt_entry:
818 DISABLE_INTS
819 addi r3,r1,STACK_FRAME_OVERHEAD
820 bl .do_IRQ
821 b .ret_from_except_lite
822
823 .align 7
824 .globl alignment_common
825alignment_common:
826 mfspr r10,SPRN_DAR
827 std r10,PACA_EXGEN+EX_DAR(r13)
828 mfspr r10,SPRN_DSISR
829 stw r10,PACA_EXGEN+EX_DSISR(r13)
830 EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
831 ld r3,PACA_EXGEN+EX_DAR(r13)
832 lwz r4,PACA_EXGEN+EX_DSISR(r13)
833 std r3,_DAR(r1)
834 std r4,_DSISR(r1)
835 bl .save_nvgprs
836 addi r3,r1,STACK_FRAME_OVERHEAD
837 ENABLE_INTS
838 bl .alignment_exception
839 b .ret_from_except
840
841 .align 7
842 .globl program_check_common
843program_check_common:
844 EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
845 bl .save_nvgprs
846 addi r3,r1,STACK_FRAME_OVERHEAD
847 ENABLE_INTS
848 bl .program_check_exception
849 b .ret_from_except
850
851 .align 7
852 .globl fp_unavailable_common
853fp_unavailable_common:
854 EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
855 bne .load_up_fpu /* if from user, just load it up */
856 bl .save_nvgprs
857 addi r3,r1,STACK_FRAME_OVERHEAD
858 ENABLE_INTS
859 bl .kernel_fp_unavailable_exception
860 BUG_OPCODE
861
862 .align 7
863 .globl altivec_unavailable_common
864altivec_unavailable_common:
865 EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
866#ifdef CONFIG_ALTIVEC
867BEGIN_FTR_SECTION
868 bne .load_up_altivec /* if from user, just load it up */
869END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
870#endif
871 bl .save_nvgprs
872 addi r3,r1,STACK_FRAME_OVERHEAD
873 ENABLE_INTS
874 bl .altivec_unavailable_exception
875 b .ret_from_except
876
877#ifdef CONFIG_ALTIVEC
878/*
879 * load_up_altivec(unused, unused, tsk)
880 * Disable VMX for the task which had it previously,
881 * and save its vector registers in its thread_struct.
882 * Enables the VMX for use in the kernel on return.
883 * On SMP we know the VMX is free, since we give it up every
884 * switch (ie, no lazy save of the vector registers).
885 * On entry: r13 == 'current' && last_task_used_altivec != 'current'
886 */
887_STATIC(load_up_altivec)
888 mfmsr r5 /* grab the current MSR */
889 oris r5,r5,MSR_VEC@h
890 mtmsrd r5 /* enable use of VMX now */
891 isync
892
893/*
894 * For SMP, we don't do lazy VMX switching because it just gets too
895 * horrendously complex, especially when a task switches from one CPU
896 * to another. Instead we call giveup_altivec in switch_to.
897 * VRSAVE isn't dealt with here; that is done in the normal context
898 * switch code. Note that we could rely on the VRSAVE value to eventually
899 * avoid saving all of the VREGs here...
900 */
901#ifndef CONFIG_SMP
902 ld r3,last_task_used_altivec@got(r2)
903 ld r4,0(r3)
904 cmpdi 0,r4,0
905 beq 1f
906 /* Save VMX state to last_task_used_altivec's THREAD struct */
907 addi r4,r4,THREAD
908 SAVE_32VRS(0,r5,r4)
909 mfvscr vr0
910 li r10,THREAD_VSCR
911 stvx vr0,r10,r4
912 /* Disable VMX for last_task_used_altivec */
913 ld r5,PT_REGS(r4)
914 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
915 lis r6,MSR_VEC@h
916 andc r4,r4,r6
917 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
9181:
919#endif /* CONFIG_SMP */
920 /* Hack: if we get an altivec unavailable trap with VRSAVE
921 * set to all zeros, we assume this is a broken application
922 * that fails to set it properly, and thus we switch it to
923 * all 1's
924 */
925 mfspr r4,SPRN_VRSAVE
926 cmpdi 0,r4,0
927 bne+ 1f
928 li r4,-1
929 mtspr SPRN_VRSAVE,r4
9301:
931 /* enable use of VMX after return */
932 ld r4,PACACURRENT(r13)
933 addi r5,r4,THREAD /* Get THREAD */
934 oris r12,r12,MSR_VEC@h
935 std r12,_MSR(r1)
936 li r4,1
937 li r10,THREAD_VSCR
938 stw r4,THREAD_USED_VR(r5)
939 lvx vr0,r10,r5
940 mtvscr vr0
941 REST_32VRS(0,r4,r5)
942#ifndef CONFIG_SMP
943	/* Update last_task_used_altivec to 'current' */
944 subi r4,r5,THREAD /* Back to 'current' */
945 std r4,0(r3)
946#endif /* CONFIG_SMP */
947 /* restore registers and return */
948 b fast_exception_return
949#endif /* CONFIG_ALTIVEC */
950
951/*
952 * Hash table stuff
953 */
954 .align 7
955_GLOBAL(do_hash_page)
956 std r3,_DAR(r1)
957 std r4,_DSISR(r1)
958
959 andis. r0,r4,0xa450 /* weird error? */
960	bne-	.handle_page_fault	/* if so, go the slow path; else try to insert a HPTE */
961BEGIN_FTR_SECTION
962 andis. r0,r4,0x0020 /* Is it a segment table fault? */
963 bne- .do_ste_alloc /* If so handle it */
964END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
965
966 /*
967 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
968 * accessing a userspace segment (even from the kernel). We assume
969 * kernel addresses always have the high bit set.
970 */
971 rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
972 rotldi r0,r3,15 /* Move high bit into MSR_PR posn */
973 orc r0,r12,r0 /* MSR_PR | ~high_bit */
974 rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */
975 ori r4,r4,1 /* add _PAGE_PRESENT */
976 rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */
977
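The bit shuffling above builds the access mask handed to hash_page; in plainer C it is roughly (a sketch; dsisr, dar, msr and trap mirror r4, r3, r12 and r5, and DSISR_STORE names the store bit the first rlwinm extracts):

    unsigned long access = _PAGE_PRESENT;

    if (dsisr & DSISR_STORE)
            access |= _PAGE_RW;          /* store fault: need write perm */
    if ((msr & MSR_PR) || !(dar >> 63))
            access |= _PAGE_USER;        /* user mode, or a user segment */
    if (trap == 0x400)
            access |= _PAGE_EXEC;        /* ISI: need execute permission */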
978 /*
979 * On iSeries, we soft-disable interrupts here, then
980 * hard-enable interrupts so that the hash_page code can spin on
981 * the hash_table_lock without problems on a shared processor.
982 */
983 DISABLE_INTS
984
985 /*
986 * r3 contains the faulting address
987 * r4 contains the required access permissions
988 * r5 contains the trap number
989 *
990 * at return r3 = 0 for success
991 */
992 bl .hash_page /* build HPTE if possible */
993 cmpdi r3,0 /* see if hash_page succeeded */
994
995#ifdef DO_SOFT_DISABLE
996 /*
997 * If we had interrupts soft-enabled at the point where the
998 * DSI/ISI occurred, and an interrupt came in during hash_page,
999 * handle it now.
1000 * We jump to ret_from_except_lite rather than fast_exception_return
1001 * because ret_from_except_lite will check for and handle pending
1002 * interrupts if necessary.
1003 */
1004 beq .ret_from_except_lite
1005 /* For a hash failure, we don't bother re-enabling interrupts */
1006 ble- 12f
1007
1008 /*
1009 * hash_page couldn't handle it, set soft interrupt enable back
1010 * to what it was before the trap. Note that .local_irq_restore
1011 * handles any interrupts pending at this point.
1012 */
1013 ld r3,SOFTE(r1)
1014 bl .local_irq_restore
1015 b 11f
1016#else
1017 beq fast_exception_return /* Return from exception on success */
1018 ble- 12f /* Failure return from hash_page */
1019
1020 /* fall through */
1021#endif
1022
1023/* Here we have a page fault that hash_page can't handle. */
1024_GLOBAL(handle_page_fault)
1025 ENABLE_INTS
102611: ld r4,_DAR(r1)
1027 ld r5,_DSISR(r1)
1028 addi r3,r1,STACK_FRAME_OVERHEAD
1029 bl .do_page_fault
1030 cmpdi r3,0
1031 beq+ .ret_from_except_lite
1032 bl .save_nvgprs
1033 mr r5,r3
1034 addi r3,r1,STACK_FRAME_OVERHEAD
1035 lwz r4,_DAR(r1)
1036 bl .bad_page_fault
1037 b .ret_from_except
1038
1039/* We have a page fault that hash_page could handle but HV refused
1040 * the PTE insertion
1041 */
104212: bl .save_nvgprs
1043 addi r3,r1,STACK_FRAME_OVERHEAD
1044 lwz r4,_DAR(r1)
1045 bl .low_hash_fault
1046 b .ret_from_except
1047
1048 /* here we have a segment miss */
1049_GLOBAL(do_ste_alloc)
1050 bl .ste_allocate /* try to insert stab entry */
1051 cmpdi r3,0
1052 beq+ fast_exception_return
1053 b .handle_page_fault
1054
1055/*
1056 * r13 points to the PACA, r9 contains the saved CR,
1057 * r11 and r12 contain the saved SRR0 and SRR1.
1058 * r9 - r13 are saved in paca->exslb.
1059 * We assume we aren't going to take any exceptions during this procedure.
1060 * We assume (DAR >> 60) == 0xc.
1061 */
1062 .align 7
1063_GLOBAL(do_stab_bolted)
1064 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1065 std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
1066
1067 /* Hash to the primary group */
1068 ld r10,PACASTABVIRT(r13)
1069 mfspr r11,SPRN_DAR
1070 srdi r11,r11,28
1071 rldimi r10,r11,7,52 /* r10 = first ste of the group */
1072
1073 /* Calculate VSID */
1074 /* This is a kernel address, so protovsid = ESID */
1075 ASM_VSID_SCRAMBLE(r11, r9)
1076 rldic r9,r11,12,16 /* r9 = vsid << 12 */
1077
1078 /* Search the primary group for a free entry */
10791: ld r11,0(r10) /* Test valid bit of the current ste */
1080 andi. r11,r11,0x80
1081 beq 2f
1082 addi r10,r10,16
1083 andi. r11,r10,0x70
1084 bne 1b
1085
1086	/* Stick to searching only the primary group for now. */
1087 /* At least for now, we use a very simple random castout scheme */
1088 /* Use the TB as a random number ; OR in 1 to avoid entry 0 */
1089 mftb r11
1090 rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */
1091 ori r11,r11,0x10
1092
1093 /* r10 currently points to an ste one past the group of interest */
1094 /* make it point to the randomly selected entry */
1095 subi r10,r10,128
1096 or r10,r10,r11 /* r10 is the entry to invalidate */
1097
1098 isync /* mark the entry invalid */
1099 ld r11,0(r10)
1100 rldicl r11,r11,56,1 /* clear the valid bit */
1101 rotldi r11,r11,8
1102 std r11,0(r10)
1103 sync
1104
1105 clrrdi r11,r11,28 /* Get the esid part of the ste */
1106 slbie r11
1107
11082: std r9,8(r10) /* Store the vsid part of the ste */
1109 eieio
1110
1111 mfspr r11,SPRN_DAR /* Get the new esid */
1112 clrrdi r11,r11,28 /* Permits a full 32b of ESID */
1113 ori r11,r11,0x90 /* Turn on valid and kp */
1114 std r11,0(r10) /* Put new entry back into the stab */
1115
1116 sync
1117
1118 /* All done -- return from exception. */
1119 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1120 ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */
1121
1122 andi. r10,r12,MSR_RI
1123 beq- unrecov_slb
1124
1125 mtcrf 0x80,r9 /* restore CR */
1126
1127 mfmsr r10
1128 clrrdi r10,r10,2
1129 mtmsrd r10,1
1130
1131 mtspr SPRN_SRR0,r11
1132 mtspr SPRN_SRR1,r12
1133 ld r9,PACA_EXSLB+EX_R9(r13)
1134 ld r10,PACA_EXSLB+EX_R10(r13)
1135 ld r11,PACA_EXSLB+EX_R11(r13)
1136 ld r12,PACA_EXSLB+EX_R12(r13)
1137 ld r13,PACA_EXSLB+EX_R13(r13)
1138 rfid
1139 b . /* prevent speculative execution */
1140
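The slot selection in do_stab_bolted above amounts to this C sketch (illustrative; each STE is 16 bytes, eight per group, dword0's 0x80 bit is "valid", and the timebase serves as a cheap random number for castout):

    unsigned long group = stab_virt | (((dar >> 28) & 0x1f) << 7);
    unsigned long off;

    for (off = 0; off < 128; off += 16)             /* scan primary group */
            if (!(*(unsigned long *)(group + off) & 0x80))
                    break;                          /* found a free STE */
    if (off == 128)                                 /* none free: castout, */
            off = ((get_tb() << 4) & 0x70) | 0x10;  /* but never entry 0 */
    /* invalidate the victim if needed (slbie), store the VSID dword,
     * then the new ESID + valid + kp dword, with the syncs shown above */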
1141/*
1142 * r13 points to the PACA, r9 contains the saved CR,
1143 * r11 and r12 contain the saved SRR0 and SRR1.
1144 * r3 has the faulting address
1145 * r9 - r13 are saved in paca->exslb.
1146 * r3 is saved in paca->slb_r3
1147 * We assume we aren't going to take any exceptions during this procedure.
1148 */
1149_GLOBAL(do_slb_miss)
1150 mflr r10
1151
1152 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1153 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
1154
1155 bl .slb_allocate /* handle it */
1156
1157 /* All done -- return from exception. */
1158
1159 ld r10,PACA_EXSLB+EX_LR(r13)
1160 ld r3,PACA_EXSLB+EX_R3(r13)
1161 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1162#ifdef CONFIG_PPC_ISERIES
1163 ld r11,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */
1164#endif /* CONFIG_PPC_ISERIES */
1165
1166 mtlr r10
1167
1168 andi. r10,r12,MSR_RI /* check for unrecoverable exception */
1169 beq- unrecov_slb
1170
1171.machine push
1172.machine "power4"
1173 mtcrf 0x80,r9
1174 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
1175.machine pop
1176
1177#ifdef CONFIG_PPC_ISERIES
1178 mtspr SPRN_SRR0,r11
1179 mtspr SPRN_SRR1,r12
1180#endif /* CONFIG_PPC_ISERIES */
1181 ld r9,PACA_EXSLB+EX_R9(r13)
1182 ld r10,PACA_EXSLB+EX_R10(r13)
1183 ld r11,PACA_EXSLB+EX_R11(r13)
1184 ld r12,PACA_EXSLB+EX_R12(r13)
1185 ld r13,PACA_EXSLB+EX_R13(r13)
1186 rfid
1187 b . /* prevent speculative execution */
1188
1189unrecov_slb:
1190 EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1191 DISABLE_INTS
1192 bl .save_nvgprs
11931: addi r3,r1,STACK_FRAME_OVERHEAD
1194 bl .unrecoverable_exception
1195 b 1b
1196
1197/*
1198 * Space for CPU0's segment table.
1199 *
1200 * On iSeries, the hypervisor must fill in at least one entry before
1201 * we get control (with relocation on). The address is given to the hv
1202 * as a page number (see xLparMap in lpardata.c), so this must be at a
1203 * fixed address (the linker can't compute (u64)&initial_stab >>
1204 * PAGE_SHIFT).
1205 */
1206 . = STAB0_PHYS_ADDR /* 0x6000 */
1207 .globl initial_stab
1208initial_stab:
1209 .space 4096
1210
1211/*
1212 * Data area reserved for FWNMI option.
1213 * This address (0x7000) is fixed by the RPA.
1214 */
1215	. = 0x7000
1216 .globl fwnmi_data_area
1217fwnmi_data_area:
1218
1219 /* iSeries does not use the FWNMI stuff, so it is safe to put
1220 * this here, even if we later allow kernels that will boot on
1221 * both pSeries and iSeries */
1222#ifdef CONFIG_PPC_ISERIES
1223 . = LPARMAP_PHYS
1224#include "lparmap.s"
1225/*
1226 * This ".text" is here for old compilers that generate a trailing
1227 * .note section when compiling .c files to .s
1228 */
1229 .text
1230#endif /* CONFIG_PPC_ISERIES */
1231
1232 . = 0x8000
1233
1234/*
1235 * On pSeries, secondary processors spin in the following code.
1236 * At entry, r3 = this processor's number (physical cpu id)
1237 */
1238_GLOBAL(pSeries_secondary_smp_init)
1239 mr r24,r3
1240
1241 /* turn on 64-bit mode */
1242 bl .enable_64b_mode
1243 isync
1244
1245 /* Copy some CPU settings from CPU 0 */
1246 bl .__restore_cpu_setup
1247
1248 /* Set up a paca value for this processor. Since we have the
1249 * physical cpu id in r24, we need to search the pacas to find
1250 * which logical id maps to our physical one.
1251 */
1252 LOADADDR(r13, paca) /* Get base vaddr of paca array */
1253 li r5,0 /* logical cpu id */
12541: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */
1255 cmpw r6,r24 /* Compare to our id */
1256 beq 2f
1257 addi r13,r13,PACA_SIZE /* Loop to next PACA on miss */
1258 addi r5,r5,1
1259 cmpwi r5,NR_CPUS
1260 blt 1b
1261
1262 mr r3,r24 /* not found, copy phys to r3 */
1263 b .kexec_wait /* next kernel might do better */
1264
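The search loop above is equivalent to this C sketch (illustrative; the paca array and hw_cpu_id field names follow the asm symbols paca/PACAHWCPUID and are assumptions about the C side; kexec_wait receives the physical id, matching r3 in the assembly):

    static int logical_id(int phys_id)
    {
            int i;

            for (i = 0; i < NR_CPUS; i++)
                    if (paca[i].hw_cpu_id == phys_id)
                            return i;       /* found: logical cpu id */
            kexec_wait(phys_id);            /* not found: park the cpu */
            /* not reached */
    }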
12652: mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */
1266 /* From now on, r24 is expected to be logical cpuid */
1267 mr r24,r5
12683: HMT_LOW
1269 lbz r23,PACAPROCSTART(r13) /* Test if this processor should */
1270 /* start. */
1271 sync
1272
1273 /* Create a temp kernel stack for use before relocation is on. */
1274 ld r1,PACAEMERGSP(r13)
1275 subi r1,r1,STACK_FRAME_OVERHEAD
1276
1277 cmpwi 0,r23,0
1278#ifdef CONFIG_SMP
1279 bne .__secondary_start
1280#endif
1281 b 3b /* Loop until told to go */
1282
1283#ifdef CONFIG_PPC_ISERIES
1284_STATIC(__start_initialization_iSeries)
1285 /* Clear out the BSS */
1286 LOADADDR(r11,__bss_stop)
1287 LOADADDR(r8,__bss_start)
1288 sub r11,r11,r8 /* bss size */
1289 addi r11,r11,7 /* round up to an even double word */
1290 rldicl. r11,r11,61,3 /* shift right by 3 */
1291 beq 4f
1292 addi r8,r8,-8
1293 li r0,0
1294 mtctr r11 /* zero this many doublewords */
12953: stdu r0,8(r8)
1296 bdnz 3b
12974:
1298 LOADADDR(r1,init_thread_union)
1299 addi r1,r1,THREAD_SIZE
1300 li r0,0
1301 stdu r0,-STACK_FRAME_OVERHEAD(r1)
1302
1303 LOADADDR(r3,cpu_specs)
1304 LOADADDR(r4,cur_cpu_spec)
1305 li r5,0
1306 bl .identify_cpu
1307
1308 LOADADDR(r2,__toc_start)
1309	addi	r2,r2,0x4000	/* TOC ptr = __toc_start + 0x8000; */
1310	addi	r2,r2,0x4000	/* two addis, since the immediate is signed 16-bit */
1311
1312 bl .iSeries_early_setup
1313 bl .early_setup
1314
1315 /* relocation is on at this point */
1316
1317 b .start_here_common
1318#endif /* CONFIG_PPC_ISERIES */
1319
1320#ifdef CONFIG_PPC_MULTIPLATFORM
1321
1322_STATIC(__mmu_off)
1323 mfmsr r3
1324 andi. r0,r3,MSR_IR|MSR_DR
1325 beqlr
1326 andc r3,r3,r0
1327 mtspr SPRN_SRR0,r4
1328 mtspr SPRN_SRR1,r3
1329 sync
1330 rfid
1331 b . /* prevent speculative execution */
1332
1333
1334/*
1335 * Here is our main kernel entry point. We currently support 2 kinds of entries
1336 * depending on the value of r5.
1337 *
1338 * r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
1339 * in r3...r7
1340 *
1341 * r5 == NULL -> kexec style entry. r3 is a physical pointer to the
1342 * DT block, r4 is a physical pointer to the kernel itself
1343 *
1344 */
1345_GLOBAL(__start_initialization_multiplatform)
1346 /*
1347	 * Are we booted from a PROM OF-type client interface?
1348 */
1349 cmpldi cr0,r5,0
1350 bne .__boot_from_prom /* yes -> prom */
1351
1352 /* Save parameters */
1353 mr r31,r3
1354 mr r30,r4
1355
1356	/* Make sure we are running in 64-bit mode */
1357 bl .enable_64b_mode
1358
1359 /* Setup some critical 970 SPRs before switching MMU off */
1360 bl .__970_cpu_preinit
1361
1362 /* cpu # */
1363 li r24,0
1364
1365 /* Switch off MMU if not already */
1366 LOADADDR(r4, .__after_prom_start - KERNELBASE)
1367 add r4,r4,r30
1368 bl .__mmu_off
1369 b .__after_prom_start
1370
1371_STATIC(__boot_from_prom)
1372 /* Save parameters */
1373 mr r31,r3
1374 mr r30,r4
1375 mr r29,r5
1376 mr r28,r6
1377 mr r27,r7
1378
1379	/* Make sure we are running in 64-bit mode */
1380 bl .enable_64b_mode
1381
1382 /* put a relocation offset into r3 */
1383 bl .reloc_offset
1384
1385 LOADADDR(r2,__toc_start)
1386	addi	r2,r2,0x4000	/* TOC ptr = __toc_start + 0x8000; */
1387	addi	r2,r2,0x4000	/* two addis, since the immediate is signed 16-bit */
1388
1389 /* Relocate the TOC from a virt addr to a real addr */
1390 add r2,r2,r3
1391
1392 /* Restore parameters */
1393 mr r3,r31
1394 mr r4,r30
1395 mr r5,r29
1396 mr r6,r28
1397 mr r7,r27
1398
1399 /* Do all of the interaction with OF client interface */
1400 bl .prom_init
1401 /* We never return */
1402 trap
1403
1404/*
1405 * At this point, r3 contains the physical address we are running at,
1406 * returned by prom_init()
1407 */
1408_STATIC(__after_prom_start)
1409
1410/*
1411 * We need to run with __start at physical address 0.
1412 * This will leave some code in the first 256B of
1413 * real memory, which are reserved for software use.
1414 * The remainder of the first page is loaded with the fixed
1415 * interrupt vectors. The next two pages are filled with
1416 * unknown exception placeholders.
1417 *
1418 * Note: This process overwrites the OF exception vectors.
1419 * r26 == relocation offset
1420 * r27 == KERNELBASE
1421 */
1422 bl .reloc_offset
1423 mr r26,r3
1424 SET_REG_TO_CONST(r27,KERNELBASE)
1425
1426 li r3,0 /* target addr */
1427
1428 // XXX FIXME: Use phys returned by OF (r30)
1429 add r4,r27,r26 /* source addr */
1430 /* current address of _start */
1431 /* i.e. where we are running */
1432 /* the source addr */
1433
1434 LOADADDR(r5,copy_to_here) /* # bytes of memory to copy */
1435 sub r5,r5,r27
1436
1437 li r6,0x100 /* Start offset, the first 0x100 */
1438 /* bytes were copied earlier. */
1439
1440 bl .copy_and_flush /* copy the first n bytes */
1441 /* this includes the code being */
1442 /* executed here. */
1443
1444 LOADADDR(r0, 4f) /* Jump to the copy of this code */
1445 mtctr r0 /* that we just made/relocated */
1446 bctr
1447
14484: LOADADDR(r5,klimit)
1449 add r5,r5,r26
1450 ld r5,0(r5) /* get the value of klimit */
1451 sub r5,r5,r27
1452 bl .copy_and_flush /* copy the rest */
1453 b .start_here_multiplatform
1454
1455#endif /* CONFIG_PPC_MULTIPLATFORM */
1456
1457/*
1458 * Copy routine used to copy the kernel to start at physical address 0
1459 * and flush and invalidate the caches as needed.
1460 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
1461 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
1462 *
1463 * Note: this routine *only* clobbers r0, r6 and lr
1464 */
1465_GLOBAL(copy_and_flush)
1466 addi r5,r5,-8
1467 addi r6,r6,-8
14684: li r0,16 /* Use the least common */
1469 /* denominator cache line */
1470 /* size. This results in */
1471 /* extra cache line flushes */
1472 /* but operation is correct. */
1473 /* Can't get cache line size */
1474 /* from NACA as it is being */
1475 /* moved too. */
1476
1477 mtctr r0 /* put # words/line in ctr */
14783: addi r6,r6,8 /* copy a cache line */
1479 ldx r0,r6,r4
1480 stdx r0,r6,r3
1481 bdnz 3b
1482 dcbst r6,r3 /* write it to memory */
1483 sync
1484 icbi r6,r3 /* flush the icache line */
1485 cmpld 0,r6,r5
1486 blt 4b
1487 sync
1488 addi r5,r5,8
1489 addi r6,r6,8
1490 blr
1491
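In C, copy_and_flush behaves roughly like this sketch (illustrative; the cache maintenance is shown as comments, and 16 doublewords per iteration gives the conservative 128-byte line the comments above describe):

    /* r3=dest, r4=source, r5=copy limit, r6=start offset in the assembly. */
    static void copy_and_flush_sketch(char *dst, const char *src,
                                      unsigned long limit, unsigned long off)
    {
            while (off < limit) {
                    int i;
                    for (i = 0; i < 16; i++, off += 8)      /* one "line" */
                            *(unsigned long *)(dst + off) =
                                    *(const unsigned long *)(src + off);
                    /* dcbst dst+off ; sync ; icbi dst+off  (push the line
                     * to memory and invalidate the stale i-cache copy) */
            }
            /* final sync; like the asm, off ends up >= limit on exit */
    }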
1492.align 8
1493copy_to_here:
1494
1495#ifdef CONFIG_SMP
1496#ifdef CONFIG_PPC_PMAC
1497/*
1498 * On PowerMac, secondary processors start from the reset vector, which
1499 * is temporarily turned into a call to one of the functions below.
1500 */
1501 .section ".text";
1502 .align 2 ;
1503
1504 .globl __secondary_start_pmac_0
1505__secondary_start_pmac_0:
1506 /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
1507 li r24,0
1508 b 1f
1509 li r24,1
1510 b 1f
1511 li r24,2
1512 b 1f
1513 li r24,3
15141:
1515
1516_GLOBAL(pmac_secondary_start)
1517 /* turn on 64-bit mode */
1518 bl .enable_64b_mode
1519 isync
1520
1521 /* Copy some CPU settings from CPU 0 */
1522 bl .__restore_cpu_setup
1523
1524	/* pSeries does this early, though I don't think we really need it */
1525 mfmsr r3
1526 ori r3,r3,MSR_RI
1527 mtmsrd r3 /* RI on */
1528
1529 /* Set up a paca value for this processor. */
1530 LOADADDR(r4, paca) /* Get base vaddr of paca array */
1531 mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */
1532 add r13,r13,r4 /* for this processor. */
1533 mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */
1534
1535 /* Create a temp kernel stack for use before relocation is on. */
1536 ld r1,PACAEMERGSP(r13)
1537 subi r1,r1,STACK_FRAME_OVERHEAD
1538
1539 b .__secondary_start
1540
1541#endif /* CONFIG_PPC_PMAC */
1542
1543/*
1544 * This function is called after the master CPU has released the
1545 * secondary processors. The execution environment is relocation off.
1546 * The paca for this processor has the following fields initialized at
1547 * this point:
1548 * 1. Processor number
1549 * 2. Segment table pointer (virtual address)
1550 * On entry the following are set:
1551 * r1 = stack pointer. vaddr for iSeries, raddr (temp stack) for pSeries
1552 * r24 = cpu# (in Linux terms)
1553 * r13 = paca virtual address
1554 * SPRG3 = paca virtual address
1555 */
1556_GLOBAL(__secondary_start)
1557
1558 HMT_MEDIUM /* Set thread priority to MEDIUM */
1559
1560 ld r2,PACATOC(r13)
1561 li r6,0
1562 stb r6,PACAPROCENABLED(r13)
1563
1564#ifndef CONFIG_PPC_ISERIES
1565 /* Initialize the page table pointer register. */
1566 LOADADDR(r6,_SDR1)
1567 ld r6,0(r6) /* get the value of _SDR1 */
1568 mtspr SPRN_SDR1,r6 /* set the htab location */
1569#endif
1570 /* Initialize the first segment table (or SLB) entry */
1571 ld r3,PACASTABVIRT(r13) /* get addr of segment table */
1572 bl .stab_initialize
1573
1574 /* Initialize the kernel stack. Just a repeat for iSeries. */
1575 LOADADDR(r3,current_set)
1576 sldi r28,r24,3 /* get current_set[cpu#] */
1577 ldx r1,r3,r28
1578 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
1579 std r1,PACAKSAVE(r13)
1580
1581 ld r3,PACASTABREAL(r13) /* get raddr of segment table */
1582 ori r4,r3,1 /* turn on valid bit */
1583
1584#ifdef CONFIG_PPC_ISERIES
1585 li r0,-1 /* hypervisor call */
1586 li r3,1
1587 sldi r3,r3,63 /* 0x8000000000000000 */
1588 ori r3,r3,4 /* 0x8000000000000004 */
1589 sc /* HvCall_setASR */
1590#else
1591 /* set the ASR */
1592 ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
1593 ld r3,0(r3)
1594 lwz r3,PLATFORM(r3) /* r3 = platform flags */
1595 andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */
1596 beq 98f /* branch if result is 0 */
1597 mfspr r3,SPRN_PVR
1598 srwi r3,r3,16
1599 cmpwi r3,0x37 /* SStar */
1600 beq 97f
1601 cmpwi r3,0x36 /* IStar */
1602 beq 97f
1603 cmpwi r3,0x34 /* Pulsar */
1604 bne 98f
160597: li r3,H_SET_ASR /* hcall = H_SET_ASR */
1606 HVSC /* Invoking hcall */
1607 b 99f
160898: /* !(rpa hypervisor) || !(star) */
1609 mtasr r4 /* set the stab location */
161099:
1611#endif
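The #else branch above condenses to the following C sketch (systemcfg's platform field is inferred from the PLATFORM offset used above; plpar_hcall_norets stands in for the raw HVSC, mfspr/mtspr are the usual accessors, and stab_real is the PACASTABREAL value):

	unsigned long asr = stab_real | 1;		/* valid bit on */
	unsigned int pvr_ver = mfspr(SPRN_PVR) >> 16;

	if ((systemcfg->platform & PLATFORM_LPAR) &&
	    (pvr_ver == 0x37 || pvr_ver == 0x36 || pvr_ver == 0x34))
		plpar_hcall_norets(H_SET_ASR, asr);	/* SStar/IStar/Pulsar */
	else
		mtspr(SPRN_ASR, asr);			/* the mtasr */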
1612 li r7,0
1613 mtlr r7
1614
1615 /* enable MMU and jump to start_secondary */
1616 LOADADDR(r3,.start_secondary_prolog)
1617 SET_REG_TO_CONST(r4, MSR_KERNEL)
1618#ifdef DO_SOFT_DISABLE
1619 ori r4,r4,MSR_EE
1620#endif
1621 mtspr SPRN_SRR0,r3
1622 mtspr SPRN_SRR1,r4
1623 rfid
1624 b . /* prevent speculative execution */
1625
1626/*
1627 * Running with relocation on at this point. All we want to do is
1628 * zero the stack back-chain pointer before going into C code.
1629 */
1630_GLOBAL(start_secondary_prolog)
1631 li r3,0
1632 std r3,0(r1) /* Zero the stack frame pointer */
1633 bl .start_secondary
1634#endif
1635
1636/*
1637 * This subroutine clobbers r11 and r12
1638 */
1639_GLOBAL(enable_64b_mode)
1640 mfmsr r11 /* grab the current MSR */
1641 li r12,1
1642 rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
1643 or r11,r11,r12
1644 li r12,1
1645 rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
1646 or r11,r11,r12
1647 mtmsrd r11
1648 isync
1649 blr
1650
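The two rldicr sequences just build single-bit masks, so in C the routine amounts to (a sketch):

	unsigned long msr = mfmsr();

	msr |= 1UL << MSR_SF_LG;	/* 64-bit mode */
	msr |= 1UL << MSR_ISF_LG;	/* 64-bit interrupt mode */
	mtmsrd(msr);			/* the asm follows this with isync */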
1651#ifdef CONFIG_PPC_MULTIPLATFORM
1652/*
1653 * This is where the main kernel code starts.
1654 */
1655_STATIC(start_here_multiplatform)
1656 /* get a new offset, now that the kernel has moved. */
1657 bl .reloc_offset
1658 mr r26,r3
1659
1660 /* Clear out the BSS. It may have been done in prom_init
1661 * already, but that's irrelevant since prom_init will soon
1662 * be detached from the kernel completely. Besides, we need
1663 * to clear it now for kexec-style entry.
1664 */
1665 LOADADDR(r11,__bss_stop)
1666 LOADADDR(r8,__bss_start)
1667 sub r11,r11,r8 /* bss size */
1668 addi r11,r11,7 /* round up to an even double word */
1669 rldicl. r11,r11,61,3 /* shift right by 3 */
1670 beq 4f
1671 addi r8,r8,-8
1672 li r0,0
1673 mtctr r11 /* zero this many doublewords */
16743: stdu r0,8(r8)
1675 bdnz 3b
16764:
1677
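The loop above rounds the BSS size up to a whole number of doublewords and zeroes them with stdu; a C equivalent (sketch):

	extern char __bss_start[], __bss_stop[];
	unsigned long n = (__bss_stop - __bss_start + 7) >> 3;	/* doublewords */
	unsigned long *p = (unsigned long *)__bss_start;

	while (n--)
		*p++ = 0;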
1678 mfmsr r6
1679 ori r6,r6,MSR_RI
1680 mtmsrd r6 /* RI on */
1681
1682#ifdef CONFIG_HMT
1683 /* Start up the second thread on cpu 0 */
1684 mfspr r3,SPRN_PVR
1685 srwi r3,r3,16
1686 cmpwi r3,0x34 /* Pulsar */
1687 beq 90f
1688 cmpwi r3,0x36 /* Icestar */
1689 beq 90f
1690 cmpwi r3,0x37 /* SStar */
1691 beq 90f
1692 b 91f /* HMT not supported */
169390: li r3,0
1694 bl .hmt_start_secondary
169591:
1696#endif
1697
1698 /* The following gets the stack and TOC set up with the regs */
1699 /* pointing to the real addr of the kernel stack. This is */
1700 /* all done to support the C function call below which sets */
1701 /* up the htab. This is done because we have relocated the */
1702 /* kernel but are still running in real mode. */
1703
1704 LOADADDR(r3,init_thread_union)
1705 add r3,r3,r26
1706
1707 /* set up a stack pointer (physical address) */
1708 addi r1,r3,THREAD_SIZE
1709 li r0,0
1710 stdu r0,-STACK_FRAME_OVERHEAD(r1)
1711
1712 /* set up the TOC (physical address) */
1713 LOADADDR(r2,__toc_start)
1714 addi r2,r2,0x4000
1715 addi r2,r2,0x4000
1716 add r2,r2,r26
1717
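A note on the two addi instructions: the 64-bit ELF ABI puts the TOC pointer 0x8000 bytes past the start of the TOC section (so the whole signed 16-bit displacement range is usable in both directions), and since addi takes a signed 16-bit immediate, the +0x8000 has to be split into two +0x4000 steps.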
1718 LOADADDR(r3,cpu_specs)
1719 add r3,r3,r26
1720 LOADADDR(r4,cur_cpu_spec)
1721 add r4,r4,r26
1722 mr r5,r26
1723 bl .identify_cpu
1724
1725 /* Save some low level config HIDs of CPU0 to be copied to
1726 * other CPUs later on, or used for suspend/resume
1727 */
1728 bl .__save_cpu_setup
1729 sync
1730
1731 /* Setup a valid physical PACA pointer in SPRG3 for early_setup
1732 * note that boot_cpuid can always be 0 nowadays since there is
1733 * nowhere it can be initialized differently before we reach this
1734 * code
1735 */
1736 LOADADDR(r27, boot_cpuid)
1737 add r27,r27,r26
1738 lwz r27,0(r27)
1739
1740 LOADADDR(r24, paca) /* Get base vaddr of paca array */
1741 mulli r13,r27,PACA_SIZE /* Calculate vaddr of right paca */
1742 add r13,r13,r24 /* for this processor. */
1743 add r13,r13,r26 /* convert to physical addr */
1744 mtspr SPRN_SPRG3,r13 /* PPPBBB: Temp... -Peter */
1745
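In C terms the paca setup above is (a sketch; mtspr() is the usual accessor, and reloc_offset is the value held in r26):

	struct paca_struct *pacap = &paca[boot_cpuid];	/* virtual address */
	unsigned long paca_phys = (unsigned long)pacap + reloc_offset;

	mtspr(SPRN_SPRG3, paca_phys);	/* early_setup finds the paca here */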
1746 /* Do very early kernel initializations, including initial hash table,
1747 * stab and slb setup before we turn on relocation. */
1748
1749 /* Restore parameters passed from prom_init/kexec */
1750 mr r3,r31
1751 bl .early_setup
1752
1753 /* set the ASR */
1754 ld r3,PACASTABREAL(r13)
1755 ori r4,r3,1 /* turn on valid bit */
1756 ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
1757 ld r3,0(r3)
1758 lwz r3,PLATFORM(r3) /* r3 = platform flags */
1759 andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */
1760 beq 98f /* branch if result is 0 */
1761 mfspr r3,SPRN_PVR
1762 srwi r3,r3,16
1763 cmpwi r3,0x37 /* SStar */
1764 beq 97f
1765 cmpwi r3,0x36 /* IStar */
1766 beq 97f
1767 cmpwi r3,0x34 /* Pulsar */
1768 bne 98f
176997: li r3,H_SET_ASR /* hcall = H_SET_ASR */
1770 HVSC /* Invoking hcall */
1771 b 99f
177298: /* !(rpa hypervisor) || !(star) */
1773 mtasr r4 /* set the stab location */
177499:
1775 /* Set SDR1 (hash table pointer) */
1776 ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
1777 ld r3,0(r3)
1778 lwz r3,PLATFORM(r3) /* r3 = platform flags */
1779 /* Test if bit 0 is set (LPAR bit) */
1780 andi. r3,r3,PLATFORM_LPAR
1781 bne 98f /* branch if result is !0 */
1782 LOADADDR(r6,_SDR1) /* Only if NOT LPAR */
1783 add r6,r6,r26
1784 ld r6,0(r6) /* get the value of _SDR1 */
1785 mtspr SPRN_SDR1,r6 /* set the htab location */
178698:
1787 LOADADDR(r3,.start_here_common)
1788 SET_REG_TO_CONST(r4, MSR_KERNEL)
1789 mtspr SPRN_SRR0,r3
1790 mtspr SPRN_SRR1,r4
1791 rfid
1792 b . /* prevent speculative execution */
1793#endif /* CONFIG_PPC_MULTIPLATFORM */
1794
1795 /* This is where all platforms converge execution */
1796_STATIC(start_here_common)
1797 /* relocation is on at this point */
1798
1799 /* The following code sets up the SP and TOC now that we are */
1800 /* running with translation enabled. */
1801
1802 LOADADDR(r3,init_thread_union)
1803
1804 /* set up the stack */
1805 addi r1,r3,THREAD_SIZE
1806 li r0,0
1807 stdu r0,-STACK_FRAME_OVERHEAD(r1)
1808
1809 /* Apply the CPU-specific fixups (nop out sections not relevant
1810 * to this CPU)
1811 */
1812 li r3,0
1813 bl .do_cpu_ftr_fixups
1814
1815 LOADADDR(r26, boot_cpuid)
1816 lwz r26,0(r26)
1817
1818 LOADADDR(r24, paca) /* Get base vaddr of paca array */
1819 mulli r13,r26,PACA_SIZE /* Calculate vaddr of right paca */
1820 add r13,r13,r24 /* for this processor. */
1821 mtspr SPRN_SPRG3,r13
1822
1823 /* ptr to current */
1824 LOADADDR(r4,init_task)
1825 std r4,PACACURRENT(r13)
1826
1827 /* Load the TOC */
1828 ld r2,PACATOC(r13)
1829 std r1,PACAKSAVE(r13)
1830
1831 bl .setup_system
1832
1833 /* Load up the kernel context */
18345:
1835#ifdef DO_SOFT_DISABLE
1836 li r5,0
1837 stb r5,PACAPROCENABLED(r13) /* Soft Disabled */
1838 mfmsr r5
1839 ori r5,r5,MSR_EE /* Hard Enabled */
1840 mtmsrd r5
1841#endif
1842
1843 bl .start_kernel
1844
1845_GLOBAL(hmt_init)
1846#ifdef CONFIG_HMT
1847 LOADADDR(r5, hmt_thread_data)
1848 mfspr r7,SPRN_PVR
1849 srwi r7,r7,16
1850 cmpwi r7,0x34 /* Pulsar */
1851 beq 90f
1852 cmpwi r7,0x36 /* Icestar */
1853 beq 91f
1854 cmpwi r7,0x37 /* SStar */
1855 beq 91f
1856 b 101f
185790: mfspr r6,SPRN_PIR
1858 andi. r6,r6,0x1f
1859 b 92f
186091: mfspr r6,SPRN_PIR
1861 andi. r6,r6,0x3ff
186292: sldi r4,r24,3
1863 stwx r6,r5,r4
1864 bl .hmt_start_secondary
1865 b 101f
1866
1867__hmt_secondary_hold:
1868 LOADADDR(r5, hmt_thread_data)
1869 clrldi r5,r5,4
1870 li r7,0
1871 mfspr r6,SPRN_PIR
1872 mfspr r8,SPRN_PVR
1873 srwi r8,r8,16
1874 cmpwi r8,0x34
1875 bne 93f
1876 andi. r6,r6,0x1f
1877 b 103f
187893: andi. r6,r6,0x3f
1879
1880103: lwzx r8,r5,r7
1881 cmpw r8,r6
1882 beq 104f
1883 addi r7,r7,8
1884 b 103b
1885
1886104: addi r7,r7,4
1887 lwzx r9,r5,r7
1888 mr r24,r9
1889101:
1890#endif
1891 mr r3,r24
1892 b .pSeries_secondary_smp_init
1893
1894#ifdef CONFIG_HMT
1895_GLOBAL(hmt_start_secondary)
1896 LOADADDR(r4,__hmt_secondary_hold)
1897 clrldi r4,r4,4
1898 mtspr SPRN_NIADORM, r4
1899 mfspr r4, SPRN_MSRDORM
1900 li r5, -65
1901 and r4, r4, r5
1902 mtspr SPRN_MSRDORM, r4
1903 lis r4,0xffef
1904 ori r4,r4,0x7403
1905 mtspr SPRN_TSC, r4
1906 li r4,0x1f4
1907 mtspr SPRN_TST, r4
1908 mfspr r4, SPRN_HID0
1909 ori r4, r4, 0x1
1910 mtspr SPRN_HID0, r4
1911 mfspr r4, SPRN_CTRLF
1912 oris r4, r4, 0x40
1913 mtspr SPRN_CTRLT, r4
1914 blr
1915#endif
1916
1917#if defined(CONFIG_KEXEC) || defined(CONFIG_SMP)
1918_GLOBAL(smp_release_cpus)
1919 /* All secondary cpus are spinning on a common
1920 * spinloop; release them all now so they can start
1921 * to spin on their individual paca spinloops.
1922 * For non-SMP kernels, the secondary cpus never
1923 * get out of the common spinloop.
1924 * XXX This does nothing useful on iSeries, secondaries are
1925 * already waiting on their paca.
1926 */
1927 li r3,1
1928 LOADADDR(r5,__secondary_hold_spinloop)
1929 std r3,0(r5)
1930 sync
1931 blr
1932#endif /* CONFIG_KEXEC || CONFIG_SMP */
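Functionally this is just the following (sketch):

	extern unsigned long __secondary_hold_spinloop;

	__secondary_hold_spinloop = 1;	/* secondaries poll this word */
	mb();				/* the sync above */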
1933
1934
1935/*
1936 * We put a few things here that have to be page-aligned.
1937 * This stuff goes at the beginning of the bss, which is page-aligned.
1938 */
1939 .section ".bss"
1940
1941 .align PAGE_SHIFT
1942
1943 .globl empty_zero_page
1944empty_zero_page:
1945 .space PAGE_SIZE
1946
1947 .globl swapper_pg_dir
1948swapper_pg_dir:
1949 .space PAGE_SIZE
1950
1951/*
1952 * This space gets a copy of optional info passed to us by the bootstrap.
1953 * It is used to pass parameters into the kernel like root=/dev/sda1, etc.
1954 */
1955 .globl cmd_line
1956cmd_line:
1957 .space COMMAND_LINE_SIZE
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
new file mode 100644
index 000000000000..bc6d1ac55235
--- /dev/null
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -0,0 +1,860 @@
1/*
2 * arch/ppc/kernel/except_8xx.S
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
7 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
8 * Low-level exception handlers and MMU support
9 * rewritten by Paul Mackerras.
10 * Copyright (C) 1996 Paul Mackerras.
11 * MPC8xx modifications by Dan Malek
12 * Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
13 *
14 * This file contains low-level support and setup for PowerPC 8xx
15 * embedded processors, including trap and interrupt dispatch.
16 *
17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License
19 * as published by the Free Software Foundation; either version
20 * 2 of the License, or (at your option) any later version.
21 *
22 */
23
24#include <linux/config.h>
25#include <asm/processor.h>
26#include <asm/page.h>
27#include <asm/mmu.h>
28#include <asm/cache.h>
29#include <asm/pgtable.h>
30#include <asm/cputable.h>
31#include <asm/thread_info.h>
32#include <asm/ppc_asm.h>
33#include <asm/asm-offsets.h>
34
35/* Macro to make the code more readable. */
36#ifdef CONFIG_8xx_CPU6
37#define DO_8xx_CPU6(val, reg) \
38 li reg, val; \
39 stw reg, 12(r0); \
40 lwz reg, 12(r0);
41#else
42#define DO_8xx_CPU6(val, reg)
43#endif
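The workaround is a store-then-load of a per-SPR magic value to a fixed scratch word (offset 12 from r0) immediately before each mtspr. A C sketch of the same shape, with scratch standing in for that low-memory word:

	static volatile unsigned int scratch;	/* stands in for 12(r0) */

	static inline void do_8xx_cpu6(unsigned int val)
	{
		scratch = val;		/* stw reg, 12(r0) */
		(void)scratch;		/* lwz reg, 12(r0): read it right back */
	}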
44 .text
45 .globl _stext
46_stext:
47 .text
48 .globl _start
49_start:
50
51/* MPC8xx
52 * This port was done on an MBX board with an 860. Right now I only
53 * support an ELF compressed (zImage) boot from EPPC-Bug because the
54 * code there loads up some registers before calling us:
55 * r3: ptr to board info data
56 * r4: initrd_start or if no initrd then 0
57 * r5: initrd_end - unused if r4 is 0
58 * r6: Start of command line string
59 * r7: End of command line string
60 *
61 * I decided to use conditional compilation instead of checking PVR and
62 * adding more processor specific branches around code I don't need.
63 * Since this is an embedded processor, I also appreciate any memory
64 * savings I can get.
65 *
66 * The MPC8xx does not have any BATs, but it supports large page sizes.
67 * We first initialize the MMU to support 8M byte pages, then load one
68 * entry into each of the instruction and data TLBs to map the first
69 * 8M 1:1. I also mapped an additional I/O space 1:1 so we can get to
70 * the "internal" processor registers before MMU_init is called.
71 *
72 * The TLB code currently contains a major hack. Since I use the condition
73 * code register, I have to save and restore it. I am out of registers, so
74 * I just store it in memory location 0 (the TLB handlers are not reentrant).
75 * To avoid making any decisions, I need to use the "segment" valid bit
76 * in the first level table, but that would require many changes to the
77 * Linux page directory/table functions that I don't want to do right now.
78 *
79 * I used to use SPRG2 for a temporary register in the TLB handler, but it
80 * has since been put to other uses. I now use a hack to save a register
81 * and the CCR at memory location 0.....Someday I'll fix this.....
82 * -- Dan
83 */
84 .globl __start
85__start:
86 mr r31,r3 /* save parameters */
87 mr r30,r4
88 mr r29,r5
89 mr r28,r6
90 mr r27,r7
91
92 /* We have to turn on the MMU right away so we get cache modes
93 * set correctly.
94 */
95 bl initial_mmu
96
97/* We now have the lower 8 Meg mapped into TLB entries, and the caches
98 * ready to work.
99 */
100
101turn_on_mmu:
102 mfmsr r0
103 ori r0,r0,MSR_DR|MSR_IR
104 mtspr SPRN_SRR1,r0
105 lis r0,start_here@h
106 ori r0,r0,start_here@l
107 mtspr SPRN_SRR0,r0
108 SYNC
109 rfi /* enables MMU */
110
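Note the idiom here: rfi is used as a long jump that switches translation on atomically, since it reloads the MSR from SRR1 (with IR and DR now set) and the program counter from SRR0 in a single operation, landing at start_here with the MMU enabled.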
111/*
112 * Exception entry code. This code runs with address translation
113 * turned off, i.e. using physical addresses.
114 * We assume sprg3 has the physical address of the current
115 * task's thread_struct.
116 */
117#define EXCEPTION_PROLOG \
118 mtspr SPRN_SPRG0,r10; \
119 mtspr SPRN_SPRG1,r11; \
120 mfcr r10; \
121 EXCEPTION_PROLOG_1; \
122 EXCEPTION_PROLOG_2
123
124#define EXCEPTION_PROLOG_1 \
125 mfspr r11,SPRN_SRR1; /* check whether user or kernel */ \
126 andi. r11,r11,MSR_PR; \
127 tophys(r11,r1); /* use tophys(r1) if kernel */ \
128 beq 1f; \
129 mfspr r11,SPRN_SPRG3; \
130 lwz r11,THREAD_INFO-THREAD(r11); \
131 addi r11,r11,THREAD_SIZE; \
132 tophys(r11,r11); \
1331: subi r11,r11,INT_FRAME_SIZE /* alloc exc. frame */
134
135
136#define EXCEPTION_PROLOG_2 \
137 CLR_TOP32(r11); \
138 stw r10,_CCR(r11); /* save registers */ \
139 stw r12,GPR12(r11); \
140 stw r9,GPR9(r11); \
141 mfspr r10,SPRN_SPRG0; \
142 stw r10,GPR10(r11); \
143 mfspr r12,SPRN_SPRG1; \
144 stw r12,GPR11(r11); \
145 mflr r10; \
146 stw r10,_LINK(r11); \
147 mfspr r12,SPRN_SRR0; \
148 mfspr r9,SPRN_SRR1; \
149 stw r1,GPR1(r11); \
150 stw r1,0(r11); \
151 tovirt(r1,r11); /* set new kernel sp */ \
152 li r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \
153 MTMSRD(r10); /* (except for mach check in rtas) */ \
154 stw r0,GPR0(r11); \
155 SAVE_4GPRS(3, r11); \
156 SAVE_2GPRS(7, r11)
157
158/*
159 * Note: code which follows this uses cr0.eq (set if from kernel),
160 * r11, r12 (SRR0), and r9 (SRR1).
161 *
162 * Note2: once we have set r1 we are in a position to take exceptions
163 * again, and we could thus set MSR:RI at that point.
164 */
165
166/*
167 * Exception vectors.
168 */
169#define EXCEPTION(n, label, hdlr, xfer) \
170 . = n; \
171label: \
172 EXCEPTION_PROLOG; \
173 addi r3,r1,STACK_FRAME_OVERHEAD; \
174 xfer(n, hdlr)
175
176#define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret) \
177 li r10,trap; \
178 stw r10,_TRAP(r11); \
179 li r10,MSR_KERNEL; \
180 copyee(r10, r9); \
181 bl tfer; \
182i##n: \
183 .long hdlr; \
184 .long ret
185
186#define COPY_EE(d, s) rlwimi d,s,0,16,16
187#define NOCOPY(d, s)
188
189#define EXC_XFER_STD(n, hdlr) \
190 EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full, \
191 ret_from_except_full)
192
193#define EXC_XFER_LITE(n, hdlr) \
194 EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \
195 ret_from_except)
196
197#define EXC_XFER_EE(n, hdlr) \
198 EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \
199 ret_from_except_full)
200
201#define EXC_XFER_EE_LITE(n, hdlr) \
202 EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \
203 ret_from_except)
204
205/* System reset */
206 EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)
207
208/* Machine check */
209 . = 0x200
210MachineCheck:
211 EXCEPTION_PROLOG
212 mfspr r4,SPRN_DAR
213 stw r4,_DAR(r11)
214 mfspr r5,SPRN_DSISR
215 stw r5,_DSISR(r11)
216 addi r3,r1,STACK_FRAME_OVERHEAD
217 EXC_XFER_STD(0x200, machine_check_exception)
218
219/* Data access exception.
220 * This is "never generated" by the MPC8xx. We jump to it for other
221 * translation errors.
222 */
223 . = 0x300
224DataAccess:
225 EXCEPTION_PROLOG
226 mfspr r10,SPRN_DSISR
227 stw r10,_DSISR(r11)
228 mr r5,r10
229 mfspr r4,SPRN_DAR
230 EXC_XFER_EE_LITE(0x300, handle_page_fault)
231
232/* Instruction access exception.
233 * This is "never generated" by the MPC8xx. We jump to it for other
234 * translation errors.
235 */
236 . = 0x400
237InstructionAccess:
238 EXCEPTION_PROLOG
239 mr r4,r12
240 mr r5,r9
241 EXC_XFER_EE_LITE(0x400, handle_page_fault)
242
243/* External interrupt */
244 EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
245
246/* Alignment exception */
247 . = 0x600
248Alignment:
249 EXCEPTION_PROLOG
250 mfspr r4,SPRN_DAR
251 stw r4,_DAR(r11)
252 mfspr r5,SPRN_DSISR
253 stw r5,_DSISR(r11)
254 addi r3,r1,STACK_FRAME_OVERHEAD
255 EXC_XFER_EE(0x600, alignment_exception)
256
257/* Program check exception */
258 EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
259
260/* No FPU on MPC8xx. This exception is not supposed to happen.
261*/
262 EXCEPTION(0x800, FPUnavailable, unknown_exception, EXC_XFER_STD)
263
264/* Decrementer */
265 EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
266
267 EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_EE)
268 EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_EE)
269
270/* System call */
271 . = 0xc00
272SystemCall:
273 EXCEPTION_PROLOG
274 EXC_XFER_EE_LITE(0xc00, DoSyscall)
275
276/* Single step - not used on 601 */
277 EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
278 EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_EE)
279 EXCEPTION(0xf00, Trap_0f, unknown_exception, EXC_XFER_EE)
280
281/* On the MPC8xx, this is a software emulation interrupt. It occurs
282 * for all unimplemented and illegal instructions.
283 */
284 EXCEPTION(0x1000, SoftEmu, SoftwareEmulation, EXC_XFER_STD)
285
286 . = 0x1100
287/*
288 * For the MPC8xx, this is a software tablewalk to load the instruction
289 * TLB. It is modelled after the example in the Motorola manual. The task
290 * switch loads the M_TWB register with the pointer to the first level table.
291 * If we discover there is no second level table (value is zero) or if there
292 * is an invalid pte, we load that into the TLB, which causes another fault
293 * into the TLB Error interrupt where we can handle such problems.
294 * We have to use the MD_xxx registers for the tablewalk because the
295 * equivalent MI_xxx registers only perform the attribute functions.
296 */
297InstructionTLBMiss:
298#ifdef CONFIG_8xx_CPU6
299 stw r3, 8(r0)
300#endif
301 DO_8xx_CPU6(0x3f80, r3)
302 mtspr SPRN_M_TW, r10 /* Save a couple of working registers */
303 mfcr r10
304 stw r10, 0(r0)
305 stw r11, 4(r0)
306 mfspr r10, SPRN_SRR0 /* Get effective address of fault */
307 DO_8xx_CPU6(0x3780, r3)
308 mtspr SPRN_MD_EPN, r10 /* Have to use MD_EPN for walk, MI_EPN can't */
309 mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */
310
311 /* If we are faulting a kernel address, we have to use the
312 * kernel page tables.
313 */
314 andi. r11, r10, 0x0800 /* Address >= 0x80000000 */
315 beq 3f
316 lis r11, swapper_pg_dir@h
317 ori r11, r11, swapper_pg_dir@l
318 rlwimi r10, r11, 0, 2, 19
3193:
320 lwz r11, 0(r10) /* Get the level 1 entry */
321 rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */
322 beq 2f /* If zero, don't try to find a pte */
323
324 /* We have a pte table, so load the MI_TWC with the attributes
325 * for this "segment."
326 */
327 ori r11,r11,1 /* Set valid bit */
328 DO_8xx_CPU6(0x2b80, r3)
329 mtspr SPRN_MI_TWC, r11 /* Set segment attributes */
330 DO_8xx_CPU6(0x3b80, r3)
331 mtspr SPRN_MD_TWC, r11 /* Load pte table base address */
332 mfspr r11, SPRN_MD_TWC /* ....and get the pte address */
333 lwz r10, 0(r11) /* Get the pte */
334
335 ori r10, r10, _PAGE_ACCESSED
336 stw r10, 0(r11)
337
338 /* The Linux PTE won't go exactly into the MMU TLB.
339 * Software indicator bits 21, 22 and 28 must be clear.
340 * Software indicator bits 24, 25, 26, and 27 must be
341 * set. All other Linux PTE bits control the behavior
342 * of the MMU.
343 */
3442: li r11, 0x00f0
345 rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
346 DO_8xx_CPU6(0x2d80, r3)
347 mtspr SPRN_MI_RPN, r10 /* Update TLB entry */
348
349 mfspr r10, SPRN_M_TW /* Restore registers */
350 lwz r11, 0(r0)
351 mtcr r11
352 lwz r11, 4(r0)
353#ifdef CONFIG_8xx_CPU6
354 lwz r3, 8(r0)
355#endif
356 rfi
357
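Stripped of the SPR shuffling and the MI_TWC/MD_TWC attribute loads, the tablewalk above does roughly the following (a sketch; current_pgdir stands in for the pointer the task switch loads into M_TWB, and the ea test mirrors the 0x0800 check on the level-1 entry address):

	extern unsigned int swapper_pg_dir[];

	unsigned int ea = mfspr(SPRN_SRR0);	/* faulting address */
	unsigned int *pgd = (ea & 0x80000000) ? swapper_pg_dir : current_pgdir;
	unsigned int l1 = pgd[ea >> 22];	/* level 1 entry */
	unsigned int pte = 0;

	if (l1 & ~0xfff) {			/* have a pte table? */
		unsigned int *pt = (unsigned int *)(l1 & ~0xfff);

		pte = pt[(ea >> 12) & 0x3ff] | _PAGE_ACCESSED;
		pt[(ea >> 12) & 0x3ff] = pte;
	}
	/* software bits: set 24-27, clear 28 (the 0x00f0 rlwimi) */
	pte = (pte & ~0xf8u) | 0xf0;
	mtspr(SPRN_MI_RPN, pte);		/* load the TLB entry */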
358 . = 0x1200
359DataStoreTLBMiss:
360#ifdef CONFIG_8xx_CPU6
361 stw r3, 8(r0)
362#endif
363 DO_8xx_CPU6(0x3f80, r3)
364 mtspr SPRN_M_TW, r10 /* Save a couple of working registers */
365 mfcr r10
366 stw r10, 0(r0)
367 stw r11, 4(r0)
368 mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */
369
370 /* If we are faulting a kernel address, we have to use the
371 * kernel page tables.
372 */
373 andi. r11, r10, 0x0800
374 beq 3f
375 lis r11, swapper_pg_dir@h
376 ori r11, r11, swapper_pg_dir@l
377 rlwimi r10, r11, 0, 2, 19
3783:
379 lwz r11, 0(r10) /* Get the level 1 entry */
380 rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */
381 beq 2f /* If zero, don't try to find a pte */
382
383 /* We have a pte table, so fetch the pte from the table.
384 */
385 ori r11, r11, 1 /* Set valid bit in physical L2 page */
386 DO_8xx_CPU6(0x3b80, r3)
387 mtspr SPRN_MD_TWC, r11 /* Load pte table base address */
388 mfspr r10, SPRN_MD_TWC /* ....and get the pte address */
389 lwz r10, 0(r10) /* Get the pte */
390
391 /* Insert the Guarded flag into the TWC from the Linux PTE.
392 * It is bit 27 of both the Linux PTE and the TWC (at least
393 * I got that right :-). It will be better when we can put
394 * this into the Linux pgd/pmd and load it in the operation
395 * above.
396 */
397 rlwimi r11, r10, 0, 27, 27
398 DO_8xx_CPU6(0x3b80, r3)
399 mtspr SPRN_MD_TWC, r11
400
401 mfspr r11, SPRN_MD_TWC /* get the pte address again */
402 ori r10, r10, _PAGE_ACCESSED
403 stw r10, 0(r11)
404
405 /* The Linux PTE won't go exactly into the MMU TLB.
406 * Software indicator bits 21, 22 and 28 must be clear.
407 * Software indicator bits 24, 25, 26, and 27 must be
408 * set. All other Linux PTE bits control the behavior
409 * of the MMU.
410 */
4112: li r11, 0x00f0
412 rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
413 DO_8xx_CPU6(0x3d80, r3)
414 mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
415
416 mfspr r10, SPRN_M_TW /* Restore registers */
417 lwz r11, 0(r0)
418 mtcr r11
419 lwz r11, 4(r0)
420#ifdef CONFIG_8xx_CPU6
421 lwz r3, 8(r0)
422#endif
423 rfi
424
425/* This is an instruction TLB error on the MPC8xx. This could occur
426 * for many reasons, such as executing guarded memory or illegal instruction
427 * addresses. There is nothing to do but handle a big time error fault.
428 */
429 . = 0x1300
430InstructionTLBError:
431 b InstructionAccess
432
433/* This is the data TLB error on the MPC8xx. This could be due to
434 * many reasons, including a dirty update to a pte. We can catch that
435 * one here, but anything else is an error. First, we track down the
436 * Linux pte. If it is valid, write access is allowed, and the
437 * page dirty bit is not set, we set the dirty bit and reload the TLB. For
438 * any other case, we bail out to a higher level function that can
439 * handle it.
440 */
441 . = 0x1400
442DataTLBError:
443#ifdef CONFIG_8xx_CPU6
444 stw r3, 8(r0)
445#endif
446 DO_8xx_CPU6(0x3f80, r3)
447 mtspr SPRN_M_TW, r10 /* Save a couple of working registers */
448 mfcr r10
449 stw r10, 0(r0)
450 stw r11, 4(r0)
451
452 /* First, make sure this was a store operation.
453 */
454 mfspr r10, SPRN_DSISR
455 andis. r11, r10, 0x0200 /* If set, indicates store op */
456 beq 2f
457
458 /* The EA of a data TLB miss is automatically stored in the MD_EPN
459 * register. The EA of a data TLB error is automatically stored in
460 * the DAR, but not the MD_EPN register. We must copy the 20 most
461 * significant bits of the EA from the DAR to MD_EPN before we
462 * start walking the page tables. We also need to copy the CASID
463 * value from the M_CASID register.
464 * Addendum: The EA of a data TLB error is _supposed_ to be stored
465 * in DAR, but it seems that this doesn't happen in some cases, such
466 * as when the error is due to a dcbi instruction to a page with a
467 * TLB that doesn't have the changed bit set. In such cases, there
468 * does not appear to be any way to recover the EA of the error
469 * since it is neither in DAR nor MD_EPN. As a workaround, the
470 * _PAGE_HWWRITE bit is set for all kernel data pages when the PTEs
471 * are initialized in mapin_ram(). This will avoid the problem,
472 * assuming we only use the dcbi instruction on kernel addresses.
473 */
474 mfspr r10, SPRN_DAR
475 rlwinm r11, r10, 0, 0, 19
476 ori r11, r11, MD_EVALID
477 mfspr r10, SPRN_M_CASID
478 rlwimi r11, r10, 0, 28, 31
479 DO_8xx_CPU6(0x3780, r3)
480 mtspr SPRN_MD_EPN, r11
481
482 mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */
483
484 /* If we are faulting a kernel address, we have to use the
485 * kernel page tables.
486 */
487 andi. r11, r10, 0x0800
488 beq 3f
489 lis r11, swapper_pg_dir@h
490 ori r11, r11, swapper_pg_dir@l
491 rlwimi r10, r11, 0, 2, 19
4923:
493 lwz r11, 0(r10) /* Get the level 1 entry */
494 rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */
495 beq 2f /* If zero, bail */
496
497 /* We have a pte table, so fetch the pte from the table.
498 */
499 ori r11, r11, 1 /* Set valid bit in physical L2 page */
500 DO_8xx_CPU6(0x3b80, r3)
501 mtspr SPRN_MD_TWC, r11 /* Load pte table base address */
502 mfspr r11, SPRN_MD_TWC /* ....and get the pte address */
503 lwz r10, 0(r11) /* Get the pte */
504
505 andi. r11, r10, _PAGE_RW /* Is it writeable? */
506 beq 2f /* Bail out if not */
507
508 /* Update 'changed', among others.
509 */
510 ori r10, r10, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
511 mfspr r11, SPRN_MD_TWC /* Get pte address again */
512 stw r10, 0(r11) /* and update pte in table */
513
514 /* The Linux PTE won't go exactly into the MMU TLB.
515 * Software indicator bits 21, 22 and 28 must be clear.
516 * Software indicator bits 24, 25, 26, and 27 must be
517 * set. All other Linux PTE bits control the behavior
518 * of the MMU.
519 */
520 li r11, 0x00f0
521 rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
522 DO_8xx_CPU6(0x3d80, r3)
523 mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
524
525 mfspr r10, SPRN_M_TW /* Restore registers */
526 lwz r11, 0(r0)
527 mtcr r11
528 lwz r11, 4(r0)
529#ifdef CONFIG_8xx_CPU6
530 lwz r3, 8(r0)
531#endif
532 rfi
5332:
534 mfspr r10, SPRN_M_TW /* Restore registers */
535 lwz r11, 0(r0)
536 mtcr r11
537 lwz r11, 4(r0)
538#ifdef CONFIG_8xx_CPU6
539 lwz r3, 8(r0)
540#endif
541 b DataAccess
542
543 EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
544 EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
545 EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_EE)
546 EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
547 EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
548 EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE)
549 EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE)
550
551/* On the MPC8xx, these next four traps are used for development
552 * support of breakpoints and such. Someday I will get around to
553 * using them.
554 */
555 EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE)
556 EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE)
557 EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE)
558 EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE)
559
560 . = 0x2000
561
562 .globl giveup_fpu
563giveup_fpu:
564 blr
565
566/*
567 * This is where the main kernel code starts.
568 */
569start_here:
570 /* ptr to current */
571 lis r2,init_task@h
572 ori r2,r2,init_task@l
573
574 /* ptr to phys current thread */
575 tophys(r4,r2)
576 addi r4,r4,THREAD /* init task's THREAD */
577 mtspr SPRN_SPRG3,r4
578 li r3,0
579 mtspr SPRN_SPRG2,r3 /* 0 => r1 has kernel sp */
580
581 /* stack */
582 lis r1,init_thread_union@ha
583 addi r1,r1,init_thread_union@l
584 li r0,0
585 stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
586
587 bl early_init /* We have to do this with MMU on */
588
589/*
590 * Decide what sort of machine this is and initialize the MMU.
591 */
592 mr r3,r31
593 mr r4,r30
594 mr r5,r29
595 mr r6,r28
596 mr r7,r27
597 bl machine_init
598 bl MMU_init
599
600/*
601 * Go back to running unmapped so we can load up new values
602 * and change to using our exception vectors.
603 * On the 8xx, all we have to do is invalidate the TLB to clear
604 * the old 8M byte TLB mappings and load the page table base register.
605 */
606 /* The right way to do this would be to track it down through
607 * init's THREAD like the context switch code does, but this is
608 * easier... until someone changes init's static structures.
609 */
610 lis r6, swapper_pg_dir@h
611 ori r6, r6, swapper_pg_dir@l
612 tophys(r6,r6)
613#ifdef CONFIG_8xx_CPU6
614 lis r4, cpu6_errata_word@h
615 ori r4, r4, cpu6_errata_word@l
616 li r3, 0x3980
617 stw r3, 12(r4)
618 lwz r3, 12(r4)
619#endif
620 mtspr SPRN_M_TWB, r6
621 lis r4,2f@h
622 ori r4,r4,2f@l
623 tophys(r4,r4)
624 li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
625 mtspr SPRN_SRR0,r4
626 mtspr SPRN_SRR1,r3
627 rfi
628/* Load up the kernel context */
6292:
630 SYNC /* Force all PTE updates to finish */
631 tlbia /* Clear all TLB entries */
632 sync /* wait for tlbia/tlbie to finish */
633 TLBSYNC /* ... on all CPUs */
634
635 /* set up the PTE pointers for the Abatron bdiGDB.
636 */
637 tovirt(r6,r6)
638 lis r5, abatron_pteptrs@h
639 ori r5, r5, abatron_pteptrs@l
640 stw r5, 0xf0(r0) /* Must match your Abatron config file */
641 tophys(r5,r5)
642 stw r6, 0(r5)
643
644/* Now turn on the MMU for real! */
645 li r4,MSR_KERNEL
646 lis r3,start_kernel@h
647 ori r3,r3,start_kernel@l
648 mtspr SPRN_SRR0,r3
649 mtspr SPRN_SRR1,r4
650 rfi /* enable MMU and jump to start_kernel */
651
652/* Set up the initial MMU state so we can do the first level of
653 * kernel initialization. This maps the first 8 MBytes of memory 1:1
654 * virtual to physical. Also, set the cache mode since that is defined
655 * by TLB entries and perform any additional mapping (like of the IMMR).
656 * If configured to pin some TLBs, we pin the first 8 Mbytes of kernel,
657 * 24 Mbytes of data, and the 8M IMMR space. Anything not covered by
658 * these mappings is mapped by page tables.
659 */
660initial_mmu:
661 tlbia /* Invalidate all TLB entries */
662#ifdef CONFIG_PIN_TLB
663 lis r8, MI_RSV4I@h
664 ori r8, r8, 0x1c00
665#else
666 li r8, 0
667#endif
668 mtspr SPRN_MI_CTR, r8 /* Set instruction MMU control */
669
670#ifdef CONFIG_PIN_TLB
671 lis r10, (MD_RSV4I | MD_RESETVAL)@h
672 ori r10, r10, 0x1c00
673 mr r8, r10
674#else
675 lis r10, MD_RESETVAL@h
676#endif
677#ifndef CONFIG_8xx_COPYBACK
678 oris r10, r10, MD_WTDEF@h
679#endif
680 mtspr SPRN_MD_CTR, r10 /* Set data TLB control */
681
682 /* Now map the lower 8 Meg into the TLBs. For this quick hack,
683 * we can load the instruction and data TLB registers with the
684 * same values.
685 */
686 lis r8, KERNELBASE@h /* Create vaddr for TLB */
687 ori r8, r8, MI_EVALID /* Mark it valid */
688 mtspr SPRN_MI_EPN, r8
689 mtspr SPRN_MD_EPN, r8
690 li r8, MI_PS8MEG /* Set 8M byte page */
691 ori r8, r8, MI_SVALID /* Make it valid */
692 mtspr SPRN_MI_TWC, r8
693 mtspr SPRN_MD_TWC, r8
694 li r8, MI_BOOTINIT /* Create RPN for address 0 */
695 mtspr SPRN_MI_RPN, r8 /* Store TLB entry */
696 mtspr SPRN_MD_RPN, r8
697 lis r8, MI_Kp@h /* Set the protection mode */
698 mtspr SPRN_MI_AP, r8
699 mtspr SPRN_MD_AP, r8
700
701 /* Map another 8 MByte at the IMMR to get the processor
702 * internal registers (among other things).
703 */
704#ifdef CONFIG_PIN_TLB
705 addi r10, r10, 0x0100
706 mtspr SPRN_MD_CTR, r10
707#endif
708 mfspr r9, 638 /* Get current IMMR */
709 andis. r9, r9, 0xff80 /* Get 8Mbyte boundary */
710
711 mr r8, r9 /* Create vaddr for TLB */
712 ori r8, r8, MD_EVALID /* Mark it valid */
713 mtspr SPRN_MD_EPN, r8
714 li r8, MD_PS8MEG /* Set 8M byte page */
715 ori r8, r8, MD_SVALID /* Make it valid */
716 mtspr SPRN_MD_TWC, r8
717 mr r8, r9 /* Create paddr for TLB */
718 ori r8, r8, MI_BOOTINIT|0x2 /* Inhibit cache -- Cort */
719 mtspr SPRN_MD_RPN, r8
720
721#ifdef CONFIG_PIN_TLB
722 /* Map two more 8M kernel data pages.
723 */
724 addi r10, r10, 0x0100
725 mtspr SPRN_MD_CTR, r10
726
727 lis r8, KERNELBASE@h /* Create vaddr for TLB */
728 addis r8, r8, 0x0080 /* Add 8M */
729 ori r8, r8, MI_EVALID /* Mark it valid */
730 mtspr SPRN_MD_EPN, r8
731 li r9, MI_PS8MEG /* Set 8M byte page */
732 ori r9, r9, MI_SVALID /* Make it valid */
733 mtspr SPRN_MD_TWC, r9
734 li r11, MI_BOOTINIT /* Create RPN for address 0 */
735 addis r11, r11, 0x0080 /* Add 8M */
736 mtspr SPRN_MD_RPN, r11 /* Store TLB entry */
737
738 addis r8, r8, 0x0080 /* Add 8M */
739 mtspr SPRN_MD_EPN, r8
740 mtspr SPRN_MD_TWC, r9
741 addis r11, r11, 0x0080 /* Add 8M */
742 mtspr SPRN_MD_RPN, r11
743#endif
744
745 /* Since the cache is enabled according to the information we
746 * just loaded into the TLB, invalidate and enable the caches here.
747 * We should probably check/set other modes....later.
748 */
749 lis r8, IDC_INVALL@h
750 mtspr SPRN_IC_CST, r8
751 mtspr SPRN_DC_CST, r8
752 lis r8, IDC_ENABLE@h
753 mtspr SPRN_IC_CST, r8
754#ifdef CONFIG_8xx_COPYBACK
755 mtspr SPRN_DC_CST, r8
756#else
757 /* For a debug option, I left this here to easily enable
758 * the write-through cache mode
759 */
760 lis r8, DC_SFWT@h
761 mtspr SPRN_DC_CST, r8
762 lis r8, IDC_ENABLE@h
763 mtspr SPRN_DC_CST, r8
764#endif
765 blr
766
767
768/*
769 * Set up to use a given MMU context.
770 * r3 is context number, r4 is PGD pointer.
771 *
772 * We place the physical address of the new task page directory loaded
773 * into the MMU base register, and set the ASID compare register with
774 * the new "context."
775 */
776_GLOBAL(set_context)
777
778#ifdef CONFIG_BDI_SWITCH
779 /* Context switch the PTE pointer for the Abatron BDI2000.
780 * The PGDIR is passed as second argument.
781 */
782 lis r5, KERNELBASE@h
783 lwz r5, 0xf0(r5)
784 stw r4, 0x4(r5)
785#endif
786
787#ifdef CONFIG_8xx_CPU6
788 lis r6, cpu6_errata_word@h
789 ori r6, r6, cpu6_errata_word@l
790 tophys (r4, r4)
791 li r7, 0x3980
792 stw r7, 12(r6)
793 lwz r7, 12(r6)
794 mtspr SPRN_M_TWB, r4 /* Update MMU base address */
795 li r7, 0x3380
796 stw r7, 12(r6)
797 lwz r7, 12(r6)
798 mtspr SPRN_M_CASID, r3 /* Update context */
799#else
800 mtspr SPRN_M_CASID,r3 /* Update context */
801 tophys (r4, r4)
802 mtspr SPRN_M_TWB, r4 /* and pgd */
803#endif
804 SYNC
805 blr
806
807#ifdef CONFIG_8xx_CPU6
808/* It's here because it is unique to the 8xx.
809 * It is important we get called with interrupts disabled. I used to
810 * do that, but it appears that all code that calls this already has
811 * interrupts disabled.
812 */
813 .globl set_dec_cpu6
814set_dec_cpu6:
815 lis r7, cpu6_errata_word@h
816 ori r7, r7, cpu6_errata_word@l
817 li r4, 0x2c00
818 stw r4, 8(r7)
819 lwz r4, 8(r7)
820 mtspr 22, r3 /* Update Decrementer */
821 SYNC
822 blr
823#endif
824
825/*
826 * We put a few things here that have to be page-aligned.
827 * This stuff goes at the beginning of the data segment,
828 * which is page-aligned.
829 */
830 .data
831 .globl sdata
832sdata:
833 .globl empty_zero_page
834empty_zero_page:
835 .space 4096
836
837 .globl swapper_pg_dir
838swapper_pg_dir:
839 .space 4096
840
841/*
842 * This space gets a copy of optional info passed to us by the bootstrap.
843 * It is used to pass parameters into the kernel like root=/dev/sda1, etc.
844 */
845 .globl cmd_line
846cmd_line:
847 .space 512
848
849/* Room for two PTE table pointers, usually the kernel and current user
850 * pointers to their respective root page tables (pgdir).
851 */
852abatron_pteptrs:
853 .space 8
854
855#ifdef CONFIG_8xx_CPU6
856 .globl cpu6_errata_word
857cpu6_errata_word:
858 .space 16
859#endif
860
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
new file mode 100644
index 000000000000..5063c603fad4
--- /dev/null
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -0,0 +1,1063 @@
1/*
2 * arch/ppc/kernel/head_fsl_booke.S
3 *
4 * Kernel execution entry point code.
5 *
6 * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
7 * Initial PowerPC version.
8 * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
9 * Rewritten for PReP
10 * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
11 * Low-level exception handlers, MMU support, and rewrite.
12 * Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
13 * PowerPC 8xx modifications.
14 * Copyright (c) 1998-1999 TiVo, Inc.
15 * PowerPC 403GCX modifications.
16 * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
17 * PowerPC 403GCX/405GP modifications.
18 * Copyright 2000 MontaVista Software Inc.
19 * PPC405 modifications
20 * PowerPC 403GCX/405GP modifications.
21 * Author: MontaVista Software, Inc.
22 * frank_rowand@mvista.com or source@mvista.com
23 * debbie_chu@mvista.com
24 * Copyright 2002-2004 MontaVista Software, Inc.
25 * PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
26 * Copyright 2004 Freescale Semiconductor, Inc
27 * PowerPC e500 modifications, Kumar Gala <kumar.gala@freescale.com>
28 *
29 * This program is free software; you can redistribute it and/or modify it
30 * under the terms of the GNU General Public License as published by the
31 * Free Software Foundation; either version 2 of the License, or (at your
32 * option) any later version.
33 */
34
35#include <linux/config.h>
36#include <linux/threads.h>
37#include <asm/processor.h>
38#include <asm/page.h>
39#include <asm/mmu.h>
40#include <asm/pgtable.h>
41#include <asm/cputable.h>
42#include <asm/thread_info.h>
43#include <asm/ppc_asm.h>
44#include <asm/asm-offsets.h>
45#include "head_booke.h"
46
47/* As with the other PowerPC ports, it is expected that when code
48 * execution begins here, the following registers contain valid, yet
49 * optional, information:
50 *
51 * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
52 * r4 - Starting address of the init RAM disk
53 * r5 - Ending address of the init RAM disk
54 * r6 - Start of kernel command line string (e.g. "mem=128")
55 * r7 - End of kernel command line string
56 *
57 */
58 .text
59_GLOBAL(_stext)
60_GLOBAL(_start)
61 /*
62 * Reserve a word at a fixed location to store the address
63 * of abatron_pteptrs
64 */
65 nop
66/*
67 * Save parameters we are passed
68 */
69 mr r31,r3
70 mr r30,r4
71 mr r29,r5
72 mr r28,r6
73 mr r27,r7
74 li r24,0 /* CPU number */
75
76/* We try not to make any assumptions about how the boot loader
77 * set up or used the TLBs. We invalidate all mappings from the
78 * boot loader and load a single entry in TLB1[0] to map the
79 * first 16M of kernel memory. Any boot info passed from the
80 * bootloader needs to live in this first 16M.
81 *
82 * Requirement on bootloader:
83 * - The page we're executing in needs to reside in TLB1 and
84 * have IPROT=1. If not, an invalidate broadcast could
85 * evict the entry we're currently executing in.
86 *
87 * r3 = Index of the TLB1 entry we're executing in
88 * r4 = Current MSR[IS]
89 * r5 = Index of TLB1 temp mapping
90 *
91 * Later in mapin_ram we will correctly map lowmem, and resize TLB1[0]
92 * if needed
93 */
94
95/* 1. Find the index of the entry we're executing in */
96 bl invstr /* Find our address */
97invstr: mflr r6 /* Make it accessible */
98 mfmsr r7
99 rlwinm r4,r7,27,31,31 /* extract MSR[IS] */
100 mfspr r7, SPRN_PID0
101 slwi r7,r7,16
102 or r7,r7,r4
103 mtspr SPRN_MAS6,r7
104 tlbsx 0,r6 /* search MSR[IS], SPID=PID0 */
105#ifndef CONFIG_E200
106 mfspr r7,SPRN_MAS1
107 andis. r7,r7,MAS1_VALID@h
108 bne match_TLB
109 mfspr r7,SPRN_PID1
110 slwi r7,r7,16
111 or r7,r7,r4
112 mtspr SPRN_MAS6,r7
113 tlbsx 0,r6 /* search MSR[IS], SPID=PID1 */
114 mfspr r7,SPRN_MAS1
115 andis. r7,r7,MAS1_VALID@h
116 bne match_TLB
117 mfspr r7, SPRN_PID2
118 slwi r7,r7,16
119 or r7,r7,r4
120 mtspr SPRN_MAS6,r7
121 tlbsx 0,r6 /* Fall through, we had to match */
122#endif
123match_TLB:
124 mfspr r7,SPRN_MAS0
125 rlwinm r3,r7,16,20,31 /* Extract MAS0(Entry) */
126
127 mfspr r7,SPRN_MAS1 /* Ensure IPROT is set */
128 oris r7,r7,MAS1_IPROT@h
129 mtspr SPRN_MAS1,r7
130 tlbwe
131
132/* 2. Invalidate all entries except the entry we're executing in */
133 mfspr r9,SPRN_TLB1CFG
134 andi. r9,r9,0xfff
135 li r6,0 /* Set Entry counter to 0 */
1361: lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
137 rlwimi r7,r6,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r6) */
138 mtspr SPRN_MAS0,r7
139 tlbre
140 mfspr r7,SPRN_MAS1
141 rlwinm r7,r7,0,2,31 /* Clear MAS1 Valid and IPROT */
142 cmpw r3,r6
143 beq skpinv /* Don't update the TLB entry we're executing from */
144 mtspr SPRN_MAS1,r7
145 tlbwe
146 isync
147skpinv: addi r6,r6,1 /* Increment */
148 cmpw r6,r9 /* Are we done? */
149 bne 1b /* If not, repeat */
150
151 /* Invalidate TLB0 */
152 li r6,0x04
153 tlbivax 0,r6
154#ifdef CONFIG_SMP
155 tlbsync
156#endif
157 /* Invalidate TLB1 */
158 li r6,0x0c
159 tlbivax 0,r6
160#ifdef CONFIG_SMP
161 tlbsync
162#endif
163 msync
164
165/* 3. Setup a temp mapping and jump to it */
166 andi. r5, r3, 0x1 /* Pick an entry that is unused and non-zero */
167 addi r5, r5, 0x1
168 lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
169 rlwimi r7,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */
170 mtspr SPRN_MAS0,r7
171 tlbre
172
173 /* Just modify the entry ID and EPN for the temp mapping */
174 lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
175 rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */
176 mtspr SPRN_MAS0,r7
177 xori r6,r4,1 /* Setup TMP mapping in the other Address space */
178 slwi r6,r6,12
179 oris r6,r6,(MAS1_VALID|MAS1_IPROT)@h
180 ori r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_4K))@l
181 mtspr SPRN_MAS1,r6
182 mfspr r6,SPRN_MAS2
183 li r7,0 /* temp EPN = 0 */
184 rlwimi r7,r6,0,20,31
185 mtspr SPRN_MAS2,r7
186 tlbwe
187
188 xori r6,r4,1
189 slwi r6,r6,5 /* setup new context with other address space */
190 bl 1f /* Find our address */
1911: mflr r9
192 rlwimi r7,r9,0,20,31
193 addi r7,r7,24
194 mtspr SPRN_SRR0,r7
195 mtspr SPRN_SRR1,r6
196 rfi
197
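A note on the bl 1f / mflr pattern here (used again in step 7): bl leaves the address of label 1 in LR, the rlwimi splices its low 12 bits into the EPN chosen for the mapping, and the addi of 24 (six 4-byte instructions, mflr through rfi) makes SRR0 point at the instruction just past the rfi, so execution resumes at the next numbered step in the other address space.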
198/* 4. Clear out PIDs & Search info */
199 li r6,0
200 mtspr SPRN_PID0,r6
201#ifndef CONFIG_E200
202 mtspr SPRN_PID1,r6
203 mtspr SPRN_PID2,r6
204#endif
205 mtspr SPRN_MAS6,r6
206
207/* 5. Invalidate mapping we started in */
208 lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
209 rlwimi r7,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */
210 mtspr SPRN_MAS0,r7
211 tlbre
212 li r6,0
213 mtspr SPRN_MAS1,r6
214 tlbwe
215 /* Invalidate TLB1 */
216 li r9,0x0c
217 tlbivax 0,r9
218#ifdef CONFIG_SMP
219 tlbsync
220#endif
221 msync
222
223/* 6. Setup KERNELBASE mapping in TLB1[0] */
224 lis r6,0x1000 /* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */
225 mtspr SPRN_MAS0,r6
226 lis r6,(MAS1_VALID|MAS1_IPROT)@h
227 ori r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_16M))@l
228 mtspr SPRN_MAS1,r6
229 li r7,0
230 lis r6,KERNELBASE@h
231 ori r6,r6,KERNELBASE@l
232 rlwimi r6,r7,0,20,31
233 mtspr SPRN_MAS2,r6
234 li r7,(MAS3_SX|MAS3_SW|MAS3_SR)
235 mtspr SPRN_MAS3,r7
236 tlbwe
237
238/* 7. Jump to KERNELBASE mapping */
239 lis r7,MSR_KERNEL@h
240 ori r7,r7,MSR_KERNEL@l
241 bl 1f /* Find our address */
2421: mflr r9
243 rlwimi r6,r9,0,20,31
244 addi r6,r6,24
245 mtspr SPRN_SRR0,r6
246 mtspr SPRN_SRR1,r7
247 rfi /* start execution out of TLB1[0] entry */
248
249/* 8. Clear out the temp mapping */
250 lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
251 rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */
252 mtspr SPRN_MAS0,r7
253 tlbre
254 li r8,0 /* Invalidate the temp mapping: clear MAS1 */
    mtspr SPRN_MAS1,r8
255 tlbwe
256 /* Invalidate TLB1 */
257 li r9,0x0c
258 tlbivax 0,r9
259#ifdef CONFIG_SMP
260 tlbsync
261#endif
262 msync
263
264 /* Establish the interrupt vector offsets */
265 SET_IVOR(0, CriticalInput);
266 SET_IVOR(1, MachineCheck);
267 SET_IVOR(2, DataStorage);
268 SET_IVOR(3, InstructionStorage);
269 SET_IVOR(4, ExternalInput);
270 SET_IVOR(5, Alignment);
271 SET_IVOR(6, Program);
272 SET_IVOR(7, FloatingPointUnavailable);
273 SET_IVOR(8, SystemCall);
274 SET_IVOR(9, AuxillaryProcessorUnavailable);
275 SET_IVOR(10, Decrementer);
276 SET_IVOR(11, FixedIntervalTimer);
277 SET_IVOR(12, WatchdogTimer);
278 SET_IVOR(13, DataTLBError);
279 SET_IVOR(14, InstructionTLBError);
280 SET_IVOR(15, Debug);
281 SET_IVOR(32, SPEUnavailable);
282 SET_IVOR(33, SPEFloatingPointData);
283 SET_IVOR(34, SPEFloatingPointRound);
284#ifndef CONFIG_E200
285 SET_IVOR(35, PerformanceMonitor);
286#endif
287
288 /* Establish the interrupt vector base */
289 lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */
290 mtspr SPRN_IVPR,r4
291
292 /* Setup the defaults for TLB entries */
293 li r2,(MAS4_TSIZED(BOOKE_PAGESZ_4K))@l
294#ifdef CONFIG_E200
295 oris r2,r2,MAS4_TLBSELD(1)@h
296#endif
297 mtspr SPRN_MAS4, r2
298
299#if 0
300 /* Enable DOZE */
301 mfspr r2,SPRN_HID0
302 oris r2,r2,HID0_DOZE@h
303 mtspr SPRN_HID0, r2
304#endif
305#ifdef CONFIG_E200
306 /* enable dedicated debug exception handling resources (Debug APU) */
307 mfspr r2,SPRN_HID0
308 ori r2,r2,HID0_DAPUEN@l
309 mtspr SPRN_HID0,r2
310#endif
311
312#if !defined(CONFIG_BDI_SWITCH)
313 /*
314 * The Abatron BDI JTAG debugger does not tolerate others
315 * mucking with the debug registers.
316 */
317 lis r2,DBCR0_IDM@h
318 mtspr SPRN_DBCR0,r2
319 /* clear any residual debug events */
320 li r2,-1
321 mtspr SPRN_DBSR,r2
322#endif
323
324 /*
325 * This is where the main kernel code starts.
326 */
327
328 /* ptr to current */
329 lis r2,init_task@h
330 ori r2,r2,init_task@l
331
332 /* ptr to current thread */
333 addi r4,r2,THREAD /* init task's THREAD */
334 mtspr SPRN_SPRG3,r4
335
336 /* stack */
337 lis r1,init_thread_union@h
338 ori r1,r1,init_thread_union@l
339 li r0,0
340 stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
341
342 bl early_init
343
344 mfspr r3,SPRN_TLB1CFG
345 andi. r3,r3,0xfff
346 lis r4,num_tlbcam_entries@ha
347 stw r3,num_tlbcam_entries@l(r4)
348/*
349 * Decide what sort of machine this is and initialize the MMU.
350 */
351 mr r3,r31
352 mr r4,r30
353 mr r5,r29
354 mr r6,r28
355 mr r7,r27
356 bl machine_init
357 bl MMU_init
358
359 /* Setup PTE pointers for the Abatron bdiGDB */
360 lis r6, swapper_pg_dir@h
361 ori r6, r6, swapper_pg_dir@l
362 lis r5, abatron_pteptrs@h
363 ori r5, r5, abatron_pteptrs@l
364 lis r4, KERNELBASE@h
365 ori r4, r4, KERNELBASE@l
366 stw r5, 0(r4) /* Save abatron_pteptrs at a fixed location */
367 stw r6, 0(r5)
368
369 /* Let's move on */
370 lis r4,start_kernel@h
371 ori r4,r4,start_kernel@l
372 lis r3,MSR_KERNEL@h
373 ori r3,r3,MSR_KERNEL@l
374 mtspr SPRN_SRR0,r4
375 mtspr SPRN_SRR1,r3
376 rfi /* change context and jump to start_kernel */
377
378/* Macros to hide the PTE size differences
379 *
380 * FIND_PTE -- walks the page tables given EA & pgdir pointer
381 * r10 -- EA of fault
382 * r11 -- PGDIR pointer
383 * r12 -- free
384 * label 2: is the bailout case
385 *
386 * if we find the pte (fall through):
387 * r11 is low pte word
388 * r12 is pointer to the pte
389 */
390#ifdef CONFIG_PTE_64BIT
391#define PTE_FLAGS_OFFSET 4
392#define FIND_PTE \
393 rlwinm r12, r10, 13, 19, 29; /* Compute pgdir/pmd offset */ \
394 lwzx r11, r12, r11; /* Get pgd/pmd entry */ \
395 rlwinm. r12, r11, 0, 0, 20; /* Extract pt base address */ \
396 beq 2f; /* Bail if no table */ \
397 rlwimi r12, r10, 23, 20, 28; /* Compute pte address */ \
398 lwz r11, 4(r12); /* Get pte entry */
399#else
400#define PTE_FLAGS_OFFSET 0
401#define FIND_PTE \
402 rlwimi r11, r10, 12, 20, 29; /* Create L1 (pgdir/pmd) address */ \
403 lwz r11, 0(r11); /* Get L1 entry */ \
404 rlwinm. r12, r11, 0, 0, 19; /* Extract L2 (pte) base address */ \
405 beq 2f; /* Bail if no table */ \
406 rlwimi r12, r10, 22, 20, 29; /* Compute PTE address */ \
407 lwz r11, 0(r12); /* Get Linux PTE */
408#endif
409
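For the 32-bit PTE case, FIND_PTE amounts to the C walk below (a sketch; the macro itself keeps everything in r10-r12 and branches to label 2 instead of returning NULL):

	static unsigned int *find_pte(unsigned int *pgdir, unsigned int ea)
	{
		unsigned int l1 = pgdir[ea >> 22];	/* pgd/pmd entry */
		unsigned int *pt;

		if (!(l1 & ~0xfff))			/* no pte table: bail */
			return 0;
		pt = (unsigned int *)(l1 & ~0xfff);	/* pte base address */
		return &pt[(ea >> 12) & 0x3ff];		/* pointer to the pte */
	}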
410/*
411 * Interrupt vector entry code
412 *
413 * The Book E MMUs are always on so we don't need to handle
414 * interrupts in real mode as with previous PPC processors. In
415 * this case we handle interrupts in the kernel virtual address
416 * space.
417 *
418 * Interrupt vectors are dynamically placed relative to the
419 * interrupt prefix as determined by the address of interrupt_base.
420 * The interrupt vectors offsets are programmed using the labels
421 * for each interrupt vector entry.
422 *
423 * Interrupt vectors must be aligned on a 16 byte boundary.
424 * We align on a 32 byte cache line boundary for good measure.
425 */
426
427interrupt_base:
428 /* Critical Input Interrupt */
429 CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)
430
431 /* Machine Check Interrupt */
432#ifdef CONFIG_E200
433 /* no RFMCI, MCSRRs on E200 */
434 CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
435#else
436 MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
437#endif
438
439 /* Data Storage Interrupt */
440 START_EXCEPTION(DataStorage)
441 mtspr SPRN_SPRG0, r10 /* Save some working registers */
442 mtspr SPRN_SPRG1, r11
443 mtspr SPRN_SPRG4W, r12
444 mtspr SPRN_SPRG5W, r13
445 mfcr r11
446 mtspr SPRN_SPRG7W, r11
447
448 /*
449 * Check if it was a store fault; if not, bail
450 * because a user tried to access a kernel or
451 * read-protected page. Otherwise, get the
452 * offending address and handle it.
453 */
454 mfspr r10, SPRN_ESR
455 andis. r10, r10, ESR_ST@h
456 beq 2f
457
458 mfspr r10, SPRN_DEAR /* Get faulting address */
459
460 /* If we are faulting a kernel address, we have to use the
461 * kernel page tables.
462 */
463 lis r11, TASK_SIZE@h
464 ori r11, r11, TASK_SIZE@l
465 cmplw 0, r10, r11
466 bge 2f
467
468 /* Get the PGD for the current thread */
4693:
470 mfspr r11,SPRN_SPRG3
471 lwz r11,PGDIR(r11)
4724:
473 FIND_PTE
474
475 /* Are _PAGE_USER & _PAGE_RW set & _PAGE_HWWRITE not? */
476 andi. r13, r11, _PAGE_RW|_PAGE_USER|_PAGE_HWWRITE
477 cmpwi 0, r13, _PAGE_RW|_PAGE_USER
478 bne 2f /* Bail if not */
479
480 /* Update 'changed'. */
481 ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
482 stw r11, PTE_FLAGS_OFFSET(r12) /* Update Linux page table */
483
484 /* MAS2 is not updated, as the entry already exists in the TLB; this
485 fault is taken to detect a state transition (e.g. COW -> DIRTY)
486 */
487 andi. r11, r11, _PAGE_HWEXEC
488 rlwimi r11, r11, 31, 27, 27 /* SX <- _PAGE_HWEXEC */
489 ori r11, r11, (MAS3_UW|MAS3_SW|MAS3_UR|MAS3_SR)@l /* set static perms */
490
491 /* update search PID in MAS6, AS = 0 */
492 mfspr r12, SPRN_PID0
493 slwi r12, r12, 16
494 mtspr SPRN_MAS6, r12
495
496 /* find the TLB index that caused the fault. It has to be here. */
497 tlbsx 0, r10
498
499 /* only update the perm bits, assume the RPN is fine */
500 mfspr r12, SPRN_MAS3
501 rlwimi r12, r11, 0, 20, 31
502 mtspr SPRN_MAS3,r12
503 tlbwe
504
505 /* Done...restore registers and get out of here. */
506 mfspr r11, SPRN_SPRG7R
507 mtcr r11
508 mfspr r13, SPRN_SPRG5R
509 mfspr r12, SPRN_SPRG4R
510 mfspr r11, SPRN_SPRG1
511 mfspr r10, SPRN_SPRG0
512 rfi /* Force context change */
513
5142:
515 /*
516 * The bailout. Restore registers to pre-exception conditions
517 * and call the heavyweights to help us out.
518 */
519 mfspr r11, SPRN_SPRG7R
520 mtcr r11
521 mfspr r13, SPRN_SPRG5R
522 mfspr r12, SPRN_SPRG4R
523 mfspr r11, SPRN_SPRG1
524 mfspr r10, SPRN_SPRG0
525 b data_access
526
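Using the find_pte sketch from above, the fast path of this handler is approximately the following (esr and ea being the SPRN_ESR and SPRN_DEAR values; current_pgdir is again illustrative):

	if ((esr & ESR_ST) && ea < TASK_SIZE) {
		unsigned int *ptep = find_pte(current_pgdir, ea);

		if (ptep && (*ptep & (_PAGE_RW | _PAGE_USER | _PAGE_HWWRITE))
				== (_PAGE_RW | _PAGE_USER)) {
			*ptep |= _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_HWWRITE;
			/* then tlbsx + MAS3 permission rewrite, and rfi */
			return;
		}
	}
	/* anything else falls through to data_access, the heavyweight path */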
527 /* Instruction Storage Interrupt */
528 INSTRUCTION_STORAGE_EXCEPTION
529
530 /* External Input Interrupt */
531 EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)
532
533 /* Alignment Interrupt */
534 ALIGNMENT_EXCEPTION
535
536 /* Program Interrupt */
537 PROGRAM_EXCEPTION
538
539 /* Floating Point Unavailable Interrupt */
540#ifdef CONFIG_PPC_FPU
541 FP_UNAVAILABLE_EXCEPTION
542#else
543#ifdef CONFIG_E200
544 /* E200 treats 'normal' floating point instructions as FP Unavail exception */
545 EXCEPTION(0x0800, FloatingPointUnavailable, program_check_exception, EXC_XFER_EE)
546#else
547 EXCEPTION(0x0800, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
548#endif
549#endif
550
551 /* System Call Interrupt */
552 START_EXCEPTION(SystemCall)
553 NORMAL_EXCEPTION_PROLOG
554 EXC_XFER_EE_LITE(0x0c00, DoSyscall)
555
556 /* Auxiliary Processor Unavailable Interrupt */
557 EXCEPTION(0x2900, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
558
559 /* Decrementer Interrupt */
560 DECREMENTER_EXCEPTION
561
562 /* Fixed Interval Timer Interrupt */
563 /* TODO: Add FIT support */
564 EXCEPTION(0x3100, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)
565
566 /* Watchdog Timer Interrupt */
567#ifdef CONFIG_BOOKE_WDT
568 CRITICAL_EXCEPTION(0x3200, WatchdogTimer, WatchdogException)
569#else
570 CRITICAL_EXCEPTION(0x3200, WatchdogTimer, unknown_exception)
571#endif
572
573 /* Data TLB Error Interrupt */
574 START_EXCEPTION(DataTLBError)
575 mtspr SPRN_SPRG0, r10 /* Save some working registers */
576 mtspr SPRN_SPRG1, r11
577 mtspr SPRN_SPRG4W, r12
578 mtspr SPRN_SPRG5W, r13
579 mfcr r11
580 mtspr SPRN_SPRG7W, r11
581 mfspr r10, SPRN_DEAR /* Get faulting address */
582
583 /* If we are faulting a kernel address, we have to use the
584 * kernel page tables.
585 */
586 lis r11, TASK_SIZE@h
587 ori r11, r11, TASK_SIZE@l
588 cmplw 5, r10, r11
589 blt 5, 3f
590 lis r11, swapper_pg_dir@h
591 ori r11, r11, swapper_pg_dir@l
592
593 mfspr r12,SPRN_MAS1 /* Set TID to 0 */
594 rlwinm r12,r12,0,16,1
595 mtspr SPRN_MAS1,r12
596
597 b 4f
598
599 /* Get the PGD for the current thread */
6003:
601 mfspr r11,SPRN_SPRG3
602 lwz r11,PGDIR(r11)
603
6044:
605 FIND_PTE
606 andi. r13, r11, _PAGE_PRESENT /* Is the page present? */
607 beq 2f /* Bail if not present */
608
609#ifdef CONFIG_PTE_64BIT
610 lwz r13, 0(r12)
611#endif
612 ori r11, r11, _PAGE_ACCESSED
613 stw r11, PTE_FLAGS_OFFSET(r12)
614
615 /* Jump to common tlb load */
616 b finish_tlb_load
6172:
618 /* The bailout. Restore registers to pre-exception conditions
619 * and call the heavyweights to help us out.
620 */
621 mfspr r11, SPRN_SPRG7R
622 mtcr r11
623 mfspr r13, SPRN_SPRG5R
624 mfspr r12, SPRN_SPRG4R
625 mfspr r11, SPRN_SPRG1
626 mfspr r10, SPRN_SPRG0
627 b data_access
628
629 /* Instruction TLB Error Interrupt */
630 /*
631 * Nearly the same as above, except we get our
632 * information from different registers and bail out
633 * to a different point.
634 */
635 START_EXCEPTION(InstructionTLBError)
636 mtspr SPRN_SPRG0, r10 /* Save some working registers */
637 mtspr SPRN_SPRG1, r11
638 mtspr SPRN_SPRG4W, r12
639 mtspr SPRN_SPRG5W, r13
640 mfcr r11
641 mtspr SPRN_SPRG7W, r11
642 mfspr r10, SPRN_SRR0 /* Get faulting address */
643
644 /* If we are faulting a kernel address, we have to use the
645 * kernel page tables.
646 */
647 lis r11, TASK_SIZE@h
648 ori r11, r11, TASK_SIZE@l
649 cmplw 5, r10, r11
650 blt 5, 3f
651 lis r11, swapper_pg_dir@h
652 ori r11, r11, swapper_pg_dir@l
653
654 mfspr r12,SPRN_MAS1 /* Set TID to 0 */
655 rlwinm r12,r12,0,16,1
656 mtspr SPRN_MAS1,r12
657
658 b 4f
659
660 /* Get the PGD for the current thread */
6613:
662 mfspr r11,SPRN_SPRG3
663 lwz r11,PGDIR(r11)
664
6654:
666 FIND_PTE
667 andi. r13, r11, _PAGE_PRESENT /* Is the page present? */
668 beq 2f /* Bail if not present */
669
670#ifdef CONFIG_PTE_64BIT
671 lwz r13, 0(r12)
672#endif
673 ori r11, r11, _PAGE_ACCESSED
674 stw r11, PTE_FLAGS_OFFSET(r12)
675
676 /* Jump to common TLB load point */
677 b finish_tlb_load
678
6792:
680 /* The bailout. Restore registers to pre-exception conditions
681 * and call the heavyweights to help us out.
682 */
683 mfspr r11, SPRN_SPRG7R
684 mtcr r11
685 mfspr r13, SPRN_SPRG5R
686 mfspr r12, SPRN_SPRG4R
687 mfspr r11, SPRN_SPRG1
688 mfspr r10, SPRN_SPRG0
689 b InstructionStorage
690
691#ifdef CONFIG_SPE
692 /* SPE Unavailable */
693 START_EXCEPTION(SPEUnavailable)
694 NORMAL_EXCEPTION_PROLOG
695 bne load_up_spe
696 addi r3,r1,STACK_FRAME_OVERHEAD
697 EXC_XFER_EE_LITE(0x2010, KernelSPE)
698#else
699 EXCEPTION(0x2020, SPEUnavailable, unknown_exception, EXC_XFER_EE)
700#endif /* CONFIG_SPE */
701
702 /* SPE Floating Point Data */
703#ifdef CONFIG_SPE
704 EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE);
705#else
706 EXCEPTION(0x2040, SPEFloatingPointData, unknown_exception, EXC_XFER_EE)
707#endif /* CONFIG_SPE */
708
709 /* SPE Floating Point Round */
710 EXCEPTION(0x2050, SPEFloatingPointRound, unknown_exception, EXC_XFER_EE)
711
712 /* Performance Monitor */
713 EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD)
714
715
716 /* Debug Interrupt */
717 DEBUG_EXCEPTION
718
719/*
720 * Local functions
721 */
722
723 /*
724 * Data TLB exceptions will bail out to this point
725 * if they can't resolve the lightweight TLB fault.
726 */
727data_access:
728 NORMAL_EXCEPTION_PROLOG
729 mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */
730 stw r5,_ESR(r11)
731 mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
732 andis. r10,r5,(ESR_ILK|ESR_DLK)@h
733 bne 1f
734 EXC_XFER_EE_LITE(0x0300, handle_page_fault)
7351:
736 addi r3,r1,STACK_FRAME_OVERHEAD
737 EXC_XFER_EE_LITE(0x0300, CacheLockingException)
738
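/*
 * A C sketch of the dispatch above (the handler names are the ones
 * used in the code; handle_page_fault's real entry point is asm glue):
 *
 *	esr  = mfspr(SPRN_ESR);			// why we faulted
 *	dear = mfspr(SPRN_DEAR);		// where we faulted
 *	if (esr & (ESR_ILK | ESR_DLK))		// cache-locking fault?
 *		CacheLockingException(regs, dear, esr);
 *	else
 *		handle_page_fault(regs, dear, esr);
 */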
739/*
741 * Both the instruction and data TLB miss get to this
742 * point to load the TLB.
743 * r10 - EA of fault
744 * r11 - TLB (info from Linux PTE)
745 * r12, r13 - available to use
746 * CR5 - results of addr < TASK_SIZE
747 * MAS0, MAS1 - loaded with proper value when we get here
748 * MAS2, MAS3 - will need additional info from Linux PTE
749 * Upon exit, we reload everything and RFI.
750 */
751finish_tlb_load:
752 /*
753 * We set execute, because we don't have the granularity to
754 * properly set this at the page level (Linux problem).
755	 * Many of these bits are software only. Bits we don't set
756	 * here are assumed to already have the appropriate value.
757 */
758
759 mfspr r12, SPRN_MAS2
760#ifdef CONFIG_PTE_64BIT
761 rlwimi r12, r11, 26, 24, 31 /* extract ...WIMGE from pte */
762#else
763 rlwimi r12, r11, 26, 27, 31 /* extract WIMGE from pte */
764#endif
765 mtspr SPRN_MAS2, r12
766
767 bge 5, 1f
768
769 /* is user addr */
770 andi. r12, r11, (_PAGE_USER | _PAGE_HWWRITE | _PAGE_HWEXEC)
771 andi. r10, r11, _PAGE_USER /* Test for _PAGE_USER */
772 srwi r10, r12, 1
773 or r12, r12, r10 /* Copy user perms into supervisor */
774 iseleq r12, 0, r12
775 b 2f
776
777 /* is kernel addr */
7781: rlwinm r12, r11, 31, 29, 29 /* Extract _PAGE_HWWRITE into SW */
779 ori r12, r12, (MAS3_SX | MAS3_SR)
780
781#ifdef CONFIG_PTE_64BIT
7822: rlwimi r12, r13, 24, 0, 7 /* grab RPN[32:39] */
783 rlwimi r12, r11, 24, 8, 19 /* grab RPN[40:51] */
784 mtspr SPRN_MAS3, r12
785BEGIN_FTR_SECTION
786 srwi r10, r13, 8 /* grab RPN[8:31] */
787 mtspr SPRN_MAS7, r10
788END_FTR_SECTION_IFSET(CPU_FTR_BIG_PHYS)
789#else
7902: rlwimi r11, r12, 0, 20, 31 /* Extract RPN from PTE and merge with perms */
791 mtspr SPRN_MAS3, r11
792#endif
793#ifdef CONFIG_E200
794 /* Round robin TLB1 entries assignment */
795 mfspr r12, SPRN_MAS0
796
797 /* Extract TLB1CFG(NENTRY) */
798 mfspr r11, SPRN_TLB1CFG
799 andi. r11, r11, 0xfff
800
801 /* Extract MAS0(NV) */
802 andi. r13, r12, 0xfff
803 addi r13, r13, 1
804 cmpw 0, r13, r11
805 addi r12, r12, 1
806
807 /* check if we need to wrap */
808 blt 7f
809
810 /* wrap back to first free tlbcam entry */
811 lis r13, tlbcam_index@ha
812 lwz r13, tlbcam_index@l(r13)
813 rlwimi r12, r13, 0, 20, 31
8147:
815 mtspr SPRN_MAS0,r12
816#endif /* CONFIG_E200 */
817
818 tlbwe
819
820 /* Done...restore registers and get out of here. */
821 mfspr r11, SPRN_SPRG7R
822 mtcr r11
823 mfspr r13, SPRN_SPRG5R
824 mfspr r12, SPRN_SPRG4R
825 mfspr r11, SPRN_SPRG1
826 mfspr r10, SPRN_SPRG0
827 rfi /* Force context change */
828
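/*
 * The permission setup in finish_tlb_load, as a C sketch.  The user
 * PTE bits are laid out so that a right shift by one mirrors the user
 * permissions into the supervisor ones:
 *
 *	if (ea < TASK_SIZE) {			// user address (the CR5 test)
 *		perm = pte & (_PAGE_USER | _PAGE_HWWRITE | _PAGE_HWEXEC);
 *		perm |= perm >> 1;		// copy user perms to supervisor
 *		if (!(pte & _PAGE_USER))
 *			perm = 0;		// the iseleq above
 *	} else {				// kernel address
 *		perm = MAS3_SX | MAS3_SR;
 *		if (pte & _PAGE_HWWRITE)
 *			perm |= MAS3_SW;	// the rlwinm above
 *	}
 *	// perm is then merged with the RPN and written to MAS3
 */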
829#ifdef CONFIG_SPE
830/* Note that the SPE support is closely modeled after the AltiVec
831 * support. Changes to one are likely to be applicable to the
832 * other! */
833load_up_spe:
834/*
835 * Disable SPE for the task which had SPE previously,
836 * and save its SPE registers in its thread_struct.
837 * Enables SPE for use in the kernel on return.
838 * On SMP we know the SPE units are free, since we give it up every
839 * switch. -- Kumar
840 */
841 mfmsr r5
842 oris r5,r5,MSR_SPE@h
843 mtmsr r5 /* enable use of SPE now */
844 isync
845/*
846 * For SMP, we don't do lazy SPE switching because it just gets too
847 * horrendously complex, especially when a task switches from one CPU
848 * to another. Instead we call giveup_spe in switch_to.
849 */
850#ifndef CONFIG_SMP
851 lis r3,last_task_used_spe@ha
852 lwz r4,last_task_used_spe@l(r3)
853 cmpi 0,r4,0
854 beq 1f
855 addi r4,r4,THREAD /* want THREAD of last_task_used_spe */
856 SAVE_32EVRS(0,r10,r4)
857 evxor evr10, evr10, evr10 /* clear out evr10 */
858 evmwumiaa evr10, evr10, evr10 /* evr10 <- ACC = 0 * 0 + ACC */
859 li r5,THREAD_ACC
860 evstddx evr10, r4, r5 /* save off accumulator */
861 lwz r5,PT_REGS(r4)
862 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
863 lis r10,MSR_SPE@h
864 andc r4,r4,r10 /* disable SPE for previous task */
865 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
8661:
867#endif /* CONFIG_SMP */
868 /* enable use of SPE after return */
869 oris r9,r9,MSR_SPE@h
870 mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */
871 li r4,1
872 li r10,THREAD_ACC
873 stw r4,THREAD_USED_SPE(r5)
874 evlddx evr4,r10,r5
875 evmra evr4,evr4
876 REST_32EVRS(0,r10,r5)
877#ifndef CONFIG_SMP
878 subi r4,r5,THREAD
879 stw r4,last_task_used_spe@l(r3)
880#endif /* CONFIG_SMP */
881 /* restore registers and return */
8822: REST_4GPRS(3, r11)
883 lwz r10,_CCR(r11)
884 REST_GPR(1, r11)
885 mtcr r10
886 lwz r10,_LINK(r11)
887 mtlr r10
888 REST_GPR(10, r11)
889 mtspr SPRN_SRR1,r9
890 mtspr SPRN_SRR0,r12
891 REST_GPR(9, r11)
892 REST_GPR(12, r11)
893 lwz r11,GPR11(r11)
894 SYNC
895 rfi
896
897/*
898 * SPE unavailable trap from kernel - print a message, but let
899 * the task use SPE in the kernel until it returns to user mode.
900 */
901KernelSPE:
902 lwz r3,_MSR(r1)
903 oris r3,r3,MSR_SPE@h
904 stw r3,_MSR(r1) /* enable use of SPE after return */
905 lis r3,87f@h
906 ori r3,r3,87f@l
907 mr r4,r2 /* current */
908 lwz r5,_NIP(r1)
909 bl printk
910 b ret_from_except
91187: .string "SPE used in kernel (task=%p, pc=%x) \n"
912 .align 4,0
913
914#endif /* CONFIG_SPE */
915
916/*
917 * Global functions
918 */
919
920/*
921 * extern void loadcam_entry(unsigned int index)
922 *
923 * Load TLBCAM[index] entry into the L2 CAM MMU
924 */
925_GLOBAL(loadcam_entry)
926 lis r4,TLBCAM@ha
927 addi r4,r4,TLBCAM@l
928 mulli r5,r3,20
929 add r3,r5,r4
930 lwz r4,0(r3)
931 mtspr SPRN_MAS0,r4
932 lwz r4,4(r3)
933 mtspr SPRN_MAS1,r4
934 lwz r4,8(r3)
935 mtspr SPRN_MAS2,r4
936 lwz r4,12(r3)
937 mtspr SPRN_MAS3,r4
938 tlbwe
939 isync
940 blr
941
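/*
 * A C sketch of loadcam_entry, assuming the 20-byte-per-entry TLBCAM
 * layout implied by the "mulli r5,r3,20" above (the struct and its
 * fifth, unused word are illustrative):
 *
 *	struct tlbcam_entry { u32 mas0, mas1, mas2, mas3, pad; };
 *	extern struct tlbcam_entry TLBCAM[];
 *
 *	void loadcam_entry(unsigned int index)
 *	{
 *		mtspr(SPRN_MAS0, TLBCAM[index].mas0);
 *		mtspr(SPRN_MAS1, TLBCAM[index].mas1);
 *		mtspr(SPRN_MAS2, TLBCAM[index].mas2);
 *		mtspr(SPRN_MAS3, TLBCAM[index].mas3);
 *		asm volatile("tlbwe; isync");
 *	}
 */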
942/*
943 * extern void giveup_altivec(struct task_struct *prev)
944 *
945 * The e500 core does not have an AltiVec unit.
946 */
947_GLOBAL(giveup_altivec)
948 blr
949
950#ifdef CONFIG_SPE
951/*
952 * extern void giveup_spe(struct task_struct *prev)
953 *
954 */
955_GLOBAL(giveup_spe)
956 mfmsr r5
957 oris r5,r5,MSR_SPE@h
958 SYNC
959 mtmsr r5 /* enable use of SPE now */
960 isync
961 cmpi 0,r3,0
962 beqlr- /* if no previous owner, done */
963 addi r3,r3,THREAD /* want THREAD of task */
964 lwz r5,PT_REGS(r3)
965 cmpi 0,r5,0
966 SAVE_32EVRS(0, r4, r3)
967 evxor evr6, evr6, evr6 /* clear out evr6 */
968 evmwumiaa evr6, evr6, evr6 /* evr6 <- ACC = 0 * 0 + ACC */
969 li r4,THREAD_ACC
970 evstddx evr6, r4, r3 /* save off accumulator */
971 mfspr r6,SPRN_SPEFSCR
972 stw r6,THREAD_SPEFSCR(r3) /* save spefscr register value */
973 beq 1f
974 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
975 lis r3,MSR_SPE@h
976 andc r4,r4,r3 /* disable SPE for previous task */
977 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
9781:
979#ifndef CONFIG_SMP
980 li r5,0
981 lis r4,last_task_used_spe@ha
982 stw r5,last_task_used_spe@l(r4)
983#endif /* CONFIG_SMP */
984 blr
985#endif /* CONFIG_SPE */
986
987/*
988 * extern void giveup_fpu(struct task_struct *prev)
989 *
990 * Not all FSL Book-E cores have an FPU
991 */
992#ifndef CONFIG_PPC_FPU
993_GLOBAL(giveup_fpu)
994 blr
995#endif
996
997/*
998 * extern void abort(void)
999 *
1000 * At present, this routine just applies a system reset.
1001 */
1002_GLOBAL(abort)
1003 li r13,0
1004 mtspr SPRN_DBCR0,r13 /* disable all debug events */
1005 mfmsr r13
1006 ori r13,r13,MSR_DE@l /* Enable Debug Events */
1007 mtmsr r13
1008 mfspr r13,SPRN_DBCR0
1009 lis r13,(DBCR0_IDM|DBCR0_RST_CHIP)@h
1010 mtspr SPRN_DBCR0,r13
1011
1012_GLOBAL(set_context)
1013
1014#ifdef CONFIG_BDI_SWITCH
1015 /* Context switch the PTE pointer for the Abatron BDI2000.
1016 * The PGDIR is the second parameter.
1017 */
1018 lis r5, abatron_pteptrs@h
1019 ori r5, r5, abatron_pteptrs@l
1020 stw r4, 0x4(r5)
1021#endif
1022 mtspr SPRN_PID,r3
1023 isync /* Force context change */
1024 blr
1025
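/*
 * A C sketch of set_context (the pointer slot written above matches
 * the abatron_pteptrs layout at the end of this file):
 *
 *	void set_context(unsigned long id, pgd_t *pgd)
 *	{
 *	#ifdef CONFIG_BDI_SWITCH
 *		abatron_pteptrs[1] = pgd;	// second slot: user pgdir
 *	#endif
 *		mtspr(SPRN_PID, id);
 *		isync();			// force the context change
 *	}
 */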
1026/*
1027 * We put a few things here that have to be page-aligned. This stuff
1028 * goes at the beginning of the data segment, which is page-aligned.
1029 */
1030 .data
1031 .align 12
1032 .globl sdata
1033sdata:
1034 .globl empty_zero_page
1035empty_zero_page:
1036 .space 4096
1037 .globl swapper_pg_dir
1038swapper_pg_dir:
1039 .space 4096
1040
1041/* Reserve 4k for the critical exception stack and 4k for the machine
1042 * check stack per CPU, for kernel-mode exceptions */
1043 .section .bss
1044 .align 12
1045exception_stack_bottom:
1046 .space BOOKE_EXCEPTION_STACK_SIZE * NR_CPUS
1047 .globl exception_stack_top
1048exception_stack_top:
1049
1050/*
1051 * This space gets a copy of optional info passed to us by the bootstrap
1052 * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
1053 */
1054 .globl cmd_line
1055cmd_line:
1056 .space 512
1057
1058/*
1059 * Room for two PTE pointers, usually the kernel and current user pointers
1060 * to their respective root page table.
1061 */
1062abatron_pteptrs:
1063 .space 8
diff --git a/arch/powerpc/kernel/idle_6xx.S b/arch/powerpc/kernel/idle_6xx.S
new file mode 100644
index 000000000000..444fdcc769f1
--- /dev/null
+++ b/arch/powerpc/kernel/idle_6xx.S
@@ -0,0 +1,233 @@
1/*
2 * This file contains the power_save function for 6xx & 7xxx CPUs
3 * rewritten in assembler
4 *
5 * Warning! This code assumes that if your machine has a 750fx
6 * it will have PLL 1 set to low speed mode (used during NAP/DOZE).
7 * If this is not the case, some additional changes will have to
8 * be made to check a runtime variable (a bit like powersave-nap).
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#include <linux/config.h>
17#include <linux/threads.h>
18#include <asm/reg.h>
19#include <asm/page.h>
20#include <asm/cputable.h>
21#include <asm/thread_info.h>
22#include <asm/ppc_asm.h>
23#include <asm/asm-offsets.h>
24
25#undef DEBUG
26
27 .text
28
29/*
30 * Init idle, called at early CPU setup time from head.S for each CPU
31 * Make sure no rest of NAP mode remains in HID0, save default
32 * values for some CPU specific registers. Called with r24
33 * containing CPU number and r3 reloc offset
34 */
35_GLOBAL(init_idle_6xx)
36BEGIN_FTR_SECTION
37 mfspr r4,SPRN_HID0
38 rlwinm r4,r4,0,10,8 /* Clear NAP */
39 mtspr SPRN_HID0, r4
40 b 1f
41END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
42 blr
431:
44 slwi r5,r24,2
45 add r5,r5,r3
46BEGIN_FTR_SECTION
47 mfspr r4,SPRN_MSSCR0
48 addis r6,r5, nap_save_msscr0@ha
49 stw r4,nap_save_msscr0@l(r6)
50END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
51BEGIN_FTR_SECTION
52 mfspr r4,SPRN_HID1
53 addis r6,r5,nap_save_hid1@ha
54 stw r4,nap_save_hid1@l(r6)
55END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
56 blr
57
58/*
59 * Here is the ppc6xx_idle function. This could eventually be
60 * split into several functions, changing the function pointer
61 * depending on the various features.
62 */
63_GLOBAL(ppc6xx_idle)
64 /* Check if we can nap or doze, put HID0 mask in r3
65 */
66 lis r3, 0
67BEGIN_FTR_SECTION
68 lis r3,HID0_DOZE@h
69END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
70BEGIN_FTR_SECTION
71 /* We must dynamically check for the NAP feature as it
72 * can be cleared by CPU init after the fixups are done
73 */
74 lis r4,cur_cpu_spec@ha
75 lwz r4,cur_cpu_spec@l(r4)
76 lwz r4,CPU_SPEC_FEATURES(r4)
77 andi. r0,r4,CPU_FTR_CAN_NAP
78 beq 1f
79 /* Now check if user or arch enabled NAP mode */
80 lis r4,powersave_nap@ha
81 lwz r4,powersave_nap@l(r4)
82 cmpwi 0,r4,0
83 beq 1f
84 lis r3,HID0_NAP@h
851:
86END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
87 cmpwi 0,r3,0
88 beqlr
89
90 /* Clear MSR:EE */
91 mfmsr r7
92 rlwinm r0,r7,0,17,15
93 mtmsr r0
94
95 /* Check current_thread_info()->flags */
96 rlwinm r4,r1,0,0,18
97 lwz r4,TI_FLAGS(r4)
98 andi. r0,r4,_TIF_NEED_RESCHED
99 beq 1f
100 mtmsr r7 /* out of line this ? */
101 blr
1021:
103 /* Some pre-nap cleanups needed on some CPUs */
104 andis. r0,r3,HID0_NAP@h
105 beq 2f
106BEGIN_FTR_SECTION
107 /* Disable L2 prefetch on some 745x and try to ensure
108 * L2 prefetch engines are idle. As explained by errata
109 * text, we can't be sure they are; we just hope very hard
110 * that this will be enough. At least I noticed Apple
111 * doesn't even bother doing the dcbf's here...
112 */
113 mfspr r4,SPRN_MSSCR0
114 rlwinm r4,r4,0,0,29
115 sync
116 mtspr SPRN_MSSCR0,r4
117 sync
118 isync
119 lis r4,KERNELBASE@h
120 dcbf 0,r4
121 dcbf 0,r4
122 dcbf 0,r4
123 dcbf 0,r4
124END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
125#ifdef DEBUG
126 lis r6,nap_enter_count@ha
127 lwz r4,nap_enter_count@l(r6)
128 addi r4,r4,1
129 stw r4,nap_enter_count@l(r6)
130#endif
1312:
132BEGIN_FTR_SECTION
133 /* Go to low speed mode on some 750FX */
134 lis r4,powersave_lowspeed@ha
135 lwz r4,powersave_lowspeed@l(r4)
136 cmpwi 0,r4,0
137 beq 1f
138 mfspr r4,SPRN_HID1
139 oris r4,r4,0x0001
140 mtspr SPRN_HID1,r4
1411:
142END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
143
144 /* Go to NAP or DOZE now */
145 mfspr r4,SPRN_HID0
146 lis r5,(HID0_NAP|HID0_SLEEP)@h
147BEGIN_FTR_SECTION
148 oris r5,r5,HID0_DOZE@h
149END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
150 andc r4,r4,r5
151 or r4,r4,r3
152BEGIN_FTR_SECTION
153 oris r4,r4,HID0_DPM@h /* that should be done once for all */
154END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
155 mtspr SPRN_HID0,r4
156BEGIN_FTR_SECTION
157 DSSALL
158 sync
159END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
160	ori	r7,r7,MSR_EE	/* Could be omitted (already set) */
161 oris r7,r7,MSR_POW@h
162 sync
163 isync
164 mtmsr r7
165 isync
166 sync
167 blr
168
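/*
 * The HID0 mask selection at the top of ppc6xx_idle, as a C sketch:
 *
 *	mask = doze_possible ? HID0_DOZE : 0;	// fixed up at boot
 *	if ((cur_cpu_spec->cpu_features & CPU_FTR_CAN_NAP) && powersave_nap)
 *		mask = HID0_NAP;		// dynamic check; NAP wins
 *	if (!mask)
 *		return;				// can neither doze nor nap
 */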
169/*
170 * Return from NAP/DOZE mode, restore some CPU specific registers,
171 * we are called with DR/IR still off and r2 containing physical
172 * address of current.
173 */
174_GLOBAL(power_save_6xx_restore)
175 mfspr r11,SPRN_HID0
176 rlwinm. r11,r11,0,10,8 /* Clear NAP & copy NAP bit !state to cr1 EQ */
177 cror 4*cr1+eq,4*cr0+eq,4*cr0+eq
178BEGIN_FTR_SECTION
179 rlwinm r11,r11,0,9,7 /* Clear DOZE */
180END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
181 mtspr SPRN_HID0, r11
182
183#ifdef DEBUG
184 beq cr1,1f
185 lis r11,(nap_return_count-KERNELBASE)@ha
186 lwz r9,nap_return_count@l(r11)
187 addi r9,r9,1
188 stw r9,nap_return_count@l(r11)
1891:
190#endif
191
192 rlwinm r9,r1,0,0,18
193 tophys(r9,r9)
194 lwz r11,TI_CPU(r9)
195 slwi r11,r11,2
196 /* Todo make sure all these are in the same page
197 * and load r22 (@ha part + CPU offset) only once
198 */
199BEGIN_FTR_SECTION
200 beq cr1,1f
201 addis r9,r11,(nap_save_msscr0-KERNELBASE)@ha
202 lwz r9,nap_save_msscr0@l(r9)
203 mtspr SPRN_MSSCR0, r9
204 sync
205 isync
2061:
207END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
208BEGIN_FTR_SECTION
209 addis r9,r11,(nap_save_hid1-KERNELBASE)@ha
210 lwz r9,nap_save_hid1@l(r9)
211 mtspr SPRN_HID1, r9
212END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
213 b transfer_to_handler_cont
214
215 .data
216
217_GLOBAL(nap_save_msscr0)
218 .space 4*NR_CPUS
219
220_GLOBAL(nap_save_hid1)
221 .space 4*NR_CPUS
222
223_GLOBAL(powersave_nap)
224 .long 0
225_GLOBAL(powersave_lowspeed)
226 .long 0
227
228#ifdef DEBUG
229_GLOBAL(nap_enter_count)
230 .space 4
231_GLOBAL(nap_return_count)
232 .space 4
233#endif
diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S
new file mode 100644
index 000000000000..1494e2f177f7
--- /dev/null
+++ b/arch/powerpc/kernel/idle_power4.S
@@ -0,0 +1,78 @@
1/*
2 * This file contains the power_save function for POWER4-class CPUs,
3 * rewritten in assembler.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#include <linux/config.h>
17#include <linux/threads.h>
18#include <asm/processor.h>
19#include <asm/page.h>
20#include <asm/cputable.h>
21#include <asm/thread_info.h>
22#include <asm/ppc_asm.h>
23#include <asm/asm-offsets.h>
24
25#undef DEBUG
26
27 .text
28
29/*
30 * Here is the power4_idle function. This could eventually be
31 * split into several functions, changing the function pointer
32 * depending on the various features.
33 */
34_GLOBAL(power4_idle)
35BEGIN_FTR_SECTION
36 blr
37END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
38 /* We must dynamically check for the NAP feature as it
39 * can be cleared by CPU init after the fixups are done
40 */
41 LOADBASE(r3,cur_cpu_spec)
42 ld r4,OFF(cur_cpu_spec)(r3)
43 ld r4,CPU_SPEC_FEATURES(r4)
44 andi. r0,r4,CPU_FTR_CAN_NAP
45 beqlr
46 /* Now check if user or arch enabled NAP mode */
47 LOADBASE(r3,powersave_nap)
48 lwz r4,OFF(powersave_nap)(r3)
49 cmpwi 0,r4,0
50 beqlr
51
52 /* Clear MSR:EE */
53 mfmsr r7
54 li r4,0
55 ori r4,r4,MSR_EE
56 andc r0,r7,r4
57 mtmsrd r0
58
59 /* Check current_thread_info()->flags */
60 clrrdi r4,r1,THREAD_SHIFT
61 ld r4,TI_FLAGS(r4)
62 andi. r0,r4,_TIF_NEED_RESCHED
63 beq 1f
64 mtmsrd r7 /* out of line this ? */
65 blr
661:
67 /* Go to NAP now */
68BEGIN_FTR_SECTION
69 DSSALL
70 sync
71END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
72 oris r7,r7,MSR_POW@h
73 sync
74 isync
75 mtmsrd r7
76 isync
77 sync
78 blr
diff --git a/arch/powerpc/kernel/init_task.c b/arch/powerpc/kernel/init_task.c
new file mode 100644
index 000000000000..941043ae040f
--- /dev/null
+++ b/arch/powerpc/kernel/init_task.c
@@ -0,0 +1,36 @@
1#include <linux/mm.h>
2#include <linux/module.h>
3#include <linux/sched.h>
4#include <linux/init.h>
5#include <linux/init_task.h>
6#include <linux/fs.h>
7#include <linux/mqueue.h>
8#include <asm/uaccess.h>
9
10static struct fs_struct init_fs = INIT_FS;
11static struct files_struct init_files = INIT_FILES;
12static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
13static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
14struct mm_struct init_mm = INIT_MM(init_mm);
15
16EXPORT_SYMBOL(init_mm);
17
18/*
19 * Initial thread structure.
20 *
21 * We need to make sure that this is 16384-byte aligned due to the
22 * way process stacks are handled. This is done by having a special
23 * "init_task" linker map entry.
24 */
25union thread_union init_thread_union
26 __attribute__((__section__(".data.init_task"))) =
27 { INIT_THREAD_INFO(init_task) };
28
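/*
 * A sketch of the linker map entry the comment above refers to, as it
 * is conventionally written in the architecture's vmlinux.lds.S
 * (illustrative, not the literal script):
 *
 *	. = ALIGN(16384);
 *	.data.init_task : { *(.data.init_task) }
 */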
29/*
30 * Initial task structure.
31 *
32 * All other task structs will be allocated on slabs in fork.c
33 */
34struct task_struct init_task = INIT_TASK(init_task);
35
36EXPORT_SYMBOL(init_task);
diff --git a/arch/powerpc/kernel/lparmap.c b/arch/powerpc/kernel/lparmap.c
new file mode 100644
index 000000000000..b81de286df5e
--- /dev/null
+++ b/arch/powerpc/kernel/lparmap.c
@@ -0,0 +1,31 @@
1/*
2 * Copyright (C) 2005 Stephen Rothwell IBM Corp.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#include <asm/mmu.h>
10#include <asm/page.h>
11#include <asm/iSeries/LparMap.h>
12
13const struct LparMap __attribute__((__section__(".text"))) xLparMap = {
14 .xNumberEsids = HvEsidsToMap,
15 .xNumberRanges = HvRangesToMap,
16 .xSegmentTableOffs = STAB0_PAGE,
17
18 .xEsids = {
19 { .xKernelEsid = GET_ESID(KERNELBASE),
20 .xKernelVsid = KERNEL_VSID(KERNELBASE), },
21 { .xKernelEsid = GET_ESID(VMALLOCBASE),
22 .xKernelVsid = KERNEL_VSID(VMALLOCBASE), },
23 },
24
25 .xRanges = {
26 { .xPages = HvPagesToMap,
27 .xOffset = 0,
28 .xVPN = KERNEL_VSID(KERNELBASE) << (SID_SHIFT - PAGE_SHIFT),
29 },
30 },
31};
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
new file mode 100644
index 000000000000..3bedb532aed9
--- /dev/null
+++ b/arch/powerpc/kernel/misc_32.S
@@ -0,0 +1,1037 @@
1/*
2 * This file contains miscellaneous low-level functions.
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
6 * and Paul Mackerras.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 */
14
15#include <linux/config.h>
16#include <linux/sys.h>
17#include <asm/unistd.h>
18#include <asm/errno.h>
19#include <asm/reg.h>
20#include <asm/page.h>
21#include <asm/cache.h>
22#include <asm/cputable.h>
23#include <asm/mmu.h>
24#include <asm/ppc_asm.h>
25#include <asm/thread_info.h>
26#include <asm/asm-offsets.h>
27
28 .text
29
30 .align 5
31_GLOBAL(__delay)
32 cmpwi 0,r3,0
33 mtctr r3
34 beqlr
351: bdnz 1b
36 blr
37
38/*
39 * This returns the high 64 bits of the product of two 64-bit numbers.
40 */
41_GLOBAL(mulhdu)
42 cmpwi r6,0
43 cmpwi cr1,r3,0
44 mr r10,r4
45 mulhwu r4,r4,r5
46 beq 1f
47 mulhwu r0,r10,r6
48 mullw r7,r10,r5
49 addc r7,r0,r7
50 addze r4,r4
511: beqlr cr1 /* all done if high part of A is 0 */
52 mr r10,r3
53 mullw r9,r3,r5
54 mulhwu r3,r3,r5
55 beq 2f
56 mullw r0,r10,r6
57 mulhwu r8,r10,r6
58 addc r7,r0,r7
59 adde r4,r4,r8
60 addze r3,r3
612: addc r4,r4,r9
62 addze r3,r3
63 blr
64
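/*
 * A C sketch of mulhdu: the high 64 bits of a 64x64-bit multiply,
 * built from 32x32->64 partial products just like the code above
 * (u32/u64 as in <linux/types.h>; ah/al are the halves of a,
 * likewise bh/bl for b):
 *
 *	u64 mulhdu(u64 a, u64 b)
 *	{
 *		u32 ah = a >> 32, al = a, bh = b >> 32, bl = b;
 *		u64 t = (u64)ah * bl + (((u64)al * bl) >> 32);
 *		u64 u = (u64)al * bh + (u32)t;
 *		return (u64)ah * bh + (t >> 32) + (u >> 32);
 *	}
 */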
65/*
66 * Returns (address we're running at) - (address we were linked at)
67 * for use before the text and data are mapped to KERNELBASE.
68 */
69_GLOBAL(reloc_offset)
70 mflr r0
71 bl 1f
721: mflr r3
73 LOADADDR(r4,1b)
74 subf r3,r4,r3
75 mtlr r0
76 blr
77
78/*
79 * add_reloc_offset(x) returns x + reloc_offset().
80 */
81_GLOBAL(add_reloc_offset)
82 mflr r0
83 bl 1f
841: mflr r5
85 LOADADDR(r4,1b)
86 subf r5,r4,r5
87 add r3,r3,r5
88 mtlr r0
89 blr
90
91/*
92 * sub_reloc_offset(x) returns x - reloc_offset().
93 */
94_GLOBAL(sub_reloc_offset)
95 mflr r0
96 bl 1f
971: mflr r5
98 lis r4,1b@ha
99 addi r4,r4,1b@l
100 subf r5,r4,r5
101 subf r3,r5,r3
102 mtlr r0
103 blr
104
105/*
106 * reloc_got2 runs through the .got2 section adding an offset
107 * to each entry.
108 */
109_GLOBAL(reloc_got2)
110 mflr r11
111 lis r7,__got2_start@ha
112 addi r7,r7,__got2_start@l
113 lis r8,__got2_end@ha
114 addi r8,r8,__got2_end@l
115 subf r8,r7,r8
116 srwi. r8,r8,2
117 beqlr
118 mtctr r8
119 bl 1f
1201: mflr r0
121 lis r4,1b@ha
122 addi r4,r4,1b@l
123 subf r0,r4,r0
124 add r7,r0,r7
1252: lwz r0,0(r7)
126 add r0,r0,r3
127 stw r0,0(r7)
128 addi r7,r7,4
129 bdnz 2b
130 mtlr r11
131 blr
132
133/*
134 * identify_cpu,
135 * called with r3 = data offset and r4 = CPU number
136 * doesn't change r3
137 */
138_GLOBAL(identify_cpu)
139 addis r8,r3,cpu_specs@ha
140 addi r8,r8,cpu_specs@l
141 mfpvr r7
1421:
143 lwz r5,CPU_SPEC_PVR_MASK(r8)
144 and r5,r5,r7
145 lwz r6,CPU_SPEC_PVR_VALUE(r8)
146 cmplw 0,r6,r5
147 beq 1f
148 addi r8,r8,CPU_SPEC_ENTRY_SIZE
149 b 1b
1501:
151 addis r6,r3,cur_cpu_spec@ha
152 addi r6,r6,cur_cpu_spec@l
153 sub r8,r8,r3
154 stw r8,0(r6)
155 blr
156
157/*
158 * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
159 * and writes nop's over sections of code that don't apply for this cpu.
160 * r3 = data offset (not changed)
161 */
162_GLOBAL(do_cpu_ftr_fixups)
163 /* Get CPU 0 features */
164 addis r6,r3,cur_cpu_spec@ha
165 addi r6,r6,cur_cpu_spec@l
166 lwz r4,0(r6)
167 add r4,r4,r3
168 lwz r4,CPU_SPEC_FEATURES(r4)
169
170 /* Get the fixup table */
171 addis r6,r3,__start___ftr_fixup@ha
172 addi r6,r6,__start___ftr_fixup@l
173 addis r7,r3,__stop___ftr_fixup@ha
174 addi r7,r7,__stop___ftr_fixup@l
175
176 /* Do the fixup */
1771: cmplw 0,r6,r7
178 bgelr
179 addi r6,r6,16
180 lwz r8,-16(r6) /* mask */
181 and r8,r8,r4
182 lwz r9,-12(r6) /* value */
183 cmplw 0,r8,r9
184 beq 1b
185 lwz r8,-8(r6) /* section begin */
186 lwz r9,-4(r6) /* section end */
187 subf. r9,r8,r9
188 beq 1b
189 /* write nops over the section of code */
190 /* todo: if large section, add a branch at the start of it */
191 srwi r9,r9,2
192 mtctr r9
193 add r8,r8,r3
194 lis r0,0x60000000@h /* nop */
1953: stw r0,0(r8)
196 andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
197 beq 2f
198 dcbst 0,r8 /* suboptimal, but simpler */
199 sync
200 icbi 0,r8
2012: addi r8,r8,4
202 bdnz 3b
203 sync /* additional sync needed on g4 */
204 isync
205 b 1b
206
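/*
 * Conceptually, a C sketch of the fixup loop above (the 16-byte entry
 * layout is the four words read at -16..-4; the struct name is
 * illustrative):
 *
 *	struct ftr_fixup { u32 mask, value; u32 *start, *end; };
 *
 *	for (f = __start___ftr_fixup; f < __stop___ftr_fixup; f++) {
 *		if ((features & f->mask) == f->value)
 *			continue;		// feature matches, keep code
 *		for (p = f->start; p < f->end; p++)
 *			*p = 0x60000000;	// overwrite with nops
 *	}
 *	// (plus the dcbst/icbi per word to keep the icache coherent)
 */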
207/*
208 * call_setup_cpu - call the setup_cpu function for this cpu
209 * r3 = data offset, r24 = cpu number
210 *
211 * Setup function is called with:
212 * r3 = data offset
213 * r4 = ptr to CPU spec (relocated)
214 */
215_GLOBAL(call_setup_cpu)
216 addis r4,r3,cur_cpu_spec@ha
217 addi r4,r4,cur_cpu_spec@l
218 lwz r4,0(r4)
219 add r4,r4,r3
220 lwz r5,CPU_SPEC_SETUP(r4)
221 cmpi 0,r5,0
222 add r5,r5,r3
223 beqlr
224 mtctr r5
225 bctr
226
227#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx)
228
229/* This gets called by via-pmu.c to switch the PLL selection
230 * on 750fx CPU. This function should really be moved to some
231 * other place (as should most of the cpufreq code in via-pmu).
232 */
233_GLOBAL(low_choose_750fx_pll)
234 /* Clear MSR:EE */
235 mfmsr r7
236 rlwinm r0,r7,0,17,15
237 mtmsr r0
238
239 /* If switching to PLL1, disable HID0:BTIC */
240 cmplwi cr0,r3,0
241 beq 1f
242 mfspr r5,SPRN_HID0
243 rlwinm r5,r5,0,27,25
244 sync
245 mtspr SPRN_HID0,r5
246 isync
247 sync
248
2491:
250 /* Calc new HID1 value */
251 mfspr r4,SPRN_HID1 /* Build a HID1:PS bit from parameter */
252 rlwinm r5,r3,16,15,15 /* Clear out HID1:PS from value read */
253	rlwinm	r4,r4,0,16,14	/* Could I have used rlwimi here? */
254 or r4,r4,r5
255 mtspr SPRN_HID1,r4
256
257 /* Store new HID1 image */
258 rlwinm r6,r1,0,0,18
259 lwz r6,TI_CPU(r6)
260 slwi r6,r6,2
261 addis r6,r6,nap_save_hid1@ha
262 stw r4,nap_save_hid1@l(r6)
263
264 /* If switching to PLL0, enable HID0:BTIC */
265 cmplwi cr0,r3,0
266 bne 1f
267 mfspr r5,SPRN_HID0
268 ori r5,r5,HID0_BTIC
269 sync
270 mtspr SPRN_HID0,r5
271 isync
272 sync
273
2741:
275 /* Return */
276 mtmsr r7
277 blr
278
279_GLOBAL(low_choose_7447a_dfs)
280 /* Clear MSR:EE */
281 mfmsr r7
282 rlwinm r0,r7,0,17,15
283 mtmsr r0
284
285 /* Calc new HID1 value */
286 mfspr r4,SPRN_HID1
287 insrwi r4,r3,1,9 /* insert parameter into bit 9 */
288 sync
289 mtspr SPRN_HID1,r4
290 sync
291 isync
292
293 /* Return */
294 mtmsr r7
295 blr
296
297#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */
298
299/*
300 * Complement (AND off) the mask bits in the MSR, then "or" some values on.
301 * _nmask_and_or_msr(nmask, value_to_or)
302 */
303_GLOBAL(_nmask_and_or_msr)
304 mfmsr r0 /* Get current msr */
305 andc r0,r0,r3 /* And off the bits set in r3 (first parm) */
306 or r0,r0,r4 /* Or on the bits in r4 (second parm) */
307 SYNC /* Some chip revs have problems here... */
308 mtmsr r0 /* Update machine state */
309 isync
310 blr /* Done */
311
312
313/*
314 * Flush MMU TLB
315 */
316_GLOBAL(_tlbia)
317#if defined(CONFIG_40x)
318 sync /* Flush to memory before changing mapping */
319 tlbia
320 isync /* Flush shadow TLB */
321#elif defined(CONFIG_44x)
322 li r3,0
323 sync
324
325 /* Load high watermark */
326 lis r4,tlb_44x_hwater@ha
327 lwz r5,tlb_44x_hwater@l(r4)
328
3291: tlbwe r3,r3,PPC44x_TLB_PAGEID
330 addi r3,r3,1
331 cmpw 0,r3,r5
332 ble 1b
333
334 isync
335#elif defined(CONFIG_FSL_BOOKE)
336 /* Invalidate all entries in TLB0 */
337 li r3, 0x04
338 tlbivax 0,3
339 /* Invalidate all entries in TLB1 */
340 li r3, 0x0c
341 tlbivax 0,3
342 /* Invalidate all entries in TLB2 */
343 li r3, 0x14
344 tlbivax 0,3
345 /* Invalidate all entries in TLB3 */
346 li r3, 0x1c
347 tlbivax 0,3
348 msync
349#ifdef CONFIG_SMP
350 tlbsync
351#endif /* CONFIG_SMP */
352#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
353#if defined(CONFIG_SMP)
354 rlwinm r8,r1,0,0,18
355 lwz r8,TI_CPU(r8)
356 oris r8,r8,10
357 mfmsr r10
358 SYNC
359 rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
360 rlwinm r0,r0,0,28,26 /* clear DR */
361 mtmsr r0
362 SYNC_601
363 isync
364 lis r9,mmu_hash_lock@h
365 ori r9,r9,mmu_hash_lock@l
366 tophys(r9,r9)
36710: lwarx r7,0,r9
368 cmpwi 0,r7,0
369 bne- 10b
370 stwcx. r8,0,r9
371 bne- 10b
372 sync
373 tlbia
374 sync
375 TLBSYNC
376 li r0,0
377 stw r0,0(r9) /* clear mmu_hash_lock */
378 mtmsr r10
379 SYNC_601
380 isync
381#else /* CONFIG_SMP */
382 sync
383 tlbia
384 sync
385#endif /* CONFIG_SMP */
386#endif /* ! defined(CONFIG_40x) */
387 blr
388
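/*
 * A sketch decoding the magic constants in the FSL_BOOKE branch above
 * (macro names are illustrative; the fields travel in the effective
 * address handed to tlbivax, cf. the TLBSEL comments in _tlbie below):
 *
 *	#define TLBIVAX_INV_ALL		0x04
 *	#define TLBIVAX_TLBSEL(n)	((n) << 3)
 *
 *	// 0x04, 0x0c, 0x14, 0x1c = invalidate all of TLB0..TLB3
 *	tlbivax(TLBIVAX_INV_ALL | TLBIVAX_TLBSEL(n));
 */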
389/*
390 * Flush MMU TLB for a particular address
391 */
392_GLOBAL(_tlbie)
393#if defined(CONFIG_40x)
394 tlbsx. r3, 0, r3
395 bne 10f
396 sync
397 /* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
398 * Since 25 is the V bit in the TLB_TAG, loading this value will invalidate
399 * the TLB entry. */
400 tlbwe r3, r3, TLB_TAG
401 isync
40210:
403#elif defined(CONFIG_44x)
404 mfspr r4,SPRN_MMUCR
405 mfspr r5,SPRN_PID /* Get PID */
406 rlwimi r4,r5,0,24,31 /* Set TID */
407 mtspr SPRN_MMUCR,r4
408
409 tlbsx. r3, 0, r3
410 bne 10f
411 sync
412 /* There are only 64 TLB entries, so r3 < 64,
413 * which means bit 22, is clear. Since 22 is
414 * the V bit in the TLB_PAGEID, loading this
415 * value will invalidate the TLB entry.
416 */
417 tlbwe r3, r3, PPC44x_TLB_PAGEID
418 isync
41910:
420#elif defined(CONFIG_FSL_BOOKE)
421 rlwinm r4, r3, 0, 0, 19
422 ori r5, r4, 0x08 /* TLBSEL = 1 */
423 ori r6, r4, 0x10 /* TLBSEL = 2 */
424 ori r7, r4, 0x18 /* TLBSEL = 3 */
425 tlbivax 0, r4
426 tlbivax 0, r5
427 tlbivax 0, r6
428 tlbivax 0, r7
429 msync
430#if defined(CONFIG_SMP)
431 tlbsync
432#endif /* CONFIG_SMP */
433#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
434#if defined(CONFIG_SMP)
435 rlwinm r8,r1,0,0,18
436 lwz r8,TI_CPU(r8)
437 oris r8,r8,11
438 mfmsr r10
439 SYNC
440 rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
441 rlwinm r0,r0,0,28,26 /* clear DR */
442 mtmsr r0
443 SYNC_601
444 isync
445 lis r9,mmu_hash_lock@h
446 ori r9,r9,mmu_hash_lock@l
447 tophys(r9,r9)
44810: lwarx r7,0,r9
449 cmpwi 0,r7,0
450 bne- 10b
451 stwcx. r8,0,r9
452 bne- 10b
453 eieio
454 tlbie r3
455 sync
456 TLBSYNC
457 li r0,0
458 stw r0,0(r9) /* clear mmu_hash_lock */
459 mtmsr r10
460 SYNC_601
461 isync
462#else /* CONFIG_SMP */
463 tlbie r3
464 sync
465#endif /* CONFIG_SMP */
466#endif /* ! CONFIG_40x */
467 blr
468
469/*
470 * Flush instruction cache.
471 * This is a no-op on the 601.
472 */
473_GLOBAL(flush_instruction_cache)
474#if defined(CONFIG_8xx)
475 isync
476 lis r5, IDC_INVALL@h
477 mtspr SPRN_IC_CST, r5
478#elif defined(CONFIG_4xx)
479#ifdef CONFIG_403GCX
480 li r3, 512
481 mtctr r3
482 lis r4, KERNELBASE@h
4831: iccci 0, r4
484 addi r4, r4, 16
485 bdnz 1b
486#else
487 lis r3, KERNELBASE@h
488 iccci 0,r3
489#endif
490#elif defined(CONFIG_FSL_BOOKE)
491BEGIN_FTR_SECTION
492 mfspr r3,SPRN_L1CSR0
493 ori r3,r3,L1CSR0_CFI|L1CSR0_CLFC
494 /* msync; isync recommended here */
495 mtspr SPRN_L1CSR0,r3
496 isync
497 blr
498END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
499 mfspr r3,SPRN_L1CSR1
500 ori r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
501 mtspr SPRN_L1CSR1,r3
502#else
503 mfspr r3,SPRN_PVR
504 rlwinm r3,r3,16,16,31
505 cmpwi 0,r3,1
506 beqlr /* for 601, do nothing */
507 /* 603/604 processor - use invalidate-all bit in HID0 */
508 mfspr r3,SPRN_HID0
509 ori r3,r3,HID0_ICFI
510 mtspr SPRN_HID0,r3
511#endif /* CONFIG_8xx/4xx */
512 isync
513 blr
514
515/*
516 * Write any modified data cache blocks out to memory
517 * and invalidate the corresponding instruction cache blocks.
518 * This is a no-op on the 601.
519 *
520 * flush_icache_range(unsigned long start, unsigned long stop)
521 */
522_GLOBAL(flush_icache_range)
523BEGIN_FTR_SECTION
524 blr /* for 601, do nothing */
525END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
526 li r5,L1_CACHE_BYTES-1
527 andc r3,r3,r5
528 subf r4,r3,r4
529 add r4,r4,r5
530 srwi. r4,r4,L1_CACHE_SHIFT
531 beqlr
532 mtctr r4
533 mr r6,r3
5341: dcbst 0,r3
535 addi r3,r3,L1_CACHE_BYTES
536 bdnz 1b
537 sync /* wait for dcbst's to get to ram */
538 mtctr r4
5392: icbi 0,r6
540 addi r6,r6,L1_CACHE_BYTES
541 bdnz 2b
542 sync /* additional sync needed on g4 */
543 isync
544 blr
545/*
546 * Write any modified data cache blocks out to memory.
547 * Does not invalidate the corresponding cache lines (especially for
548 * any corresponding instruction cache).
549 *
550 * clean_dcache_range(unsigned long start, unsigned long stop)
551 */
552_GLOBAL(clean_dcache_range)
553 li r5,L1_CACHE_BYTES-1
554 andc r3,r3,r5
555 subf r4,r3,r4
556 add r4,r4,r5
557 srwi. r4,r4,L1_CACHE_SHIFT
558 beqlr
559 mtctr r4
560
5611: dcbst 0,r3
562 addi r3,r3,L1_CACHE_BYTES
563 bdnz 1b
564 sync /* wait for dcbst's to get to ram */
565 blr
566
567/*
568 * Write any modified data cache blocks out to memory and invalidate them.
569 * Does not invalidate the corresponding instruction cache blocks.
570 *
571 * flush_dcache_range(unsigned long start, unsigned long stop)
572 */
573_GLOBAL(flush_dcache_range)
574 li r5,L1_CACHE_BYTES-1
575 andc r3,r3,r5
576 subf r4,r3,r4
577 add r4,r4,r5
578 srwi. r4,r4,L1_CACHE_SHIFT
579 beqlr
580 mtctr r4
581
5821: dcbf 0,r3
583 addi r3,r3,L1_CACHE_BYTES
584 bdnz 1b
585	sync			/* wait for dcbf's to get to ram */
586 blr
587
588/*
589 * Like above, but invalidate the D-cache. This is used by the 8xx
590 * to invalidate the cache so the PPC core doesn't get stale data
591 * from the CPM (no cache snooping here :-).
592 *
593 * invalidate_dcache_range(unsigned long start, unsigned long stop)
594 */
595_GLOBAL(invalidate_dcache_range)
596 li r5,L1_CACHE_BYTES-1
597 andc r3,r3,r5
598 subf r4,r3,r4
599 add r4,r4,r5
600 srwi. r4,r4,L1_CACHE_SHIFT
601 beqlr
602 mtctr r4
603
6041: dcbi 0,r3
605 addi r3,r3,L1_CACHE_BYTES
606 bdnz 1b
607 sync /* wait for dcbi's to get to ram */
608 blr
609
610#ifdef CONFIG_NOT_COHERENT_CACHE
611/*
612 * 40x cores have 8K or 16K dcache and 32 byte line size.
613 * 44x has a 32K dcache and 32 byte line size.
614 * 8xx has 1, 2, 4, 8K variants.
615 * For now, cover the worst case of the 44x.
616 * Must be called with external interrupts disabled.
617 */
618#define CACHE_NWAYS 64
619#define CACHE_NLINES 16
620
621_GLOBAL(flush_dcache_all)
622 li r4, (2 * CACHE_NWAYS * CACHE_NLINES)
623 mtctr r4
624 lis r5, KERNELBASE@h
6251: lwz r3, 0(r5) /* Load one word from every line */
626 addi r5, r5, L1_CACHE_BYTES
627 bdnz 1b
628 blr
629#endif /* CONFIG_NOT_COHERENT_CACHE */
630
631/*
632 * Flush a particular page from the data cache to RAM.
633 * Note: this is necessary because the instruction cache does *not*
634 * snoop from the data cache.
635 * This is a no-op on the 601 which has a unified cache.
636 *
637 * void __flush_dcache_icache(void *page)
638 */
639_GLOBAL(__flush_dcache_icache)
640BEGIN_FTR_SECTION
641 blr /* for 601, do nothing */
642END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
643 rlwinm r3,r3,0,0,19 /* Get page base address */
644 li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */
645 mtctr r4
646 mr r6,r3
6470: dcbst 0,r3 /* Write line to ram */
648 addi r3,r3,L1_CACHE_BYTES
649 bdnz 0b
650 sync
651 mtctr r4
6521: icbi 0,r6
653 addi r6,r6,L1_CACHE_BYTES
654 bdnz 1b
655 sync
656 isync
657 blr
658
659/*
660 * Flush a particular page from the data cache to RAM, identified
661 * by its physical address. We turn off the MMU so we can just use
662 * the physical address (this may be a highmem page without a kernel
663 * mapping).
664 *
665 * void __flush_dcache_icache_phys(unsigned long physaddr)
666 */
667_GLOBAL(__flush_dcache_icache_phys)
668BEGIN_FTR_SECTION
669 blr /* for 601, do nothing */
670END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
671 mfmsr r10
672 rlwinm r0,r10,0,28,26 /* clear DR */
673 mtmsr r0
674 isync
675 rlwinm r3,r3,0,0,19 /* Get page base address */
676 li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */
677 mtctr r4
678 mr r6,r3
6790: dcbst 0,r3 /* Write line to ram */
680 addi r3,r3,L1_CACHE_BYTES
681 bdnz 0b
682 sync
683 mtctr r4
6841: icbi 0,r6
685 addi r6,r6,L1_CACHE_BYTES
686 bdnz 1b
687 sync
688 mtmsr r10 /* restore DR */
689 isync
690 blr
691
692/*
693 * Clear pages using the dcbz instruction, which doesn't cause any
694 * memory traffic (except to write out any cache lines which get
695 * displaced). This only works on cacheable memory.
696 *
697 * void clear_pages(void *page, int order);
698 */
699_GLOBAL(clear_pages)
700 li r0,4096/L1_CACHE_BYTES
701 slw r0,r0,r4
702 mtctr r0
703#ifdef CONFIG_8xx
704 li r4, 0
7051: stw r4, 0(r3)
706 stw r4, 4(r3)
707 stw r4, 8(r3)
708 stw r4, 12(r3)
709#else
7101: dcbz 0,r3
711#endif
712 addi r3,r3,L1_CACHE_BYTES
713 bdnz 1b
714 blr
715
716/*
717 * Copy a whole page. We use the dcbz instruction on the destination
718 * to reduce memory traffic (it eliminates the unnecessary reads of
719 * the destination into cache). This requires that the destination
720 * is cacheable.
721 */
722#define COPY_16_BYTES \
723 lwz r6,4(r4); \
724 lwz r7,8(r4); \
725 lwz r8,12(r4); \
726 lwzu r9,16(r4); \
727 stw r6,4(r3); \
728 stw r7,8(r3); \
729 stw r8,12(r3); \
730 stwu r9,16(r3)
731
732_GLOBAL(copy_page)
733 addi r3,r3,-4
734 addi r4,r4,-4
735
736#ifdef CONFIG_8xx
737 /* don't use prefetch on 8xx */
738 li r0,4096/L1_CACHE_BYTES
739 mtctr r0
7401: COPY_16_BYTES
741 bdnz 1b
742 blr
743
744#else /* not 8xx, we can prefetch */
745 li r5,4
746
747#if MAX_COPY_PREFETCH > 1
748 li r0,MAX_COPY_PREFETCH
749 li r11,4
750 mtctr r0
75111: dcbt r11,r4
752 addi r11,r11,L1_CACHE_BYTES
753 bdnz 11b
754#else /* MAX_COPY_PREFETCH == 1 */
755 dcbt r5,r4
756 li r11,L1_CACHE_BYTES+4
757#endif /* MAX_COPY_PREFETCH */
758 li r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH
759 crclr 4*cr0+eq
7602:
761 mtctr r0
7621:
763 dcbt r11,r4
764 dcbz r5,r3
765 COPY_16_BYTES
766#if L1_CACHE_BYTES >= 32
767 COPY_16_BYTES
768#if L1_CACHE_BYTES >= 64
769 COPY_16_BYTES
770 COPY_16_BYTES
771#if L1_CACHE_BYTES >= 128
772 COPY_16_BYTES
773 COPY_16_BYTES
774 COPY_16_BYTES
775 COPY_16_BYTES
776#endif
777#endif
778#endif
779 bdnz 1b
780 beqlr
781 crnot 4*cr0+eq,4*cr0+eq
782 li r0,MAX_COPY_PREFETCH
783 li r11,4
784 b 2b
785#endif /* CONFIG_8xx */
786
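/*
 * The prefetch/dcbz pipeline above, in rough C (dcbt/dcbz stand for
 * the instructions of the same name; the second pass in the code
 * skips the dcbt for the last MAX_COPY_PREFETCH lines so we never
 * touch past the source page):
 *
 *	for (line = 0; line < 4096 / L1_CACHE_BYTES; line++) {
 *		dcbt(src + MAX_COPY_PREFETCH * L1_CACHE_BYTES);	// stay ahead
 *		dcbz(dst);	// allocate dest line without reading it
 *		memcpy(dst, src, L1_CACHE_BYTES);	// the COPY_16_BYTES runs
 *		src += L1_CACHE_BYTES;
 *		dst += L1_CACHE_BYTES;
 *	}
 */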
787/*
788 * void atomic_clear_mask(atomic_t mask, atomic_t *addr)
789 * void atomic_set_mask(atomic_t mask, atomic_t *addr);
790 */
791_GLOBAL(atomic_clear_mask)
79210: lwarx r5,0,r4
793 andc r5,r5,r3
794 PPC405_ERR77(0,r4)
795 stwcx. r5,0,r4
796 bne- 10b
797 blr
798_GLOBAL(atomic_set_mask)
79910: lwarx r5,0,r4
800 or r5,r5,r3
801 PPC405_ERR77(0,r4)
802 stwcx. r5,0,r4
803 bne- 10b
804 blr
805
806/*
807 * I/O string operations
808 *
809 * insb(port, buf, len)
810 * outsb(port, buf, len)
811 * insw(port, buf, len)
812 * outsw(port, buf, len)
813 * insl(port, buf, len)
814 * outsl(port, buf, len)
815 * insw_ns(port, buf, len)
816 * outsw_ns(port, buf, len)
817 * insl_ns(port, buf, len)
818 * outsl_ns(port, buf, len)
819 *
820 * The *_ns versions don't do byte-swapping.
821 */
822_GLOBAL(_insb)
823 cmpwi 0,r5,0
824 mtctr r5
825 subi r4,r4,1
826 blelr-
82700: lbz r5,0(r3)
828 eieio
829 stbu r5,1(r4)
830 bdnz 00b
831 blr
832
833_GLOBAL(_outsb)
834 cmpwi 0,r5,0
835 mtctr r5
836 subi r4,r4,1
837 blelr-
83800: lbzu r5,1(r4)
839 stb r5,0(r3)
840 eieio
841 bdnz 00b
842 blr
843
844_GLOBAL(_insw)
845 cmpwi 0,r5,0
846 mtctr r5
847 subi r4,r4,2
848 blelr-
84900: lhbrx r5,0,r3
850 eieio
851 sthu r5,2(r4)
852 bdnz 00b
853 blr
854
855_GLOBAL(_outsw)
856 cmpwi 0,r5,0
857 mtctr r5
858 subi r4,r4,2
859 blelr-
86000: lhzu r5,2(r4)
861 eieio
862 sthbrx r5,0,r3
863 bdnz 00b
864 blr
865
866_GLOBAL(_insl)
867 cmpwi 0,r5,0
868 mtctr r5
869 subi r4,r4,4
870 blelr-
87100: lwbrx r5,0,r3
872 eieio
873 stwu r5,4(r4)
874 bdnz 00b
875 blr
876
877_GLOBAL(_outsl)
878 cmpwi 0,r5,0
879 mtctr r5
880 subi r4,r4,4
881 blelr-
88200: lwzu r5,4(r4)
883 stwbrx r5,0,r3
884 eieio
885 bdnz 00b
886 blr
887
888_GLOBAL(__ide_mm_insw)
889_GLOBAL(_insw_ns)
890 cmpwi 0,r5,0
891 mtctr r5
892 subi r4,r4,2
893 blelr-
89400: lhz r5,0(r3)
895 eieio
896 sthu r5,2(r4)
897 bdnz 00b
898 blr
899
900_GLOBAL(__ide_mm_outsw)
901_GLOBAL(_outsw_ns)
902 cmpwi 0,r5,0
903 mtctr r5
904 subi r4,r4,2
905 blelr-
90600: lhzu r5,2(r4)
907 sth r5,0(r3)
908 eieio
909 bdnz 00b
910 blr
911
912_GLOBAL(__ide_mm_insl)
913_GLOBAL(_insl_ns)
914 cmpwi 0,r5,0
915 mtctr r5
916 subi r4,r4,4
917 blelr-
91800: lwz r5,0(r3)
919 eieio
920 stwu r5,4(r4)
921 bdnz 00b
922 blr
923
924_GLOBAL(__ide_mm_outsl)
925_GLOBAL(_outsl_ns)
926 cmpwi 0,r5,0
927 mtctr r5
928 subi r4,r4,4
929 blelr-
93000: lwzu r5,4(r4)
931 stw r5,0(r3)
932 eieio
933 bdnz 00b
934 blr
935
936/*
937 * Extended precision shifts.
938 *
939 * Updated to be valid for shift counts from 0 to 63 inclusive.
940 * -- Gabriel
941 *
942 * R3/R4 has 64 bit value
943 * R5 has shift count
944 * result in R3/R4
945 *
946 * ashrdi3: arithmetic right shift (sign propagation)
947 * lshrdi3: logical right shift
948 * ashldi3: left shift
949 */
950_GLOBAL(__ashrdi3)
951 subfic r6,r5,32
952 srw r4,r4,r5 # LSW = count > 31 ? 0 : LSW >> count
953 addi r7,r5,32 # could be xori, or addi with -32
954 slw r6,r3,r6 # t1 = count > 31 ? 0 : MSW << (32-count)
955 rlwinm r8,r7,0,32 # t3 = (count < 32) ? 32 : 0
956 sraw r7,r3,r7 # t2 = MSW >> (count-32)
957 or r4,r4,r6 # LSW |= t1
958 slw r7,r7,r8 # t2 = (count < 32) ? 0 : t2
959 sraw r3,r3,r5 # MSW = MSW >> count
960 or r4,r4,r7 # LSW |= t2
961 blr
962
963_GLOBAL(__ashldi3)
964 subfic r6,r5,32
965 slw r3,r3,r5 # MSW = count > 31 ? 0 : MSW << count
966 addi r7,r5,32 # could be xori, or addi with -32
967 srw r6,r4,r6 # t1 = count > 31 ? 0 : LSW >> (32-count)
968 slw r7,r4,r7 # t2 = count < 32 ? 0 : LSW << (count-32)
969 or r3,r3,r6 # MSW |= t1
970 slw r4,r4,r5 # LSW = LSW << count
971 or r3,r3,r7 # MSW |= t2
972 blr
973
974_GLOBAL(__lshrdi3)
975 subfic r6,r5,32
976 srw r4,r4,r5 # LSW = count > 31 ? 0 : LSW >> count
977 addi r7,r5,32 # could be xori, or addi with -32
978 slw r6,r3,r6 # t1 = count > 31 ? 0 : MSW << (32-count)
979 srw r7,r3,r7 # t2 = count < 32 ? 0 : MSW >> (count-32)
980 or r4,r4,r6 # LSW |= t1
981 srw r3,r3,r5 # MSW = MSW >> count
982 or r4,r4,r7 # LSW |= t2
983 blr
984
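/*
 * The same decomposition in C, shown for the logical right shift (a
 * sketch; srw/slw yield 0 for shift amounts of 32..63, which is what
 * the t1/t2 terms rely on, so the C version needs explicit guards):
 *
 *	u64 __lshrdi3(u64 v, unsigned int c)
 *	{
 *		u32 msw = v >> 32, lsw = v;
 *		u32 lo = (c > 31 ? 0 : lsw >> c)
 *		       | (c == 0 || c > 31 ? 0 : msw << (32 - c))
 *		       | (c < 32 ? 0 : msw >> (c - 32));
 *		u32 hi = (c > 31 ? 0 : msw >> c);
 *		return ((u64)hi << 32) | lo;
 *	}
 */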
985_GLOBAL(abs)
986 srawi r4,r3,31
987 xor r3,r3,r4
988 sub r3,r3,r4
989 blr
990
991_GLOBAL(_get_SP)
992 mr r3,r1 /* Close enough */
993 blr
994
995/*
996 * Create a kernel thread
997 * kernel_thread(fn, arg, flags)
998 */
999_GLOBAL(kernel_thread)
1000 stwu r1,-16(r1)
1001 stw r30,8(r1)
1002 stw r31,12(r1)
1003 mr r30,r3 /* function */
1004 mr r31,r4 /* argument */
1005 ori r3,r5,CLONE_VM /* flags */
1006 oris r3,r3,CLONE_UNTRACED>>16
1007 li r4,0 /* new sp (unused) */
1008 li r0,__NR_clone
1009 sc
1010 cmpwi 0,r3,0 /* parent or child? */
1011 bne 1f /* return if parent */
1012 li r0,0 /* make top-level stack frame */
1013 stwu r0,-16(r1)
1014 mtlr r30 /* fn addr in lr */
1015 mr r3,r31 /* load arg and call fn */
1016 PPC440EP_ERR42
1017 blrl
1018 li r0,__NR_exit /* exit if function returns */
1019 li r3,0
1020 sc
10211: lwz r30,8(r1)
1022 lwz r31,12(r1)
1023 addi r1,r1,16
1024 blr
1025
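/*
 * The syscall dance above, as a C sketch (clone/exit are the raw
 * syscalls, not libc wrappers):
 *
 *	long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 *	{
 *		long pid = clone(flags | CLONE_VM | CLONE_UNTRACED, 0);
 *		if (pid != 0)
 *			return pid;	// parent: child's pid, or -errno
 *		exit(fn(arg));		// child: run fn, exit with its result
 *	}
 */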
1026_GLOBAL(execve)
1027 li r0,__NR_execve
1028 sc
1029 bnslr
1030 neg r3,r3
1031 blr
1032
1033/*
1034 * This routine is just here to keep GCC happy - sigh...
1035 */
1036_GLOBAL(__main)
1037 blr
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
new file mode 100644
index 000000000000..b3e95ff0dba0
--- /dev/null
+++ b/arch/powerpc/kernel/misc_64.S
@@ -0,0 +1,880 @@
1/*
2 * arch/powerpc/kernel/misc_64.S
3 *
4 * This file contains miscellaneous low-level functions.
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
8 * and Paul Mackerras.
9 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
10 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 *
17 */
18
19#include <linux/config.h>
20#include <linux/sys.h>
21#include <asm/unistd.h>
22#include <asm/errno.h>
23#include <asm/processor.h>
24#include <asm/page.h>
25#include <asm/cache.h>
26#include <asm/ppc_asm.h>
27#include <asm/asm-offsets.h>
28#include <asm/cputable.h>
29#include <asm/thread_info.h>
30
31 .text
32
33/*
34 * Returns (address we are running at) - (address we were linked at)
35 * for use before the text and data are mapped to KERNELBASE.
36 */
37
38_GLOBAL(reloc_offset)
39 mflr r0
40 bl 1f
411: mflr r3
42 LOADADDR(r4,1b)
43 subf r3,r4,r3
44 mtlr r0
45 blr
46
47/*
48 * add_reloc_offset(x) returns x + reloc_offset().
49 */
50_GLOBAL(add_reloc_offset)
51 mflr r0
52 bl 1f
531: mflr r5
54 LOADADDR(r4,1b)
55 subf r5,r4,r5
56 add r3,r3,r5
57 mtlr r0
58 blr
59
60_GLOBAL(get_msr)
61 mfmsr r3
62 blr
63
64_GLOBAL(get_dar)
65 mfdar r3
66 blr
67
68_GLOBAL(get_srr0)
69 mfsrr0 r3
70 blr
71
72_GLOBAL(get_srr1)
73 mfsrr1 r3
74 blr
75
76_GLOBAL(get_sp)
77 mr r3,r1
78 blr
79
80#ifdef CONFIG_IRQSTACKS
81_GLOBAL(call_do_softirq)
82 mflr r0
83 std r0,16(r1)
84 stdu r1,THREAD_SIZE-112(r3)
85 mr r1,r3
86 bl .__do_softirq
87 ld r1,0(r1)
88 ld r0,16(r1)
89 mtlr r0
90 blr
91
92_GLOBAL(call_handle_IRQ_event)
93 mflr r0
94 std r0,16(r1)
95 stdu r1,THREAD_SIZE-112(r6)
96 mr r1,r6
97 bl .handle_IRQ_event
98 ld r1,0(r1)
99 ld r0,16(r1)
100 mtlr r0
101 blr
102#endif /* CONFIG_IRQSTACKS */
103
104 /*
105 * To be called by C code which needs to do some operations with MMU
106 * disabled. Note that interrupts have to be disabled by the caller
107 * prior to calling us. The code called _MUST_ be in the RMO of course
108 * and part of the linear mapping as we don't attempt to translate the
109 * stack pointer at all. The function is called with the stack switched
110 * to this CPU's emergency stack.
111 *
112 * prototype is void *call_with_mmu_off(void *func, void *data);
113 *
114 * the called function is expected to be of the form
115 *
116 * void *called(void *data);
117 */
118_GLOBAL(call_with_mmu_off)
119 mflr r0 /* get link, save it on stackframe */
120 std r0,16(r1)
121	mr	r5,r1			/* save old stack ptr */
122 ld r1,PACAEMERGSP(r13) /* get emerg. stack */
123 subi r1,r1,STACK_FRAME_OVERHEAD
124 std r0,16(r1) /* save link on emerg. stack */
125 std r5,0(r1) /* save old stack ptr in backchain */
126 ld r3,0(r3) /* get to real function ptr (assume same TOC) */
127 bl 2f /* we need LR to return, continue at label 2 */
128
129 ld r0,16(r1) /* we return here from the call, get LR and */
130 ld r1,0(r1) /* .. old stack ptr */
131 mtspr SPRN_SRR0,r0 /* and get back to virtual mode with these */
132 mfmsr r4
133 ori r4,r4,MSR_IR|MSR_DR
134 mtspr SPRN_SRR1,r4
135 rfid
136
1372: mtspr SPRN_SRR0,r3 /* coming from above, enter real mode */
138 mr r3,r4 /* get parameter */
139 mfmsr r0
140 ori r0,r0,MSR_IR|MSR_DR
141 xori r0,r0,MSR_IR|MSR_DR
142 mtspr SPRN_SRR1,r0
143 rfid
144
145
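/*
 * Usage sketch (names illustrative).  The "ld r3,0(r3)" above is the
 * ppc64 function-descriptor dereference, so an ordinary C function
 * pointer can be passed straight in:
 *
 *	static void *dart_off(void *data)
 *	{
 *		// executes with MSR_IR/MSR_DR clear (real mode)
 *		return data;
 *	}
 *
 *	local_irq_disable();
 *	ret = call_with_mmu_off(dart_off, &args);
 */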
146 .section ".toc","aw"
147PPC64_CACHES:
148 .tc ppc64_caches[TC],ppc64_caches
149 .section ".text"
150
151/*
152 * Write any modified data cache blocks out to memory
153 * and invalidate the corresponding instruction cache blocks.
154 *
155 * flush_icache_range(unsigned long start, unsigned long stop)
156 *
157 * flush all bytes from start through stop-1 inclusive
158 */
159
160_KPROBE(__flush_icache_range)
161
162/*
163 * Flush the data cache to memory
164 *
165 * Different systems have different cache line sizes
166 * and in some cases i-cache and d-cache line sizes differ from
167 * each other.
168 */
169 ld r10,PPC64_CACHES@toc(r2)
170 lwz r7,DCACHEL1LINESIZE(r10)/* Get cache line size */
171 addi r5,r7,-1
172 andc r6,r3,r5 /* round low to line bdy */
173 subf r8,r6,r4 /* compute length */
174 add r8,r8,r5 /* ensure we get enough */
175 lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of cache line size */
176 srw. r8,r8,r9 /* compute line count */
177 beqlr /* nothing to do? */
178 mtctr r8
1791: dcbst 0,r6
180 add r6,r6,r7
181 bdnz 1b
182 sync
183
184/* Now invalidate the instruction cache */
185
186 lwz r7,ICACHEL1LINESIZE(r10) /* Get Icache line size */
187 addi r5,r7,-1
188 andc r6,r3,r5 /* round low to line bdy */
189 subf r8,r6,r4 /* compute length */
190 add r8,r8,r5
191 lwz r9,ICACHEL1LOGLINESIZE(r10) /* Get log-2 of Icache line size */
192 srw. r8,r8,r9 /* compute line count */
193 beqlr /* nothing to do? */
194 mtctr r8
1952: icbi 0,r6
196 add r6,r6,r7
197 bdnz 2b
198 isync
199 blr
200 .previous .text
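/*
 * Both loops above, as a C sketch (line sizes come from the
 * ppc64_caches structure reached through the TOC; dcbst/icbi stand
 * for the instructions):
 *
 *	for (p = start & ~(dline_size - 1); p < stop; p += dline_size)
 *		dcbst(p);	// push dirty data out to memory
 *	sync();			// wait for the dcbst's to complete
 *	for (p = start & ~(iline_size - 1); p < stop; p += iline_size)
 *		icbi(p);	// toss the stale icache blocks
 *	isync();		// and any prefetched instructions
 */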
201/*
202 * Like above, but only do the D-cache.
203 *
204 * flush_dcache_range(unsigned long start, unsigned long stop)
205 *
206 * flush all bytes from start to stop-1 inclusive
207 */
208_GLOBAL(flush_dcache_range)
209
210/*
211 * Flush the data cache to memory
212 *
213 * Different systems have different cache line sizes
214 */
215 ld r10,PPC64_CACHES@toc(r2)
216 lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
217 addi r5,r7,-1
218 andc r6,r3,r5 /* round low to line bdy */
219 subf r8,r6,r4 /* compute length */
220 add r8,r8,r5 /* ensure we get enough */
221 lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of dcache line size */
222 srw. r8,r8,r9 /* compute line count */
223 beqlr /* nothing to do? */
224 mtctr r8
2250: dcbst 0,r6
226 add r6,r6,r7
227 bdnz 0b
228 sync
229 blr
230
231/*
232 * Like above, but works on non-mapped physical addresses.
233 * Use only for non-LPAR setups! It also assumes real mode
234 * is cacheable. Used for flushing out the DART before using
235 * it as uncacheable memory
236 *
237 * flush_dcache_phys_range(unsigned long start, unsigned long stop)
238 *
239 * flush all bytes from start to stop-1 inclusive
240 */
241_GLOBAL(flush_dcache_phys_range)
242 ld r10,PPC64_CACHES@toc(r2)
243 lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
244 addi r5,r7,-1
245 andc r6,r3,r5 /* round low to line bdy */
246 subf r8,r6,r4 /* compute length */
247 add r8,r8,r5 /* ensure we get enough */
248 lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of dcache line size */
249 srw. r8,r8,r9 /* compute line count */
250 beqlr /* nothing to do? */
251 mfmsr r5 /* Disable MMU Data Relocation */
252 ori r0,r5,MSR_DR
253 xori r0,r0,MSR_DR
254 sync
255 mtmsr r0
256 sync
257 isync
258 mtctr r8
2590: dcbst 0,r6
260 add r6,r6,r7
261 bdnz 0b
262 sync
263 isync
264 mtmsr r5 /* Re-enable MMU Data Relocation */
265 sync
266 isync
267 blr
268
269_GLOBAL(flush_inval_dcache_range)
270 ld r10,PPC64_CACHES@toc(r2)
271 lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
272 addi r5,r7,-1
273 andc r6,r3,r5 /* round low to line bdy */
274 subf r8,r6,r4 /* compute length */
275 add r8,r8,r5 /* ensure we get enough */
276 lwz r9,DCACHEL1LOGLINESIZE(r10)/* Get log-2 of dcache line size */
277 srw. r8,r8,r9 /* compute line count */
278 beqlr /* nothing to do? */
279 sync
280 isync
281 mtctr r8
2820: dcbf 0,r6
283 add r6,r6,r7
284 bdnz 0b
285 sync
286 isync
287 blr
288
289
290/*
291 * Flush a particular page from the data cache to RAM.
292 * Note: this is necessary because the instruction cache does *not*
293 * snoop from the data cache.
294 *
295 * void __flush_dcache_icache(void *page)
296 */
297_GLOBAL(__flush_dcache_icache)
298/*
299 * Flush the data cache to memory
300 *
301 * Different systems have different cache line sizes
302 */
303
304/* Flush the dcache */
305 ld r7,PPC64_CACHES@toc(r2)
306 clrrdi r3,r3,PAGE_SHIFT /* Page align */
307 lwz r4,DCACHEL1LINESPERPAGE(r7) /* Get # dcache lines per page */
308 lwz r5,DCACHEL1LINESIZE(r7) /* Get dcache line size */
309 mr r6,r3
310 mtctr r4
3110: dcbst 0,r6
312 add r6,r6,r5
313 bdnz 0b
314 sync
315
316/* Now invalidate the icache */
317
318 lwz r4,ICACHEL1LINESPERPAGE(r7) /* Get # icache lines per page */
319 lwz r5,ICACHEL1LINESIZE(r7) /* Get icache line size */
320 mtctr r4
3211: icbi 0,r3
322 add r3,r3,r5
323 bdnz 1b
324 isync
325 blr
326
327/*
328 * I/O string operations
329 *
330 * insb(port, buf, len)
331 * outsb(port, buf, len)
332 * insw(port, buf, len)
333 * outsw(port, buf, len)
334 * insl(port, buf, len)
335 * outsl(port, buf, len)
336 * insw_ns(port, buf, len)
337 * outsw_ns(port, buf, len)
338 * insl_ns(port, buf, len)
339 * outsl_ns(port, buf, len)
340 *
341 * The *_ns versions don't do byte-swapping.
342 */
343_GLOBAL(_insb)
344 cmpwi 0,r5,0
345 mtctr r5
346 subi r4,r4,1
347 blelr-
34800: lbz r5,0(r3)
349 eieio
350 stbu r5,1(r4)
351 bdnz 00b
352 twi 0,r5,0
353 isync
354 blr
355
356_GLOBAL(_outsb)
357 cmpwi 0,r5,0
358 mtctr r5
359 subi r4,r4,1
360 blelr-
36100: lbzu r5,1(r4)
362 stb r5,0(r3)
363 bdnz 00b
364 sync
365 blr
366
367_GLOBAL(_insw)
368 cmpwi 0,r5,0
369 mtctr r5
370 subi r4,r4,2
371 blelr-
37200: lhbrx r5,0,r3
373 eieio
374 sthu r5,2(r4)
375 bdnz 00b
376 twi 0,r5,0
377 isync
378 blr
379
380_GLOBAL(_outsw)
381 cmpwi 0,r5,0
382 mtctr r5
383 subi r4,r4,2
384 blelr-
38500: lhzu r5,2(r4)
386 sthbrx r5,0,r3
387 bdnz 00b
388 sync
389 blr
390
391_GLOBAL(_insl)
392 cmpwi 0,r5,0
393 mtctr r5
394 subi r4,r4,4
395 blelr-
39600: lwbrx r5,0,r3
397 eieio
398 stwu r5,4(r4)
399 bdnz 00b
400 twi 0,r5,0
401 isync
402 blr
403
404_GLOBAL(_outsl)
405 cmpwi 0,r5,0
406 mtctr r5
407 subi r4,r4,4
408 blelr-
40900: lwzu r5,4(r4)
410 stwbrx r5,0,r3
411 bdnz 00b
412 sync
413 blr
414
415/* _GLOBAL(ide_insw) now in drivers/ide/ide-iops.c */
416_GLOBAL(_insw_ns)
417 cmpwi 0,r5,0
418 mtctr r5
419 subi r4,r4,2
420 blelr-
42100: lhz r5,0(r3)
422 eieio
423 sthu r5,2(r4)
424 bdnz 00b
425 twi 0,r5,0
426 isync
427 blr
428
429/* _GLOBAL(ide_outsw) now in drivers/ide/ide-iops.c */
430_GLOBAL(_outsw_ns)
431 cmpwi 0,r5,0
432 mtctr r5
433 subi r4,r4,2
434 blelr-
43500: lhzu r5,2(r4)
436 sth r5,0(r3)
437 bdnz 00b
438 sync
439 blr
440
441_GLOBAL(_insl_ns)
442 cmpwi 0,r5,0
443 mtctr r5
444 subi r4,r4,4
445 blelr-
44600: lwz r5,0(r3)
447 eieio
448 stwu r5,4(r4)
449 bdnz 00b
450 twi 0,r5,0
451 isync
452 blr
453
454_GLOBAL(_outsl_ns)
455 cmpwi 0,r5,0
456 mtctr r5
457 subi r4,r4,4
458 blelr-
45900: lwzu r5,4(r4)
460 stw r5,0(r3)
461 bdnz 00b
462 sync
463 blr
464
465/*
466 * identify_cpu and calls setup_cpu
467 * In: r3 = base of the cpu_specs array
468 * r4 = address of cur_cpu_spec
469 * r5 = relocation offset
470 */
471_GLOBAL(identify_cpu)
472 mfpvr r7
4731:
474 lwz r8,CPU_SPEC_PVR_MASK(r3)
475 and r8,r8,r7
476 lwz r9,CPU_SPEC_PVR_VALUE(r3)
477 cmplw 0,r9,r8
478 beq 1f
479 addi r3,r3,CPU_SPEC_ENTRY_SIZE
480 b 1b
4811:
482 sub r0,r3,r5
483 std r0,0(r4)
484 ld r4,CPU_SPEC_SETUP(r3)
485 add r4,r4,r5
486 ld r4,0(r4)
487 add r4,r4,r5
488 mtctr r4
489 /* Calling convention for cpu setup is r3=offset, r4=cur_cpu_spec */
490 mr r4,r3
491 mr r3,r5
492 bctr
493
494/*
495 * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
496 * and writes nop's over sections of code that don't apply for this cpu.
497 * r3 = data offset (not changed)
498 */
499_GLOBAL(do_cpu_ftr_fixups)
500 /* Get CPU 0 features */
501 LOADADDR(r6,cur_cpu_spec)
502 sub r6,r6,r3
503 ld r4,0(r6)
504 sub r4,r4,r3
505 ld r4,CPU_SPEC_FEATURES(r4)
506 /* Get the fixup table */
507 LOADADDR(r6,__start___ftr_fixup)
508 sub r6,r6,r3
509 LOADADDR(r7,__stop___ftr_fixup)
510 sub r7,r7,r3
511 /* Do the fixup */
5121: cmpld r6,r7
513 bgelr
514 addi r6,r6,32
515 ld r8,-32(r6) /* mask */
516 and r8,r8,r4
517 ld r9,-24(r6) /* value */
518 cmpld r8,r9
519 beq 1b
520 ld r8,-16(r6) /* section begin */
521 ld r9,-8(r6) /* section end */
522 subf. r9,r8,r9
523 beq 1b
524 /* write nops over the section of code */
525 /* todo: if large section, add a branch at the start of it */
526 srwi r9,r9,2
527 mtctr r9
528 sub r8,r8,r3
529 lis r0,0x60000000@h /* nop */
5303: stw r0,0(r8)
531 andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
532 beq 2f
533 dcbst 0,r8 /* suboptimal, but simpler */
534 sync
535 icbi 0,r8
5362: addi r8,r8,4
537 bdnz 3b
538 sync /* additional sync needed on g4 */
539 isync
540 b 1b
541
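The -32/-24/-16/-8 offsets above imply a 32-byte fixup record. Roughly, in C (a sketch; patch_nops is a hypothetical stand-in for the stw/dcbst/icbi loop):

	struct ftr_fixup_entry {
		unsigned long mask;	/* feature bits this fixup tests */
		unsigned long value;	/* leave code alone if (features & mask) == value */
		unsigned long start;	/* first instruction of the section */
		unsigned long end;	/* one past the last instruction */
	};

	extern struct ftr_fixup_entry __start___ftr_fixup[], __stop___ftr_fixup[];

	static void cpu_ftr_fixups_sketch(unsigned long features)
	{
		struct ftr_fixup_entry *e;

		for (e = __start___ftr_fixup; e < __stop___ftr_fixup; e++)
			if ((e->mask & features) != e->value)
				patch_nops(e->start, e->end);	/* write 0x60000000 over it */
	}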
542#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
543/*
544 * Do an IO access in real mode
545 */
546_GLOBAL(real_readb)
547 mfmsr r7
548 ori r0,r7,MSR_DR
549 xori r0,r0,MSR_DR
550 sync
551 mtmsrd r0
552 sync
553 isync
554 mfspr r6,SPRN_HID4
 555	rldicl	r5,r6,32,0	/* rotate so HID4's high word is in the low half */
 556	ori	r5,r5,0x100	/* set rm_ci (real-mode cache inhibit) */
 557	rldicl	r5,r5,32,0	/* rotate back */
558 sync
559 mtspr SPRN_HID4,r5
560 isync
561 slbia
562 isync
563 lbz r3,0(r3)
564 sync
565 mtspr SPRN_HID4,r6
566 isync
567 slbia
568 isync
569 mtmsrd r7
570 sync
571 isync
572 blr
573
574 /*
575 * Do an IO access in real mode
576 */
577_GLOBAL(real_writeb)
578 mfmsr r7
579 ori r0,r7,MSR_DR
580 xori r0,r0,MSR_DR
581 sync
582 mtmsrd r0
583 sync
584 isync
585 mfspr r6,SPRN_HID4
586 rldicl r5,r6,32,0
587 ori r5,r5,0x100
588 rldicl r5,r5,32,0
589 sync
590 mtspr SPRN_HID4,r5
591 isync
592 slbia
593 isync
594 stb r3,0(r4)
595 sync
596 mtspr SPRN_HID4,r6
597 isync
598 slbia
599 isync
600 mtmsrd r7
601 sync
602 isync
603 blr
604#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */
605
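From C, these look like single-byte accessors that take untranslated (physical) addresses, for early bring-up before ioremap works. Prototypes as a sketch (the real declarations live in the io headers):

	extern u8 real_readb(volatile u8 __iomem *addr);
	extern void real_writeb(u8 data, volatile u8 __iomem *addr);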
606/*
607 * Create a kernel thread
608 * kernel_thread(fn, arg, flags)
609 */
610_GLOBAL(kernel_thread)
611 std r29,-24(r1)
612 std r30,-16(r1)
613 stdu r1,-STACK_FRAME_OVERHEAD(r1)
614 mr r29,r3
615 mr r30,r4
616 ori r3,r5,CLONE_VM /* flags */
617 oris r3,r3,(CLONE_UNTRACED>>16)
618 li r4,0 /* new sp (unused) */
619 li r0,__NR_clone
620 sc
621 cmpdi 0,r3,0 /* parent or child? */
622 bne 1f /* return if parent */
623 li r0,0
624 stdu r0,-STACK_FRAME_OVERHEAD(r1)
 625	ld	r2,8(r29)	/* fn is a function descriptor: load its TOC */
 626	ld	r29,0(r29)	/* and its entry-point address */
627 mtlr r29 /* fn addr in lr */
628 mr r3,r30 /* load arg and call fn */
629 blrl
630 li r0,__NR_exit /* exit after child exits */
631 li r3,0
632 sc
6331: addi r1,r1,STACK_FRAME_OVERHEAD
634 ld r29,-24(r1)
635 ld r30,-16(r1)
636 blr
637
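kernel_thread() is a direct wrapper around the clone syscall: the parent returns with the child's pid, while the child ends up calling fn(arg) and then exits via sys_exit with fn's return value. A typical use of this era (names illustrative):

	static int my_daemon(void *unused)
	{
		daemonize("mydaemon");	/* detach from the spawning context */
		/* ... do the work ... */
		return 0;		/* becomes the exit code */
	}

	/* from init code: */
	long pid = kernel_thread(my_daemon, NULL, CLONE_FS | CLONE_FILES | SIGCHLD);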
638/*
639 * disable_kernel_fp()
640 * Disable the FPU.
641 */
642_GLOBAL(disable_kernel_fp)
643 mfmsr r3
644 rldicl r0,r3,(63-MSR_FP_LG),1
645 rldicl r3,r0,(MSR_FP_LG+1),0
646 mtmsrd r3 /* disable use of fpu now */
647 isync
648 blr
649
650#ifdef CONFIG_ALTIVEC
651
652#if 0 /* this has no callers for now */
653/*
654 * disable_kernel_altivec()
655 * Disable the VMX.
656 */
657_GLOBAL(disable_kernel_altivec)
658 mfmsr r3
659 rldicl r0,r3,(63-MSR_VEC_LG),1
660 rldicl r3,r0,(MSR_VEC_LG+1),0
661 mtmsrd r3 /* disable use of VMX now */
662 isync
663 blr
664#endif /* 0 */
665
666/*
667 * giveup_altivec(tsk)
668 * Disable VMX for the task given as the argument,
669 * and save the vector registers in its thread_struct.
670 * Enables the VMX for use in the kernel on return.
671 */
672_GLOBAL(giveup_altivec)
673 mfmsr r5
674 oris r5,r5,MSR_VEC@h
675 mtmsrd r5 /* enable use of VMX now */
676 isync
677 cmpdi 0,r3,0
678 beqlr- /* if no previous owner, done */
679 addi r3,r3,THREAD /* want THREAD of task */
680 ld r5,PT_REGS(r3)
681 cmpdi 0,r5,0
682 SAVE_32VRS(0,r4,r3)
683 mfvscr vr0
684 li r4,THREAD_VSCR
685 stvx vr0,r4,r3
686 beq 1f
687 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
688 lis r3,MSR_VEC@h
 689	andc	r4,r4,r3	/* disable VMX for previous task */
690 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
6911:
692#ifndef CONFIG_SMP
693 li r5,0
694 ld r4,last_task_used_altivec@got(r2)
695 std r5,0(r4)
696#endif /* CONFIG_SMP */
697 blr
698
699#endif /* CONFIG_ALTIVEC */
700
701_GLOBAL(__setup_cpu_power3)
702 blr
703
704_GLOBAL(execve)
705 li r0,__NR_execve
706 sc
707 bnslr
708 neg r3,r3
709 blr
710
711/* kexec_wait(phys_cpu)
712 *
 713 * wait for the flag to change, indicating this kernel is going away and
 714 * that the slave code for the next one is in place at addresses 0 to 0x100.
715 *
716 * This is used by all slaves.
717 *
718 * Physical (hardware) cpu id should be in r3.
719 */
720_GLOBAL(kexec_wait)
721 bl 1f
7221: mflr r5
723 addi r5,r5,kexec_flag-1b
724
72599: HMT_LOW
726#ifdef CONFIG_KEXEC /* use no memory without kexec */
727 lwz r4,0(r5)
728 cmpwi 0,r4,0
729 bnea 0x60
730#endif
731 b 99b
732
 733/* this can be in the text section because we won't change it until
 734 * we are running in real mode anyway
735 */
736kexec_flag:
737 .long 0
738
739
740#ifdef CONFIG_KEXEC
741
742/* kexec_smp_wait(void)
743 *
744 * call with interrupts off
745 * note: this is a terminal routine, it does not save lr
746 *
747 * get phys id from paca
748 * set paca id to -1 to say we got here
749 * switch to real mode
750 * join other cpus in kexec_wait(phys_id)
751 */
752_GLOBAL(kexec_smp_wait)
753 lhz r3,PACAHWCPUID(r13)
754 li r4,-1
755 sth r4,PACAHWCPUID(r13) /* let others know we left */
756 bl real_mode
757 b .kexec_wait
758
759/*
760 * switch to real mode (turn mmu off)
761 * we use the early kernel trick that the hardware ignores bits
762 * 0 and 1 (big endian) of the effective address in real mode
763 *
764 * don't overwrite r3 here, it is live for kexec_wait above.
765 */
766real_mode: /* assume normal blr return */
7671: li r9,MSR_RI
768 li r10,MSR_DR|MSR_IR
769 mflr r11 /* return address to SRR0 */
770 mfmsr r12
771 andc r9,r12,r9
772 andc r10,r12,r10
773
774 mtmsrd r9,1
775 mtspr SPRN_SRR1,r10
776 mtspr SPRN_SRR0,r11
777 rfid
778
779
780/*
781 * kexec_sequence(newstack, start, image, control, clear_all())
782 *
 783 * does the grungy work of switching stacks and dropping into real mode;
 784 * also makes simple calls into other code
785 */
786
787_GLOBAL(kexec_sequence)
788 mflr r0
789 std r0,16(r1)
790
791 /* switch stacks to newstack -- &kexec_stack.stack */
792 stdu r1,THREAD_SIZE-112(r3)
793 mr r1,r3
794
795 li r0,0
796 std r0,16(r1)
797
798 /* save regs for local vars on new stack.
799 * yes, we won't go back, but ...
800 */
801 std r31,-8(r1)
802 std r30,-16(r1)
803 std r29,-24(r1)
804 std r28,-32(r1)
805 std r27,-40(r1)
806 std r26,-48(r1)
807 std r25,-56(r1)
808
809 stdu r1,-112-64(r1)
810
811 /* save args into preserved regs */
812 mr r31,r3 /* newstack (both) */
813 mr r30,r4 /* start (real) */
814 mr r29,r5 /* image (virt) */
815 mr r28,r6 /* control, unused */
816 mr r27,r7 /* clear_all() fn desc */
817 mr r26,r8 /* spare */
818 lhz r25,PACAHWCPUID(r13) /* get our phys cpu from paca */
819
820 /* disable interrupts, we are overwriting kernel data next */
821 mfmsr r3
822 rlwinm r3,r3,0,17,15
823 mtmsrd r3,1
824
825 /* copy dest pages, flush whole dest image */
826 mr r3,r29
827 bl .kexec_copy_flush /* (image) */
828
829 /* turn off mmu */
830 bl real_mode
831
832 /* clear out hardware hash page table and tlb */
833 ld r5,0(r27) /* deref function descriptor */
834 mtctr r5
835 bctrl /* ppc_md.hash_clear_all(void); */
836
837/*
838 * kexec image calling is:
839 * the first 0x100 bytes of the entry point are copied to 0
840 *
841 * all slaves branch to slave = 0x60 (absolute)
842 * slave(phys_cpu_id);
843 *
844 * master goes to start = entry point
845 * start(phys_cpu_id, start, 0);
846 *
847 *
848 * a wrapper is needed to call existing kernels, here is an approximate
849 * description of one method:
850 *
851 * v2: (2.6.10)
852 * start will be near the boot_block (maybe 0x100 bytes before it?)
853 * it will have a 0x60, which will b to boot_block, where it will wait
854 * and 0 will store phys into struct boot-block and load r3 from there,
855 * copy kernel 0-0x100 and tell slaves to back down to 0x60 again
856 *
857 * v1: (2.6.9)
858 * boot block will have all cpus scanning device tree to see if they
859 * are the boot cpu ?????
860 * other device tree differences (prop sizes, va vs pa, etc)...
861 */
862
863 /* copy 0x100 bytes starting at start to 0 */
864 li r3,0
865 mr r4,r30
866 li r5,0x100
867 li r6,0
868 bl .copy_and_flush /* (dest, src, copy limit, start offset) */
8691: /* assume normal blr return */
870
871 /* release other cpus to the new kernel secondary start at 0x60 */
872 mflr r5
873 li r6,1
 874	stw	r6,kexec_flag-1b(r5)
875 mr r3,r25 # my phys cpu
876 mr r4,r30 # start, aka phys mem offset
 877	mtlr	r4
878 li r5,0
879 blr /* image->start(physid, image->start, 0); */
880#endif /* CONFIG_KEXEC */
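Spelled out as C prototypes, the entry convention described in the block comment above amounts to this (signatures hypothetical, for illustration):

	/* master: enters the new image at its entry point */
	void start(unsigned long phys_cpu_id, unsigned long start_addr, unsigned long zero);

	/* slaves: released from kexec_wait, branch to absolute address 0x60 */
	void slave(unsigned long phys_cpu_id);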
diff --git a/arch/powerpc/kernel/of_device.c b/arch/powerpc/kernel/of_device.c
new file mode 100644
index 000000000000..7065e40e2f42
--- /dev/null
+++ b/arch/powerpc/kernel/of_device.c
@@ -0,0 +1,276 @@
1#include <linux/config.h>
2#include <linux/string.h>
3#include <linux/kernel.h>
4#include <linux/init.h>
5#include <linux/module.h>
6#include <linux/mod_devicetable.h>
7#include <linux/slab.h>
8
9#include <asm/errno.h>
10#include <asm/of_device.h>
11
12/**
 13 * of_match_device - Tell if an of_device structure has a matching
 14 * of_device_id entry
 15 * @matches: array of of_device_id match structures to search in
16 * @dev: the of device structure to match against
17 *
18 * Used by a driver to check whether an of_device present in the
19 * system is in its list of supported devices.
20 */
21const struct of_device_id *of_match_device(const struct of_device_id *matches,
22 const struct of_device *dev)
23{
24 if (!dev->node)
25 return NULL;
26 while (matches->name[0] || matches->type[0] || matches->compatible[0]) {
27 int match = 1;
28 if (matches->name[0])
29 match &= dev->node->name
30 && !strcmp(matches->name, dev->node->name);
31 if (matches->type[0])
32 match &= dev->node->type
33 && !strcmp(matches->type, dev->node->type);
34 if (matches->compatible[0])
35 match &= device_is_compatible(dev->node,
36 matches->compatible);
37 if (match)
38 return matches;
39 matches++;
40 }
41 return NULL;
42}
43
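A match table is a zero-terminated array; an entry matches when every field it fills in agrees with the device node. For instance (the device strings are made up):

	static struct of_device_id acme_match[] = {
		{ .type = "network", .compatible = "acme,fastnet" },
		{ /* terminator */ },
	};

of_match_device(acme_match, ofdev) then returns the first entry that matches, or NULL.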
44static int of_platform_bus_match(struct device *dev, struct device_driver *drv)
45{
46 struct of_device * of_dev = to_of_device(dev);
47 struct of_platform_driver * of_drv = to_of_platform_driver(drv);
48 const struct of_device_id * matches = of_drv->match_table;
49
50 if (!matches)
51 return 0;
52
53 return of_match_device(matches, of_dev) != NULL;
54}
55
56struct of_device *of_dev_get(struct of_device *dev)
57{
58 struct device *tmp;
59
60 if (!dev)
61 return NULL;
62 tmp = get_device(&dev->dev);
63 if (tmp)
64 return to_of_device(tmp);
65 else
66 return NULL;
67}
68
69void of_dev_put(struct of_device *dev)
70{
71 if (dev)
72 put_device(&dev->dev);
73}
74
75
76static int of_device_probe(struct device *dev)
77{
78 int error = -ENODEV;
79 struct of_platform_driver *drv;
80 struct of_device *of_dev;
81 const struct of_device_id *match;
82
83 drv = to_of_platform_driver(dev->driver);
84 of_dev = to_of_device(dev);
85
86 if (!drv->probe)
87 return error;
88
89 of_dev_get(of_dev);
90
91 match = of_match_device(drv->match_table, of_dev);
92 if (match)
93 error = drv->probe(of_dev, match);
94 if (error)
95 of_dev_put(of_dev);
96
97 return error;
98}
99
100static int of_device_remove(struct device *dev)
101{
102 struct of_device * of_dev = to_of_device(dev);
103 struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
104
105 if (dev->driver && drv->remove)
106 drv->remove(of_dev);
107 return 0;
108}
109
110static int of_device_suspend(struct device *dev, pm_message_t state)
111{
112 struct of_device * of_dev = to_of_device(dev);
113 struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
114 int error = 0;
115
116 if (dev->driver && drv->suspend)
117 error = drv->suspend(of_dev, state);
118 return error;
119}
120
121static int of_device_resume(struct device * dev)
122{
123 struct of_device * of_dev = to_of_device(dev);
124 struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
125 int error = 0;
126
127 if (dev->driver && drv->resume)
128 error = drv->resume(of_dev);
129 return error;
130}
131
132struct bus_type of_platform_bus_type = {
133 .name = "of_platform",
134 .match = of_platform_bus_match,
135 .suspend = of_device_suspend,
136 .resume = of_device_resume,
137};
138
139static int __init of_bus_driver_init(void)
140{
141 return bus_register(&of_platform_bus_type);
142}
143
144postcore_initcall(of_bus_driver_init);
145
146int of_register_driver(struct of_platform_driver *drv)
147{
148 int count = 0;
149
150 /* initialize common driver fields */
151 drv->driver.name = drv->name;
152 drv->driver.bus = &of_platform_bus_type;
153 drv->driver.probe = of_device_probe;
154 drv->driver.remove = of_device_remove;
155
156 /* register with core */
157 count = driver_register(&drv->driver);
158 return count ? count : 1;
159}
160
161void of_unregister_driver(struct of_platform_driver *drv)
162{
163 driver_unregister(&drv->driver);
164}
165
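A minimal driver module built over these two calls might look like this, reusing the acme_match table from the example above (probe/remove bodies elided, all names hypothetical):

	static struct of_platform_driver acme_driver = {
		.name		= "acme-fastnet",
		.match_table	= acme_match,
		.probe		= acme_probe,
		.remove		= acme_remove,
	};

	static int __init acme_init(void)
	{
		int rc = of_register_driver(&acme_driver);
		return rc < 0 ? rc : 0;	/* of_register_driver returns 1 on success */
	}
	module_init(acme_init);

	static void __exit acme_exit(void)
	{
		of_unregister_driver(&acme_driver);
	}
	module_exit(acme_exit);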
166
167static ssize_t dev_show_devspec(struct device *dev, struct device_attribute *attr, char *buf)
168{
169 struct of_device *ofdev;
170
171 ofdev = to_of_device(dev);
172 return sprintf(buf, "%s", ofdev->node->full_name);
173}
174
175static DEVICE_ATTR(devspec, S_IRUGO, dev_show_devspec, NULL);
176
177/**
178 * of_release_dev - free an of device structure when all users of it are finished.
179 * @dev: device that's been disconnected
180 *
181 * Will be called only by the device core when all users of this of device are
182 * done.
183 */
184void of_release_dev(struct device *dev)
185{
186 struct of_device *ofdev;
187
188 ofdev = to_of_device(dev);
189 of_node_put(ofdev->node);
190 kfree(ofdev);
191}
192
193int of_device_register(struct of_device *ofdev)
194{
195 int rc;
196 struct of_device **odprop;
197
198 BUG_ON(ofdev->node == NULL);
199
200 odprop = (struct of_device **)get_property(ofdev->node, "linux,device", NULL);
201 if (!odprop) {
202 struct property *new_prop;
203
204 new_prop = kmalloc(sizeof(struct property) + sizeof(struct of_device *),
205 GFP_KERNEL);
206 if (new_prop == NULL)
207 return -ENOMEM;
208 new_prop->name = "linux,device";
 209	new_prop->length = sizeof(struct of_device *);
210 new_prop->value = (unsigned char *)&new_prop[1];
211 odprop = (struct of_device **)new_prop->value;
212 *odprop = NULL;
213 prom_add_property(ofdev->node, new_prop);
214 }
215 *odprop = ofdev;
216
217 rc = device_register(&ofdev->dev);
218 if (rc)
219 return rc;
220
221 device_create_file(&ofdev->dev, &dev_attr_devspec);
222
223 return 0;
224}
225
226void of_device_unregister(struct of_device *ofdev)
227{
228 struct of_device **odprop;
229
230 device_remove_file(&ofdev->dev, &dev_attr_devspec);
231
232 odprop = (struct of_device **)get_property(ofdev->node, "linux,device", NULL);
233 if (odprop)
234 *odprop = NULL;
235
236 device_unregister(&ofdev->dev);
237}
238
239struct of_device* of_platform_device_create(struct device_node *np,
240 const char *bus_id,
241 struct device *parent)
242{
243 struct of_device *dev;
244
245 dev = kmalloc(sizeof(*dev), GFP_KERNEL);
246 if (!dev)
247 return NULL;
248 memset(dev, 0, sizeof(*dev));
249
250 dev->node = of_node_get(np);
251 dev->dma_mask = 0xffffffffUL;
252 dev->dev.dma_mask = &dev->dma_mask;
253 dev->dev.parent = parent;
254 dev->dev.bus = &of_platform_bus_type;
255 dev->dev.release = of_release_dev;
256
257 strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE);
258
259 if (of_device_register(dev) != 0) {
260 kfree(dev);
261 return NULL;
262 }
263
264 return dev;
265}
266
267EXPORT_SYMBOL(of_match_device);
268EXPORT_SYMBOL(of_platform_bus_type);
269EXPORT_SYMBOL(of_register_driver);
270EXPORT_SYMBOL(of_unregister_driver);
271EXPORT_SYMBOL(of_device_register);
272EXPORT_SYMBOL(of_device_unregister);
273EXPORT_SYMBOL(of_dev_get);
274EXPORT_SYMBOL(of_dev_put);
275EXPORT_SYMBOL(of_platform_device_create);
276EXPORT_SYMBOL(of_release_dev);
diff --git a/arch/powerpc/kernel/pmc.c b/arch/powerpc/kernel/pmc.c
new file mode 100644
index 000000000000..2d333cc84082
--- /dev/null
+++ b/arch/powerpc/kernel/pmc.c
@@ -0,0 +1,112 @@
1/*
2 * arch/powerpc/kernel/pmc.c
3 *
4 * Copyright (C) 2004 David Gibson, IBM Corporation.
5 * Includes code formerly from arch/ppc/kernel/perfmon.c:
6 * Author: Andy Fleming
7 * Copyright (c) 2004 Freescale Semiconductor, Inc
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#include <linux/config.h>
16#include <linux/errno.h>
17#include <linux/spinlock.h>
18#include <linux/module.h>
19
20#include <asm/processor.h>
21#include <asm/pmc.h>
22
23#if defined(CONFIG_FSL_BOOKE) && !defined(CONFIG_E200)
24static void dummy_perf(struct pt_regs *regs)
25{
26 unsigned int pmgc0 = mfpmr(PMRN_PMGC0);
27
28 pmgc0 &= ~PMGC0_PMIE;
29 mtpmr(PMRN_PMGC0, pmgc0);
30}
31#elif defined(CONFIG_PPC64) || defined(CONFIG_6xx)
32
33#ifndef MMCR0_PMAO
34#define MMCR0_PMAO 0
35#endif
36
37/* Ensure exceptions are disabled */
38static void dummy_perf(struct pt_regs *regs)
39{
40 unsigned int mmcr0 = mfspr(SPRN_MMCR0);
41
42 mmcr0 &= ~(MMCR0_PMXE|MMCR0_PMAO);
43 mtspr(SPRN_MMCR0, mmcr0);
44}
45#else
46static void dummy_perf(struct pt_regs *regs)
47{
48}
49#endif
50
51static DEFINE_SPINLOCK(pmc_owner_lock);
52static void *pmc_owner_caller; /* mostly for debugging */
53perf_irq_t perf_irq = dummy_perf;
54
55int reserve_pmc_hardware(perf_irq_t new_perf_irq)
56{
57 int err = 0;
58
59 spin_lock(&pmc_owner_lock);
60
61 if (pmc_owner_caller) {
62 printk(KERN_WARNING "reserve_pmc_hardware: "
63 "PMC hardware busy (reserved by caller %p)\n",
64 pmc_owner_caller);
65 err = -EBUSY;
66 goto out;
67 }
68
69 pmc_owner_caller = __builtin_return_address(0);
70 perf_irq = new_perf_irq ? : dummy_perf;
71
72 out:
73 spin_unlock(&pmc_owner_lock);
74 return err;
75}
76EXPORT_SYMBOL_GPL(reserve_pmc_hardware);
77
78void release_pmc_hardware(void)
79{
80 spin_lock(&pmc_owner_lock);
81
 82	WARN_ON(!pmc_owner_caller);
83
84 pmc_owner_caller = NULL;
85 perf_irq = dummy_perf;
86
87 spin_unlock(&pmc_owner_lock);
88}
89EXPORT_SYMBOL_GPL(release_pmc_hardware);
90
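A profiling driver claims the counters once at init and installs its interrupt handler in the same call; a second claimant gets -EBUSY until release_pmc_hardware() runs. Sketch (handler and function names hypothetical):

	static void my_pmc_interrupt(struct pt_regs *regs)
	{
		/* read and reset the counters, rearm the interrupt ... */
	}

	static int __init my_pmc_init(void)
	{
		int err = reserve_pmc_hardware(my_pmc_interrupt);
		if (err)
			return err;	/* PMCs already owned */
		/* ... program the counters ... */
		return 0;
	}

with release_pmc_hardware() in the matching exit path.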
91#ifdef CONFIG_PPC64
92void power4_enable_pmcs(void)
93{
94 unsigned long hid0;
95
96 hid0 = mfspr(SPRN_HID0);
97 hid0 |= 1UL << (63 - 20);
98
99 /* POWER4 requires the following sequence */
100 asm volatile(
101 "sync\n"
102 "mtspr %1, %0\n"
103 "mfspr %0, %1\n"
104 "mfspr %0, %1\n"
105 "mfspr %0, %1\n"
106 "mfspr %0, %1\n"
107 "mfspr %0, %1\n"
108 "mfspr %0, %1\n"
109 "isync" : "=&r" (hid0) : "i" (SPRN_HID0), "0" (hid0):
110 "memory");
111}
112#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
new file mode 100644
index 000000000000..8bc540337ba0
--- /dev/null
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -0,0 +1,273 @@
1#include <linux/config.h>
2#include <linux/module.h>
3#include <linux/threads.h>
4#include <linux/smp.h>
5#include <linux/sched.h>
6#include <linux/elfcore.h>
7#include <linux/string.h>
8#include <linux/interrupt.h>
9#include <linux/tty.h>
10#include <linux/vt_kern.h>
11#include <linux/nvram.h>
12#include <linux/console.h>
13#include <linux/irq.h>
14#include <linux/pci.h>
15#include <linux/delay.h>
16#include <linux/ide.h>
17#include <linux/bitops.h>
18
19#include <asm/page.h>
20#include <asm/semaphore.h>
21#include <asm/processor.h>
22#include <asm/uaccess.h>
23#include <asm/io.h>
24#include <asm/ide.h>
25#include <asm/atomic.h>
26#include <asm/checksum.h>
27#include <asm/pgtable.h>
28#include <asm/tlbflush.h>
29#include <linux/adb.h>
30#include <linux/cuda.h>
31#include <linux/pmu.h>
32#include <asm/prom.h>
33#include <asm/system.h>
34#include <asm/pci-bridge.h>
35#include <asm/irq.h>
36#include <asm/pmac_feature.h>
37#include <asm/dma.h>
38#include <asm/machdep.h>
39#include <asm/hw_irq.h>
40#include <asm/nvram.h>
41#include <asm/mmu_context.h>
42#include <asm/backlight.h>
43#include <asm/time.h>
44#include <asm/cputable.h>
45#include <asm/btext.h>
46#include <asm/div64.h>
47
48#ifdef CONFIG_8xx
49#include <asm/commproc.h>
50#endif
51
52#ifdef CONFIG_PPC32
53extern void transfer_to_handler(void);
54extern void do_IRQ(struct pt_regs *regs);
55extern void machine_check_exception(struct pt_regs *regs);
56extern void alignment_exception(struct pt_regs *regs);
57extern void program_check_exception(struct pt_regs *regs);
58extern void single_step_exception(struct pt_regs *regs);
59extern int do_signal(sigset_t *, struct pt_regs *);
60extern int pmac_newworld;
61extern int sys_sigreturn(struct pt_regs *regs);
62
63EXPORT_SYMBOL(clear_pages);
64EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
65EXPORT_SYMBOL(DMA_MODE_READ);
66EXPORT_SYMBOL(DMA_MODE_WRITE);
67EXPORT_SYMBOL(__div64_32);
68
69EXPORT_SYMBOL(do_signal);
70EXPORT_SYMBOL(transfer_to_handler);
71EXPORT_SYMBOL(do_IRQ);
72EXPORT_SYMBOL(machine_check_exception);
73EXPORT_SYMBOL(alignment_exception);
74EXPORT_SYMBOL(program_check_exception);
75EXPORT_SYMBOL(single_step_exception);
76EXPORT_SYMBOL(sys_sigreturn);
77#endif
78
79#if defined(CONFIG_PPC_PREP)
80EXPORT_SYMBOL(_prep_type);
81EXPORT_SYMBOL(ucSystemType);
82#endif
83
84#if !defined(__INLINE_BITOPS)
85EXPORT_SYMBOL(set_bit);
86EXPORT_SYMBOL(clear_bit);
87EXPORT_SYMBOL(change_bit);
88EXPORT_SYMBOL(test_and_set_bit);
89EXPORT_SYMBOL(test_and_clear_bit);
90EXPORT_SYMBOL(test_and_change_bit);
91#endif /* __INLINE_BITOPS */
92
93EXPORT_SYMBOL(strcpy);
94EXPORT_SYMBOL(strncpy);
95EXPORT_SYMBOL(strcat);
96EXPORT_SYMBOL(strncat);
97EXPORT_SYMBOL(strchr);
98EXPORT_SYMBOL(strrchr);
99EXPORT_SYMBOL(strpbrk);
100EXPORT_SYMBOL(strstr);
101EXPORT_SYMBOL(strlen);
102EXPORT_SYMBOL(strnlen);
103EXPORT_SYMBOL(strcmp);
104EXPORT_SYMBOL(strncmp);
105EXPORT_SYMBOL(strcasecmp);
106
107EXPORT_SYMBOL(csum_partial);
108EXPORT_SYMBOL(csum_partial_copy_generic);
109EXPORT_SYMBOL(ip_fast_csum);
110EXPORT_SYMBOL(csum_tcpudp_magic);
111
112EXPORT_SYMBOL(__copy_tofrom_user);
113EXPORT_SYMBOL(__clear_user);
114EXPORT_SYMBOL(__strncpy_from_user);
115EXPORT_SYMBOL(__strnlen_user);
116
117EXPORT_SYMBOL(_insb);
118EXPORT_SYMBOL(_outsb);
119EXPORT_SYMBOL(_insw);
120EXPORT_SYMBOL(_outsw);
121EXPORT_SYMBOL(_insl);
122EXPORT_SYMBOL(_outsl);
123EXPORT_SYMBOL(_insw_ns);
124EXPORT_SYMBOL(_outsw_ns);
125EXPORT_SYMBOL(_insl_ns);
126EXPORT_SYMBOL(_outsl_ns);
127EXPORT_SYMBOL(ioremap);
128#ifdef CONFIG_44x
129EXPORT_SYMBOL(ioremap64);
130#endif
131EXPORT_SYMBOL(__ioremap);
132EXPORT_SYMBOL(iounmap);
133#ifdef CONFIG_PPC32
134EXPORT_SYMBOL(ioremap_bot); /* aka VMALLOC_END */
135#endif
136
137#if defined(CONFIG_PPC32) && (defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE))
138EXPORT_SYMBOL(ppc_ide_md);
139#endif
140
141#if defined(CONFIG_PCI) && defined(CONFIG_PPC32)
142EXPORT_SYMBOL(isa_io_base);
143EXPORT_SYMBOL(isa_mem_base);
144EXPORT_SYMBOL(pci_dram_offset);
145EXPORT_SYMBOL(pci_alloc_consistent);
146EXPORT_SYMBOL(pci_free_consistent);
147EXPORT_SYMBOL(pci_bus_io_base);
148EXPORT_SYMBOL(pci_bus_io_base_phys);
149EXPORT_SYMBOL(pci_bus_mem_base_phys);
150EXPORT_SYMBOL(pci_bus_to_hose);
151EXPORT_SYMBOL(pci_resource_to_bus);
152EXPORT_SYMBOL(pci_phys_to_bus);
153EXPORT_SYMBOL(pci_bus_to_phys);
154#endif /* CONFIG_PCI */
155
156#ifdef CONFIG_NOT_COHERENT_CACHE
157EXPORT_SYMBOL(flush_dcache_all);
158#endif
159
160EXPORT_SYMBOL(start_thread);
161EXPORT_SYMBOL(kernel_thread);
162
163EXPORT_SYMBOL(giveup_fpu);
164#ifdef CONFIG_ALTIVEC
165EXPORT_SYMBOL(giveup_altivec);
166#endif /* CONFIG_ALTIVEC */
167#ifdef CONFIG_SPE
168EXPORT_SYMBOL(giveup_spe);
169#endif /* CONFIG_SPE */
170
171#ifdef CONFIG_PPC64
172EXPORT_SYMBOL(__flush_icache_range);
173#else
174EXPORT_SYMBOL(flush_instruction_cache);
175EXPORT_SYMBOL(flush_icache_range);
176EXPORT_SYMBOL(flush_tlb_kernel_range);
177EXPORT_SYMBOL(flush_tlb_page);
178EXPORT_SYMBOL(_tlbie);
179#endif
180EXPORT_SYMBOL(flush_dcache_range);
181
182#ifdef CONFIG_SMP
183EXPORT_SYMBOL(smp_call_function);
184#ifdef CONFIG_PPC32
185EXPORT_SYMBOL(smp_hw_index);
186#endif
187#endif
188
189#ifdef CONFIG_ADB
190EXPORT_SYMBOL(adb_request);
191EXPORT_SYMBOL(adb_register);
192EXPORT_SYMBOL(adb_unregister);
193EXPORT_SYMBOL(adb_poll);
194EXPORT_SYMBOL(adb_try_handler_change);
195#endif /* CONFIG_ADB */
196#ifdef CONFIG_ADB_CUDA
197EXPORT_SYMBOL(cuda_request);
198EXPORT_SYMBOL(cuda_poll);
199#endif /* CONFIG_ADB_CUDA */
200#if defined(CONFIG_PPC_MULTIPLATFORM) && defined(CONFIG_PPC32)
201EXPORT_SYMBOL(_machine);
202#endif
203#ifdef CONFIG_PPC_PMAC
204EXPORT_SYMBOL(sys_ctrler);
205#endif
206#ifdef CONFIG_VT
207EXPORT_SYMBOL(kd_mksound);
208#endif
209EXPORT_SYMBOL(to_tm);
210
211#ifdef CONFIG_PPC32
212long long __ashrdi3(long long, int);
213long long __ashldi3(long long, int);
214long long __lshrdi3(long long, int);
215EXPORT_SYMBOL(__ashrdi3);
216EXPORT_SYMBOL(__ashldi3);
217EXPORT_SYMBOL(__lshrdi3);
218#endif
219
220EXPORT_SYMBOL(memcpy);
221EXPORT_SYMBOL(memset);
222EXPORT_SYMBOL(memmove);
223EXPORT_SYMBOL(memscan);
224EXPORT_SYMBOL(memcmp);
225EXPORT_SYMBOL(memchr);
226
227#if defined(CONFIG_FB_VGA16_MODULE)
228EXPORT_SYMBOL(screen_info);
229#endif
230
231#ifdef CONFIG_PPC32
232EXPORT_SYMBOL(__delay);
233EXPORT_SYMBOL(timer_interrupt);
234EXPORT_SYMBOL(irq_desc);
235EXPORT_SYMBOL(tb_ticks_per_jiffy);
236EXPORT_SYMBOL(console_drivers);
237EXPORT_SYMBOL(cacheable_memcpy);
238#endif
239
240EXPORT_SYMBOL(__up);
241EXPORT_SYMBOL(__down);
242EXPORT_SYMBOL(__down_interruptible);
243
244#ifdef CONFIG_8xx
245EXPORT_SYMBOL(cpm_install_handler);
246EXPORT_SYMBOL(cpm_free_handler);
247#endif /* CONFIG_8xx */
248#if defined(CONFIG_8xx) || defined(CONFIG_40x) || defined(CONFIG_85xx) ||\
249 defined(CONFIG_83xx)
250EXPORT_SYMBOL(__res);
251#endif
252
253#ifdef CONFIG_PPC32
254EXPORT_SYMBOL(next_mmu_context);
255EXPORT_SYMBOL(set_context);
256#endif
257
258#ifdef CONFIG_PPC_STD_MMU_32
259extern long mol_trampoline;
260EXPORT_SYMBOL(mol_trampoline); /* For MOL */
261EXPORT_SYMBOL(flush_hash_pages); /* For MOL */
262EXPORT_SYMBOL_GPL(__handle_mm_fault); /* For MOL */
263#ifdef CONFIG_SMP
264extern int mmu_hash_lock;
265EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */
266#endif /* CONFIG_SMP */
267extern long *intercept_table;
268EXPORT_SYMBOL(intercept_table);
269#endif /* CONFIG_PPC_STD_MMU_32 */
270#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
271EXPORT_SYMBOL(__mtdcr);
272EXPORT_SYMBOL(__mfdcr);
273#endif
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
new file mode 100644
index 000000000000..8f85dabe4df3
--- /dev/null
+++ b/arch/powerpc/kernel/process.c
@@ -0,0 +1,919 @@
1/*
2 * arch/ppc/kernel/process.c
3 *
4 * Derived from "arch/i386/kernel/process.c"
5 * Copyright (C) 1995 Linus Torvalds
6 *
7 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
8 * Paul Mackerras (paulus@cs.anu.edu.au)
9 *
10 * PowerPC version
11 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version.
17 */
18
19#include <linux/config.h>
20#include <linux/errno.h>
21#include <linux/sched.h>
22#include <linux/kernel.h>
23#include <linux/mm.h>
24#include <linux/smp.h>
25#include <linux/smp_lock.h>
26#include <linux/stddef.h>
27#include <linux/unistd.h>
28#include <linux/ptrace.h>
29#include <linux/slab.h>
30#include <linux/user.h>
31#include <linux/elf.h>
32#include <linux/init.h>
33#include <linux/prctl.h>
34#include <linux/init_task.h>
35#include <linux/module.h>
36#include <linux/kallsyms.h>
37#include <linux/mqueue.h>
38#include <linux/hardirq.h>
39#include <linux/utsname.h>
40#include <linux/kprobes.h>
41
42#include <asm/pgtable.h>
43#include <asm/uaccess.h>
44#include <asm/system.h>
45#include <asm/io.h>
46#include <asm/processor.h>
47#include <asm/mmu.h>
48#include <asm/prom.h>
49#ifdef CONFIG_PPC64
50#include <asm/firmware.h>
51#include <asm/plpar_wrappers.h>
52#include <asm/time.h>
53#endif
54
55extern unsigned long _get_SP(void);
56
57#ifndef CONFIG_SMP
58struct task_struct *last_task_used_math = NULL;
59struct task_struct *last_task_used_altivec = NULL;
60struct task_struct *last_task_used_spe = NULL;
61#endif
62
63/*
64 * Make sure the floating-point register state in the
 65 * thread_struct is up to date for task tsk.
66 */
67void flush_fp_to_thread(struct task_struct *tsk)
68{
69 if (tsk->thread.regs) {
70 /*
71 * We need to disable preemption here because if we didn't,
72 * another process could get scheduled after the regs->msr
73 * test but before we have finished saving the FP registers
74 * to the thread_struct. That process could take over the
75 * FPU, and then when we get scheduled again we would store
76 * bogus values for the remaining FP registers.
77 */
78 preempt_disable();
79 if (tsk->thread.regs->msr & MSR_FP) {
80#ifdef CONFIG_SMP
81 /*
82 * This should only ever be called for current or
83 * for a stopped child process. Since we save away
84 * the FP register state on context switch on SMP,
85 * there is something wrong if a stopped child appears
86 * to still have its FP state in the CPU registers.
87 */
88 BUG_ON(tsk != current);
89#endif
90 giveup_fpu(current);
91 }
92 preempt_enable();
93 }
94}
95
96void enable_kernel_fp(void)
97{
98 WARN_ON(preemptible());
99
100#ifdef CONFIG_SMP
101 if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
102 giveup_fpu(current);
103 else
104 giveup_fpu(NULL); /* just enables FP for kernel */
105#else
106 giveup_fpu(last_task_used_math);
107#endif /* CONFIG_SMP */
108}
109EXPORT_SYMBOL(enable_kernel_fp);
110
111int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
112{
113 if (!tsk->thread.regs)
114 return 0;
115 flush_fp_to_thread(current);
116
117 memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
118
119 return 1;
120}
121
122#ifdef CONFIG_ALTIVEC
123void enable_kernel_altivec(void)
124{
125 WARN_ON(preemptible());
126
127#ifdef CONFIG_SMP
128 if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
129 giveup_altivec(current);
130 else
131 giveup_altivec(NULL); /* just enable AltiVec for kernel - force */
132#else
133 giveup_altivec(last_task_used_altivec);
134#endif /* CONFIG_SMP */
135}
136EXPORT_SYMBOL(enable_kernel_altivec);
137
138/*
139 * Make sure the VMX/Altivec register state in the
 140 * thread_struct is up to date for task tsk.
141 */
142void flush_altivec_to_thread(struct task_struct *tsk)
143{
144 if (tsk->thread.regs) {
145 preempt_disable();
146 if (tsk->thread.regs->msr & MSR_VEC) {
147#ifdef CONFIG_SMP
148 BUG_ON(tsk != current);
149#endif
150 giveup_altivec(current);
151 }
152 preempt_enable();
153 }
154}
155
156int dump_task_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
157{
158 flush_altivec_to_thread(current);
159 memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
160 return 1;
161}
162#endif /* CONFIG_ALTIVEC */
163
164#ifdef CONFIG_SPE
165
166void enable_kernel_spe(void)
167{
168 WARN_ON(preemptible());
169
170#ifdef CONFIG_SMP
171 if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
172 giveup_spe(current);
173 else
174 giveup_spe(NULL); /* just enable SPE for kernel - force */
175#else
176 giveup_spe(last_task_used_spe);
 177#endif /* CONFIG_SMP */
178}
179EXPORT_SYMBOL(enable_kernel_spe);
180
181void flush_spe_to_thread(struct task_struct *tsk)
182{
183 if (tsk->thread.regs) {
184 preempt_disable();
185 if (tsk->thread.regs->msr & MSR_SPE) {
186#ifdef CONFIG_SMP
187 BUG_ON(tsk != current);
188#endif
189 giveup_spe(current);
190 }
191 preempt_enable();
192 }
193}
194
195int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
196{
197 flush_spe_to_thread(current);
198 /* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
199 memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
200 return 1;
201}
202#endif /* CONFIG_SPE */
203
204static void set_dabr_spr(unsigned long val)
205{
206 mtspr(SPRN_DABR, val);
207}
208
209int set_dabr(unsigned long dabr)
210{
211 int ret = 0;
212
213#ifdef CONFIG_PPC64
214 if (firmware_has_feature(FW_FEATURE_XDABR)) {
215 /* We want to catch accesses from kernel and userspace */
216 unsigned long flags = H_DABRX_KERNEL|H_DABRX_USER;
217 ret = plpar_set_xdabr(dabr, flags);
218 } else if (firmware_has_feature(FW_FEATURE_DABR)) {
219 ret = plpar_set_dabr(dabr);
220 } else
221#endif
222 set_dabr_spr(dabr);
223
224 return ret;
225}
226
227#ifdef CONFIG_PPC64
228DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
229static DEFINE_PER_CPU(unsigned long, current_dabr);
230#endif
231
232struct task_struct *__switch_to(struct task_struct *prev,
233 struct task_struct *new)
234{
235 struct thread_struct *new_thread, *old_thread;
236 unsigned long flags;
237 struct task_struct *last;
238
239#ifdef CONFIG_SMP
240 /* avoid complexity of lazy save/restore of fpu
241 * by just saving it every time we switch out if
242 * this task used the fpu during the last quantum.
243 *
244 * If it tries to use the fpu again, it'll trap and
245 * reload its fp regs. So we don't have to do a restore
246 * every switch, just a save.
247 * -- Cort
248 */
249 if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
250 giveup_fpu(prev);
251#ifdef CONFIG_ALTIVEC
252 /*
253 * If the previous thread used altivec in the last quantum
254 * (thus changing altivec regs) then save them.
255 * We used to check the VRSAVE register but not all apps
256 * set it, so we don't rely on it now (and in fact we need
257 * to save & restore VSCR even if VRSAVE == 0). -- paulus
258 *
259 * On SMP we always save/restore altivec regs just to avoid the
260 * complexity of changing processors.
261 * -- Cort
262 */
263 if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
264 giveup_altivec(prev);
265#endif /* CONFIG_ALTIVEC */
266#ifdef CONFIG_SPE
267 /*
268 * If the previous thread used spe in the last quantum
269 * (thus changing spe regs) then save them.
270 *
271 * On SMP we always save/restore spe regs just to avoid the
272 * complexity of changing processors.
273 */
274 if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
275 giveup_spe(prev);
276#endif /* CONFIG_SPE */
277
278#else /* CONFIG_SMP */
279#ifdef CONFIG_ALTIVEC
 280	/* Avoid the trap. On SMP this never happens since
281 * we don't set last_task_used_altivec -- Cort
282 */
283 if (new->thread.regs && last_task_used_altivec == new)
284 new->thread.regs->msr |= MSR_VEC;
285#endif /* CONFIG_ALTIVEC */
286#ifdef CONFIG_SPE
 287	/* Avoid the trap. On SMP this never happens since
288 * we don't set last_task_used_spe
289 */
290 if (new->thread.regs && last_task_used_spe == new)
291 new->thread.regs->msr |= MSR_SPE;
292#endif /* CONFIG_SPE */
293
294#endif /* CONFIG_SMP */
295
296#ifdef CONFIG_PPC64 /* for now */
297 if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) {
298 set_dabr(new->thread.dabr);
299 __get_cpu_var(current_dabr) = new->thread.dabr;
300 }
301
302 flush_tlb_pending();
303#endif
304
305 new_thread = &new->thread;
306 old_thread = &current->thread;
307
308#ifdef CONFIG_PPC64
309 /*
310 * Collect processor utilization data per process
311 */
312 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
313 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
314 long unsigned start_tb, current_tb;
315 start_tb = old_thread->start_tb;
316 cu->current_tb = current_tb = mfspr(SPRN_PURR);
317 old_thread->accum_tb += (current_tb - start_tb);
318 new_thread->start_tb = current_tb;
319 }
320#endif
321
322 local_irq_save(flags);
323 last = _switch(old_thread, new_thread);
324
325 local_irq_restore(flags);
326
327 return last;
328}
329
330static int instructions_to_print = 16;
331
332#ifdef CONFIG_PPC64
333#define BAD_PC(pc) ((REGION_ID(pc) != KERNEL_REGION_ID) && \
334 (REGION_ID(pc) != VMALLOC_REGION_ID))
335#else
336#define BAD_PC(pc) ((pc) < KERNELBASE)
337#endif
338
339static void show_instructions(struct pt_regs *regs)
340{
341 int i;
342 unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
343 sizeof(int));
344
345 printk("Instruction dump:");
346
347 for (i = 0; i < instructions_to_print; i++) {
348 int instr;
349
350 if (!(i % 8))
351 printk("\n");
352
353 if (BAD_PC(pc) || __get_user(instr, (unsigned int *)pc)) {
354 printk("XXXXXXXX ");
355 } else {
356 if (regs->nip == pc)
357 printk("<%08x> ", instr);
358 else
359 printk("%08x ", instr);
360 }
361
362 pc += sizeof(int);
363 }
364
365 printk("\n");
366}
367
368static struct regbit {
369 unsigned long bit;
370 const char *name;
371} msr_bits[] = {
372 {MSR_EE, "EE"},
373 {MSR_PR, "PR"},
374 {MSR_FP, "FP"},
375 {MSR_ME, "ME"},
376 {MSR_IR, "IR"},
377 {MSR_DR, "DR"},
378 {0, NULL}
379};
380
381static void printbits(unsigned long val, struct regbit *bits)
382{
383 const char *sep = "";
384
385 printk("<");
386 for (; bits->bit; ++bits)
387 if (val & bits->bit) {
388 printk("%s%s", sep, bits->name);
389 sep = ",";
390 }
391 printk(">");
392}
393
394#ifdef CONFIG_PPC64
395#define REG "%016lX"
396#define REGS_PER_LINE 4
397#define LAST_VOLATILE 13
398#else
399#define REG "%08lX"
400#define REGS_PER_LINE 8
401#define LAST_VOLATILE 12
402#endif
403
404void show_regs(struct pt_regs * regs)
405{
406 int i, trap;
407
408 printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
409 regs->nip, regs->link, regs->ctr);
410 printk("REGS: %p TRAP: %04lx %s (%s)\n",
411 regs, regs->trap, print_tainted(), system_utsname.release);
412 printk("MSR: "REG" ", regs->msr);
413 printbits(regs->msr, msr_bits);
414 printk(" CR: %08lX XER: %08lX\n", regs->ccr, regs->xer);
415 trap = TRAP(regs);
416 if (trap == 0x300 || trap == 0x600)
417 printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr);
418 printk("TASK = %p[%d] '%s' THREAD: %p",
419 current, current->pid, current->comm, current->thread_info);
420
421#ifdef CONFIG_SMP
422 printk(" CPU: %d", smp_processor_id());
423#endif /* CONFIG_SMP */
424
425 for (i = 0; i < 32; i++) {
426 if ((i % REGS_PER_LINE) == 0)
427 printk("\n" KERN_INFO "GPR%02d: ", i);
428 printk(REG " ", regs->gpr[i]);
429 if (i == LAST_VOLATILE && !FULL_REGS(regs))
430 break;
431 }
432 printk("\n");
433#ifdef CONFIG_KALLSYMS
434 /*
 435	 * Lookup NIP late so we have the best chance of getting the
436 * above info out without failing
437 */
438 printk("NIP ["REG"] ", regs->nip);
439 print_symbol("%s\n", regs->nip);
440 printk("LR ["REG"] ", regs->link);
441 print_symbol("%s\n", regs->link);
442#endif
443 show_stack(current, (unsigned long *) regs->gpr[1]);
444 if (!user_mode(regs))
445 show_instructions(regs);
446}
447
448void exit_thread(void)
449{
450 kprobe_flush_task(current);
451
452#ifndef CONFIG_SMP
453 if (last_task_used_math == current)
454 last_task_used_math = NULL;
455#ifdef CONFIG_ALTIVEC
456 if (last_task_used_altivec == current)
457 last_task_used_altivec = NULL;
458#endif /* CONFIG_ALTIVEC */
459#ifdef CONFIG_SPE
460 if (last_task_used_spe == current)
461 last_task_used_spe = NULL;
462#endif
463#endif /* CONFIG_SMP */
464}
465
466void flush_thread(void)
467{
468#ifdef CONFIG_PPC64
469 struct thread_info *t = current_thread_info();
470
471 if (t->flags & _TIF_ABI_PENDING)
472 t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT);
473#endif
474 kprobe_flush_task(current);
475
476#ifndef CONFIG_SMP
477 if (last_task_used_math == current)
478 last_task_used_math = NULL;
479#ifdef CONFIG_ALTIVEC
480 if (last_task_used_altivec == current)
481 last_task_used_altivec = NULL;
482#endif /* CONFIG_ALTIVEC */
483#ifdef CONFIG_SPE
484 if (last_task_used_spe == current)
485 last_task_used_spe = NULL;
486#endif
487#endif /* CONFIG_SMP */
488
489#ifdef CONFIG_PPC64 /* for now */
490 if (current->thread.dabr) {
491 current->thread.dabr = 0;
492 set_dabr(0);
493 }
494#endif
495}
496
497void
498release_thread(struct task_struct *t)
499{
500}
501
502/*
503 * This gets called before we allocate a new thread and copy
504 * the current task into it.
505 */
506void prepare_to_copy(struct task_struct *tsk)
507{
508 flush_fp_to_thread(current);
509 flush_altivec_to_thread(current);
510 flush_spe_to_thread(current);
511}
512
513/*
514 * Copy a thread..
515 */
516int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
517 unsigned long unused, struct task_struct *p,
518 struct pt_regs *regs)
519{
520 struct pt_regs *childregs, *kregs;
521 extern void ret_from_fork(void);
522 unsigned long sp = (unsigned long)p->thread_info + THREAD_SIZE;
523
524 CHECK_FULL_REGS(regs);
525 /* Copy registers */
526 sp -= sizeof(struct pt_regs);
527 childregs = (struct pt_regs *) sp;
528 *childregs = *regs;
529 if ((childregs->msr & MSR_PR) == 0) {
530 /* for kernel thread, set `current' and stackptr in new task */
531 childregs->gpr[1] = sp + sizeof(struct pt_regs);
532#ifdef CONFIG_PPC32
533 childregs->gpr[2] = (unsigned long) p;
534#else
535 clear_ti_thread_flag(p->thread_info, TIF_32BIT);
536#endif
537 p->thread.regs = NULL; /* no user register state */
538 } else {
539 childregs->gpr[1] = usp;
540 p->thread.regs = childregs;
541 if (clone_flags & CLONE_SETTLS) {
542#ifdef CONFIG_PPC64
543 if (!test_thread_flag(TIF_32BIT))
544 childregs->gpr[13] = childregs->gpr[6];
545 else
546#endif
547 childregs->gpr[2] = childregs->gpr[6];
548 }
549 }
550 childregs->gpr[3] = 0; /* Result from fork() */
551 sp -= STACK_FRAME_OVERHEAD;
552
553 /*
554 * The way this works is that at some point in the future
555 * some task will call _switch to switch to the new task.
556 * That will pop off the stack frame created below and start
557 * the new task running at ret_from_fork. The new task will
 558	 * do some housekeeping and then return from the fork or clone
559 * system call, using the stack frame created above.
560 */
561 sp -= sizeof(struct pt_regs);
562 kregs = (struct pt_regs *) sp;
563 sp -= STACK_FRAME_OVERHEAD;
564 p->thread.ksp = sp;
565
566#ifdef CONFIG_PPC64
567 if (cpu_has_feature(CPU_FTR_SLB)) {
568 unsigned long sp_vsid = get_kernel_vsid(sp);
569
570 sp_vsid <<= SLB_VSID_SHIFT;
571 sp_vsid |= SLB_VSID_KERNEL;
572 if (cpu_has_feature(CPU_FTR_16M_PAGE))
573 sp_vsid |= SLB_VSID_L;
574
575 p->thread.ksp_vsid = sp_vsid;
576 }
577
578 /*
579 * The PPC64 ABI makes use of a TOC to contain function
 580	 * pointers. The function symbol (ret_from_fork) actually points to
 581	 * its function descriptor. The first entry of the descriptor is the
 582	 * address of the actual function.
583 */
584 kregs->nip = *((unsigned long *)ret_from_fork);
585#else
586 kregs->nip = (unsigned long)ret_from_fork;
587 p->thread.last_syscall = -1;
588#endif
589
590 return 0;
591}
592
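For reference, the function descriptor that the comment in copy_thread refers to has the standard ppc64 ELF layout:

	struct func_desc {
		unsigned long entry;	/* address of the actual code */
		unsigned long toc;	/* r2 (TOC) value for the function */
		unsigned long env;	/* environment pointer, unused by C */
	};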
593/*
594 * Set up a thread for executing a new program
595 */
596void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
597{
598#ifdef CONFIG_PPC64
599 unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
600#endif
601
602 set_fs(USER_DS);
603
604 /*
605 * If we exec out of a kernel thread then thread.regs will not be
606 * set. Do it now.
607 */
608 if (!current->thread.regs) {
609 unsigned long childregs = (unsigned long)current->thread_info +
610 THREAD_SIZE;
611 childregs -= sizeof(struct pt_regs);
612 current->thread.regs = (struct pt_regs *)childregs;
613 }
614
615 memset(regs->gpr, 0, sizeof(regs->gpr));
616 regs->ctr = 0;
617 regs->link = 0;
618 regs->xer = 0;
619 regs->ccr = 0;
620 regs->gpr[1] = sp;
621
622#ifdef CONFIG_PPC32
623 regs->mq = 0;
624 regs->nip = start;
625 regs->msr = MSR_USER;
626#else
627 if (!test_thread_flag(TIF_32BIT)) {
628 unsigned long entry, toc;
629
630 /* start is a relocated pointer to the function descriptor for
631 * the elf _start routine. The first entry in the function
632 * descriptor is the entry address of _start and the second
633 * entry is the TOC value we need to use.
634 */
635 __get_user(entry, (unsigned long __user *)start);
636 __get_user(toc, (unsigned long __user *)start+1);
637
638 /* Check whether the e_entry function descriptor entries
639 * need to be relocated before we can use them.
640 */
641 if (load_addr != 0) {
642 entry += load_addr;
643 toc += load_addr;
644 }
645 regs->nip = entry;
646 regs->gpr[2] = toc;
647 regs->msr = MSR_USER64;
648 } else {
649 regs->nip = start;
650 regs->gpr[2] = 0;
651 regs->msr = MSR_USER32;
652 }
653#endif
654
655#ifndef CONFIG_SMP
656 if (last_task_used_math == current)
657 last_task_used_math = NULL;
658#ifdef CONFIG_ALTIVEC
659 if (last_task_used_altivec == current)
660 last_task_used_altivec = NULL;
661#endif
662#ifdef CONFIG_SPE
663 if (last_task_used_spe == current)
664 last_task_used_spe = NULL;
665#endif
666#endif /* CONFIG_SMP */
667 memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
668 current->thread.fpscr.val = 0;
669#ifdef CONFIG_ALTIVEC
670 memset(current->thread.vr, 0, sizeof(current->thread.vr));
671 memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
672 current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
673 current->thread.vrsave = 0;
674 current->thread.used_vr = 0;
675#endif /* CONFIG_ALTIVEC */
676#ifdef CONFIG_SPE
677 memset(current->thread.evr, 0, sizeof(current->thread.evr));
678 current->thread.acc = 0;
679 current->thread.spefscr = 0;
680 current->thread.used_spe = 0;
681#endif /* CONFIG_SPE */
682}
683
684#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
685 | PR_FP_EXC_RES | PR_FP_EXC_INV)
686
687int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
688{
689 struct pt_regs *regs = tsk->thread.regs;
690
691 /* This is a bit hairy. If we are an SPE enabled processor
692 * (have embedded fp) we store the IEEE exception enable flags in
693 * fpexc_mode. fpexc_mode is also used for setting FP exception
 694	 * mode (async, precise, disabled) for 'Classic' FP. */
695 if (val & PR_FP_EXC_SW_ENABLE) {
696#ifdef CONFIG_SPE
697 tsk->thread.fpexc_mode = val &
698 (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
699 return 0;
700#else
701 return -EINVAL;
702#endif
703 }
704
 705	/* On a CONFIG_SPE implementation this does not hurt us. The bits
 706	 * that __pack_fe01 uses do not overlap with the bits used for
707 * PR_FP_EXC_SW_ENABLE. Additionally, the MSR[FE0,FE1] bits
708 * on CONFIG_SPE implementations are reserved so writing to
709 * them does not change anything */
710 if (val > PR_FP_EXC_PRECISE)
711 return -EINVAL;
712 tsk->thread.fpexc_mode = __pack_fe01(val);
713 if (regs != NULL && (regs->msr & MSR_FP) != 0)
714 regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
715 | tsk->thread.fpexc_mode;
716 return 0;
717}
718
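Userspace reaches set_fpexc_mode()/get_fpexc_mode() through prctl(2); for example, a program asking for precise FP exceptions (PR_* constants come from linux/prctl.h):

	#include <stdio.h>
	#include <sys/prctl.h>
	#include <linux/prctl.h>

	int main(void)
	{
		if (prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE) < 0)
			perror("PR_SET_FPEXC");
		return 0;
	}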
719int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
720{
721 unsigned int val;
722
723 if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
724#ifdef CONFIG_SPE
725 val = tsk->thread.fpexc_mode;
726#else
727 return -EINVAL;
728#endif
729 else
730 val = __unpack_fe01(tsk->thread.fpexc_mode);
731 return put_user(val, (unsigned int __user *) adr);
732}
733
734#define TRUNC_PTR(x) ((typeof(x))(((unsigned long)(x)) & 0xffffffff))
735
736int sys_clone(unsigned long clone_flags, unsigned long usp,
737 int __user *parent_tidp, void __user *child_threadptr,
738 int __user *child_tidp, int p6,
739 struct pt_regs *regs)
740{
741 CHECK_FULL_REGS(regs);
742 if (usp == 0)
743 usp = regs->gpr[1]; /* stack pointer for child */
744#ifdef CONFIG_PPC64
745 if (test_thread_flag(TIF_32BIT)) {
746 parent_tidp = TRUNC_PTR(parent_tidp);
747 child_tidp = TRUNC_PTR(child_tidp);
748 }
749#endif
750 return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
751}
752
753int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
754 unsigned long p4, unsigned long p5, unsigned long p6,
755 struct pt_regs *regs)
756{
757 CHECK_FULL_REGS(regs);
758 return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
759}
760
761int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
762 unsigned long p4, unsigned long p5, unsigned long p6,
763 struct pt_regs *regs)
764{
765 CHECK_FULL_REGS(regs);
766 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
767 regs, 0, NULL, NULL);
768}
769
770int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
771 unsigned long a3, unsigned long a4, unsigned long a5,
772 struct pt_regs *regs)
773{
774 int error;
775 char *filename;
776
777 filename = getname((char __user *) a0);
778 error = PTR_ERR(filename);
779 if (IS_ERR(filename))
780 goto out;
781 flush_fp_to_thread(current);
782 flush_altivec_to_thread(current);
783 flush_spe_to_thread(current);
784 error = do_execve(filename, (char __user * __user *) a1,
785 (char __user * __user *) a2, regs);
786 if (error == 0) {
787 task_lock(current);
788 current->ptrace &= ~PT_DTRACE;
789 task_unlock(current);
790 }
791 putname(filename);
792out:
793 return error;
794}
795
796static int validate_sp(unsigned long sp, struct task_struct *p,
797 unsigned long nbytes)
798{
799 unsigned long stack_page = (unsigned long)p->thread_info;
800
801 if (sp >= stack_page + sizeof(struct thread_struct)
802 && sp <= stack_page + THREAD_SIZE - nbytes)
803 return 1;
804
805#ifdef CONFIG_IRQSTACKS
806 stack_page = (unsigned long) hardirq_ctx[task_cpu(p)];
807 if (sp >= stack_page + sizeof(struct thread_struct)
808 && sp <= stack_page + THREAD_SIZE - nbytes)
809 return 1;
810
811 stack_page = (unsigned long) softirq_ctx[task_cpu(p)];
812 if (sp >= stack_page + sizeof(struct thread_struct)
813 && sp <= stack_page + THREAD_SIZE - nbytes)
814 return 1;
815#endif
816
817 return 0;
818}
819
820#ifdef CONFIG_PPC64
821#define MIN_STACK_FRAME 112 /* same as STACK_FRAME_OVERHEAD, in fact */
822#define FRAME_LR_SAVE 2
823#define INT_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD + 288)
 824#define REGS_MARKER	0x7265677368657265ul	/* "regshere" */
825#define FRAME_MARKER 12
826#else
827#define MIN_STACK_FRAME 16
828#define FRAME_LR_SAVE 1
829#define INT_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD)
 830#define REGS_MARKER	0x72656773ul	/* "regs" */
831#define FRAME_MARKER 2
832#endif
833
834unsigned long get_wchan(struct task_struct *p)
835{
836 unsigned long ip, sp;
837 int count = 0;
838
839 if (!p || p == current || p->state == TASK_RUNNING)
840 return 0;
841
842 sp = p->thread.ksp;
843 if (!validate_sp(sp, p, MIN_STACK_FRAME))
844 return 0;
845
846 do {
847 sp = *(unsigned long *)sp;
848 if (!validate_sp(sp, p, MIN_STACK_FRAME))
849 return 0;
850 if (count > 0) {
851 ip = ((unsigned long *)sp)[FRAME_LR_SAVE];
852 if (!in_sched_functions(ip))
853 return ip;
854 }
855 } while (count++ < 16);
856 return 0;
857}
858EXPORT_SYMBOL(get_wchan);
859
860static int kstack_depth_to_print = 64;
861
862void show_stack(struct task_struct *tsk, unsigned long *stack)
863{
864 unsigned long sp, ip, lr, newsp;
865 int count = 0;
866 int firstframe = 1;
867
868 sp = (unsigned long) stack;
869 if (tsk == NULL)
870 tsk = current;
871 if (sp == 0) {
872 if (tsk == current)
873 asm("mr %0,1" : "=r" (sp));
874 else
875 sp = tsk->thread.ksp;
876 }
877
878 lr = 0;
879 printk("Call Trace:\n");
880 do {
881 if (!validate_sp(sp, tsk, MIN_STACK_FRAME))
882 return;
883
884 stack = (unsigned long *) sp;
885 newsp = stack[0];
886 ip = stack[FRAME_LR_SAVE];
887 if (!firstframe || ip != lr) {
888 printk("["REG"] ["REG"] ", sp, ip);
889 print_symbol("%s", ip);
890 if (firstframe)
891 printk(" (unreliable)");
892 printk("\n");
893 }
894 firstframe = 0;
895
896 /*
897 * See if this is an exception frame.
898 * We look for the "regshere" marker in the current frame.
899 */
900 if (validate_sp(sp, tsk, INT_FRAME_SIZE)
901 && stack[FRAME_MARKER] == REGS_MARKER) {
902 struct pt_regs *regs = (struct pt_regs *)
903 (sp + STACK_FRAME_OVERHEAD);
904 printk("--- Exception: %lx", regs->trap);
905 print_symbol(" at %s\n", regs->nip);
906 lr = regs->link;
907 print_symbol(" LR = %s\n", lr);
908 firstframe = 1;
909 }
910
911 sp = newsp;
912 } while (count++ < kstack_depth_to_print);
913}
914
915void dump_stack(void)
916{
917 show_stack(current, NULL);
918}
919EXPORT_SYMBOL(dump_stack);
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
new file mode 100644
index 000000000000..2eccd0e159e3
--- /dev/null
+++ b/arch/powerpc/kernel/prom.c
@@ -0,0 +1,2170 @@
1/*
2 * Procedures for creating, accessing and interpreting the device tree.
3 *
4 * Paul Mackerras August 1996.
5 * Copyright (C) 1996-2005 Paul Mackerras.
6 *
7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8 * {engebret|bergner}@us.ibm.com
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#undef DEBUG
17
18#include <stdarg.h>
19#include <linux/config.h>
20#include <linux/kernel.h>
21#include <linux/string.h>
22#include <linux/init.h>
23#include <linux/threads.h>
24#include <linux/spinlock.h>
25#include <linux/types.h>
26#include <linux/pci.h>
27#include <linux/stringify.h>
28#include <linux/delay.h>
29#include <linux/initrd.h>
30#include <linux/bitops.h>
31#include <linux/module.h>
32
33#include <asm/prom.h>
34#include <asm/rtas.h>
35#include <asm/lmb.h>
36#include <asm/page.h>
37#include <asm/processor.h>
38#include <asm/irq.h>
39#include <asm/io.h>
40#include <asm/smp.h>
41#include <asm/system.h>
42#include <asm/mmu.h>
43#include <asm/pgtable.h>
44#include <asm/pci.h>
45#include <asm/iommu.h>
46#include <asm/btext.h>
47#include <asm/sections.h>
48#include <asm/machdep.h>
49#include <asm/pSeries_reconfig.h>
50#include <asm/pci-bridge.h>
51#ifdef CONFIG_PPC64
52#include <asm/systemcfg.h>
53#endif
54
55#ifdef DEBUG
56#define DBG(fmt...) printk(KERN_ERR fmt)
57#else
58#define DBG(fmt...)
59#endif
60
61struct pci_reg_property {
62 struct pci_address addr;
63 u32 size_hi;
64 u32 size_lo;
65};
66
67struct isa_reg_property {
68 u32 space;
69 u32 address;
70 u32 size;
71};
72
73
74typedef int interpret_func(struct device_node *, unsigned long *,
75 int, int, int);
76
77extern struct rtas_t rtas;
78extern struct lmb lmb;
79extern unsigned long klimit;
80
81static int __initdata dt_root_addr_cells;
82static int __initdata dt_root_size_cells;
83
84#ifdef CONFIG_PPC64
85static int __initdata iommu_is_off;
86int __initdata iommu_force_on;
87unsigned long tce_alloc_start, tce_alloc_end;
88#endif
89
90typedef u32 cell_t;
91
92#if 0
93static struct boot_param_header *initial_boot_params __initdata;
94#else
95struct boot_param_header *initial_boot_params;
96#endif
97
98static struct device_node *allnodes = NULL;
99
100/* use when traversing tree through the allnext, child, sibling,
101 * or parent members of struct device_node.
102 */
103static DEFINE_RWLOCK(devtree_lock);
104
105/* export that to outside world */
106struct device_node *of_chosen;
107
108struct device_node *dflt_interrupt_controller;
109int num_interrupt_controllers;
110
111/*
112 * Wrapper for allocating memory for various data that needs to be
113 * attached to device nodes as they are processed at boot or when
114 * added to the device tree later (e.g. DLPAR). At boot there is
115 * already a region reserved so we just increment *mem_start by size;
116 * otherwise we call kmalloc.
117 */
118static void * prom_alloc(unsigned long size, unsigned long *mem_start)
119{
120 unsigned long tmp;
121
122 if (!mem_start)
123 return kmalloc(size, GFP_KERNEL);
124
125 tmp = *mem_start;
126 *mem_start += size;
127 return (void *)tmp;
128}
129
130/*
131 * Find the device_node with a given phandle.
132 */
133static struct device_node * find_phandle(phandle ph)
134{
135 struct device_node *np;
136
137 for (np = allnodes; np != 0; np = np->allnext)
138 if (np->linux_phandle == ph)
139 return np;
140 return NULL;
141}
142
143/*
144 * Find the interrupt parent of a node.
145 */
146static struct device_node * __devinit intr_parent(struct device_node *p)
147{
148 phandle *parp;
149
150 parp = (phandle *) get_property(p, "interrupt-parent", NULL);
151 if (parp == NULL)
152 return p->parent;
153 p = find_phandle(*parp);
154 if (p != NULL)
155 return p;
156 /*
157 * On a powermac booted with BootX, we don't get to know the
158 * phandles for any nodes, so find_phandle will return NULL.
159 * Fortunately these machines only have one interrupt controller
160 * so there isn't in fact any ambiguity. -- paulus
161 */
162 if (num_interrupt_controllers == 1)
163 p = dflt_interrupt_controller;
164 return p;
165}
166
167/*
168 * Find out the size of each entry of the interrupts property
169 * for a node.
170 */
171int __devinit prom_n_intr_cells(struct device_node *np)
172{
173 struct device_node *p;
174 unsigned int *icp;
175
176 for (p = np; (p = intr_parent(p)) != NULL; ) {
177 icp = (unsigned int *)
178 get_property(p, "#interrupt-cells", NULL);
179 if (icp != NULL)
180 return *icp;
181 if (get_property(p, "interrupt-controller", NULL) != NULL
182 || get_property(p, "interrupt-map", NULL) != NULL) {
183 printk("oops, node %s doesn't have #interrupt-cells\n",
184 p->full_name);
185 return 1;
186 }
187 }
188#ifdef DEBUG_IRQ
189 printk("prom_n_intr_cells failed for %s\n", np->full_name);
190#endif
191 return 1;
192}
193
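For example, if a node's interrupt parent reports #interrupt-cells = 2, its "interrupts" property is read as consecutive 2-cell specifiers, so four cells describe two interrupts (values illustrative):

	u32 ints[] = { 0x1a, 1, 0x1b, 1 };	/* two (number, sense) pairs */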
194/*
195 * Map an interrupt from a device up to the platform interrupt
196 * descriptor.
197 */
198static int __devinit map_interrupt(unsigned int **irq, struct device_node **ictrler,
199 struct device_node *np, unsigned int *ints,
200 int nintrc)
201{
202 struct device_node *p, *ipar;
203 unsigned int *imap, *imask, *ip;
204 int i, imaplen, match;
205 int newintrc = 0, newaddrc = 0;
206 unsigned int *reg;
207 int naddrc;
208
209 reg = (unsigned int *) get_property(np, "reg", NULL);
210 naddrc = prom_n_addr_cells(np);
211 p = intr_parent(np);
212 while (p != NULL) {
213 if (get_property(p, "interrupt-controller", NULL) != NULL)
214 /* this node is an interrupt controller, stop here */
215 break;
216 imap = (unsigned int *)
217 get_property(p, "interrupt-map", &imaplen);
218 if (imap == NULL) {
219 p = intr_parent(p);
220 continue;
221 }
222 imask = (unsigned int *)
223 get_property(p, "interrupt-map-mask", NULL);
224 if (imask == NULL) {
225 printk("oops, %s has interrupt-map but no mask\n",
226 p->full_name);
227 return 0;
228 }
229 imaplen /= sizeof(unsigned int);
230 match = 0;
231 ipar = NULL;
232 while (imaplen > 0 && !match) {
233 /* check the child-interrupt field */
234 match = 1;
235 for (i = 0; i < naddrc && match; ++i)
236 match = ((reg[i] ^ imap[i]) & imask[i]) == 0;
237 for (; i < naddrc + nintrc && match; ++i)
238 match = ((ints[i-naddrc] ^ imap[i]) & imask[i]) == 0;
239 imap += naddrc + nintrc;
240 imaplen -= naddrc + nintrc;
241 /* grab the interrupt parent */
242 ipar = find_phandle((phandle) *imap++);
243 --imaplen;
244 if (ipar == NULL && num_interrupt_controllers == 1)
245 /* cope with BootX not giving us phandles */
246 ipar = dflt_interrupt_controller;
247 if (ipar == NULL) {
248 printk("oops, no int parent %x in map of %s\n",
249 imap[-1], p->full_name);
250 return 0;
251 }
252 /* find the parent's # addr and intr cells */
253 ip = (unsigned int *)
254 get_property(ipar, "#interrupt-cells", NULL);
255 if (ip == NULL) {
256 printk("oops, no #interrupt-cells on %s\n",
257 ipar->full_name);
258 return 0;
259 }
260 newintrc = *ip;
261 ip = (unsigned int *)
262 get_property(ipar, "#address-cells", NULL);
263 newaddrc = (ip == NULL)? 0: *ip;
264 imap += newaddrc + newintrc;
265 imaplen -= newaddrc + newintrc;
266 }
267 if (imaplen < 0) {
268 printk("oops, error decoding int-map on %s, len=%d\n",
269 p->full_name, imaplen);
270 return 0;
271 }
272 if (!match) {
273#ifdef DEBUG_IRQ
274 printk("oops, no match in %s int-map for %s\n",
275 p->full_name, np->full_name);
276#endif
277 return 0;
278 }
279 p = ipar;
280 naddrc = newaddrc;
281 nintrc = newintrc;
282 ints = imap - nintrc;
283 reg = ints - naddrc;
284 }
285 if (p == NULL) {
286#ifdef DEBUG_IRQ
287 printk("hmmm, int tree for %s doesn't have ctrler\n",
288 np->full_name);
289#endif
290 return 0;
291 }
292 *irq = ints;
293 *ictrler = p;
294 return nintrc;
295}
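/*
 * A self-contained sketch (not kernel code; all names hypothetical) of
 * the core "interrupt-map" matching step performed in the loop above:
 * the child's unit address ("reg") and interrupt specifier are XORed
 * with a map entry and masked with "interrupt-map-mask"; any non-zero
 * bit means no match.
 */
#include <stdint.h>

static int imap_entry_matches(const uint32_t *reg, int naddrc,
                              const uint32_t *ints, int nintrc,
                              const uint32_t *imap, const uint32_t *imask)
{
        int i;

        for (i = 0; i < naddrc; i++)            /* compare the unit address */
                if ((reg[i] ^ imap[i]) & imask[i])
                        return 0;
        for (; i < naddrc + nintrc; i++)        /* compare the int specifier */
                if ((ints[i - naddrc] ^ imap[i]) & imask[i])
                        return 0;
        return 1;                               /* all masked fields equal */
}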
296
297static unsigned char map_isa_senses[4] = {
298 IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE,
299 IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE,
300 IRQ_SENSE_EDGE | IRQ_POLARITY_NEGATIVE,
301 IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE
302};
303
304static unsigned char map_mpic_senses[4] = {
305 IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE,
306 IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE,
307 /* 2 seems to be used for the 8259 cascade... */
308 IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE,
309 IRQ_SENSE_EDGE | IRQ_POLARITY_NEGATIVE,
310};
311
312static int __devinit finish_node_interrupts(struct device_node *np,
313 unsigned long *mem_start,
314 int measure_only)
315{
316 unsigned int *ints;
317 int intlen, intrcells, intrcount;
318 int i, j, n, sense;
319 unsigned int *irq, virq;
320 struct device_node *ic;
321
322 if (num_interrupt_controllers == 0) {
323 /*
324 * Old machines just have a list of interrupt numbers
325 * and no interrupt-controller nodes.
326 */
327 ints = (unsigned int *) get_property(np, "AAPL,interrupts",
328 &intlen);
329 /* XXX old interpret_pci_props looked in parent too */
330 /* XXX old interpret_macio_props looked for interrupts
331 before AAPL,interrupts */
332 if (ints == NULL)
333 ints = (unsigned int *) get_property(np, "interrupts",
334 &intlen);
335 if (ints == NULL)
336 return 0;
337
338 np->n_intrs = intlen / sizeof(unsigned int);
339 np->intrs = prom_alloc(np->n_intrs * sizeof(np->intrs[0]),
340 mem_start);
341 if (!np->intrs)
342 return -ENOMEM;
343 if (measure_only)
344 return 0;
345
346 for (i = 0; i < np->n_intrs; ++i) {
347 np->intrs[i].line = *ints++;
348 np->intrs[i].sense = IRQ_SENSE_LEVEL
349 | IRQ_POLARITY_NEGATIVE;
350 }
351 return 0;
352 }
353
354 ints = (unsigned int *) get_property(np, "interrupts", &intlen);
355 if (ints == NULL)
356 return 0;
357 intrcells = prom_n_intr_cells(np);
358 intlen /= intrcells * sizeof(unsigned int);
359
360 np->intrs = prom_alloc(intlen * sizeof(*(np->intrs)), mem_start);
361 if (!np->intrs)
362 return -ENOMEM;
363
364 if (measure_only)
365 return 0;
366
367 intrcount = 0;
368 for (i = 0; i < intlen; ++i, ints += intrcells) {
369 n = map_interrupt(&irq, &ic, np, ints, intrcells);
370 if (n <= 0)
371 continue;
372
373 /* don't map IRQ numbers under a cascaded 8259 controller */
374 if (ic && device_is_compatible(ic, "chrp,iic")) {
375 np->intrs[intrcount].line = irq[0];
376 sense = (n > 1)? (irq[1] & 3): 3;
377 np->intrs[intrcount].sense = map_isa_senses[sense];
378 } else {
379 virq = virt_irq_create_mapping(irq[0]);
380#ifdef CONFIG_PPC64
381 if (virq == NO_IRQ) {
382 printk(KERN_CRIT "Could not allocate interrupt"
383 " number for %s\n", np->full_name);
384 continue;
385 }
386#endif
387 np->intrs[intrcount].line = irq_offset_up(virq);
388 sense = (n > 1)? (irq[1] & 3): 1;
389 np->intrs[intrcount].sense = map_mpic_senses[sense];
390 }
391
392#ifdef CONFIG_PPC64
393 /* We offset irq numbers for the u3 MPIC by 128 in PowerMac */
394 if (systemcfg->platform == PLATFORM_POWERMAC && ic && ic->parent) {
395 char *name = get_property(ic->parent, "name", NULL);
396 if (name && !strcmp(name, "u3"))
397 np->intrs[intrcount].line += 128;
398 else if (!(name && !strcmp(name, "mac-io")))
399 /* ignore other cascaded controllers, such as
400 the k2-sata-root */
401 break;
402 }
403#endif
404 if (n > 2) {
405 printk("hmmm, got %d intr cells for %s:", n,
406 np->full_name);
407 for (j = 0; j < n; ++j)
408 printk(" %d", irq[j]);
409 printk("\n");
410 }
411 ++intrcount;
412 }
413 np->n_intrs = intrcount;
414
415 return 0;
416}
417
418static int __devinit interpret_pci_props(struct device_node *np,
419 unsigned long *mem_start,
420 int naddrc, int nsizec,
421 int measure_only)
422{
423 struct address_range *adr;
424 struct pci_reg_property *pci_addrs;
425 int i, l, n_addrs;
426
427 pci_addrs = (struct pci_reg_property *)
428 get_property(np, "assigned-addresses", &l);
429 if (!pci_addrs)
430 return 0;
431
432 n_addrs = l / sizeof(*pci_addrs);
433
434 adr = prom_alloc(n_addrs * sizeof(*adr), mem_start);
435 if (!adr)
436 return -ENOMEM;
437
438 if (measure_only)
439 return 0;
440
441 np->addrs = adr;
442 np->n_addrs = n_addrs;
443
444 for (i = 0; i < n_addrs; i++) {
445 adr[i].space = pci_addrs[i].addr.a_hi;
446 adr[i].address = pci_addrs[i].addr.a_lo |
447 ((u64)pci_addrs[i].addr.a_mid << 32);
448 adr[i].size = pci_addrs[i].size_lo;
449 }
450
451 return 0;
452}
453
454static int __init interpret_dbdma_props(struct device_node *np,
455 unsigned long *mem_start,
456 int naddrc, int nsizec,
457 int measure_only)
458{
459 struct reg_property32 *rp;
460 struct address_range *adr;
461 unsigned long base_address;
462 int i, l;
463 struct device_node *db;
464
465 base_address = 0;
466 if (!measure_only) {
467 for (db = np->parent; db != NULL; db = db->parent) {
468 if (!strcmp(db->type, "dbdma") && db->n_addrs != 0) {
469 base_address = db->addrs[0].address;
470 break;
471 }
472 }
473 }
474
475 rp = (struct reg_property32 *) get_property(np, "reg", &l);
476 if (rp != 0 && l >= sizeof(struct reg_property32)) {
477 i = 0;
478 adr = (struct address_range *) (*mem_start);
479 while ((l -= sizeof(struct reg_property32)) >= 0) {
480 if (!measure_only) {
481 adr[i].space = 2;
482 adr[i].address = rp[i].address + base_address;
483 adr[i].size = rp[i].size;
484 }
485 ++i;
486 }
487 np->addrs = adr;
488 np->n_addrs = i;
489 (*mem_start) += i * sizeof(struct address_range);
490 }
491
492 return 0;
493}
494
495static int __init interpret_macio_props(struct device_node *np,
496 unsigned long *mem_start,
497 int naddrc, int nsizec,
498 int measure_only)
499{
500 struct reg_property32 *rp;
501 struct address_range *adr;
502 unsigned long base_address;
503 int i, l;
504 struct device_node *db;
505
506 base_address = 0;
507 if (!measure_only) {
508 for (db = np->parent; db != NULL; db = db->parent) {
509 if (!strcmp(db->type, "mac-io") && db->n_addrs != 0) {
510 base_address = db->addrs[0].address;
511 break;
512 }
513 }
514 }
515
516 rp = (struct reg_property32 *) get_property(np, "reg", &l);
517 if (rp != 0 && l >= sizeof(struct reg_property32)) {
518 i = 0;
519 adr = (struct address_range *) (*mem_start);
520 while ((l -= sizeof(struct reg_property32)) >= 0) {
521 if (!measure_only) {
522 adr[i].space = 2;
523 adr[i].address = rp[i].address + base_address;
524 adr[i].size = rp[i].size;
525 }
526 ++i;
527 }
528 np->addrs = adr;
529 np->n_addrs = i;
530 (*mem_start) += i * sizeof(struct address_range);
531 }
532
533 return 0;
534}
535
536static int __init interpret_isa_props(struct device_node *np,
537 unsigned long *mem_start,
538 int naddrc, int nsizec,
539 int measure_only)
540{
541 struct isa_reg_property *rp;
542 struct address_range *adr;
543 int i, l;
544
545 rp = (struct isa_reg_property *) get_property(np, "reg", &l);
546 if (rp != 0 && l >= sizeof(struct isa_reg_property)) {
547 i = 0;
548 adr = (struct address_range *) (*mem_start);
549 while ((l -= sizeof(struct isa_reg_property)) >= 0) {
550 if (!measure_only) {
551 adr[i].space = rp[i].space;
552 adr[i].address = rp[i].address;
553 adr[i].size = rp[i].size;
554 }
555 ++i;
556 }
557 np->addrs = adr;
558 np->n_addrs = i;
559 (*mem_start) += i * sizeof(struct address_range);
560 }
561
562 return 0;
563}
564
565static int __init interpret_root_props(struct device_node *np,
566 unsigned long *mem_start,
567 int naddrc, int nsizec,
568 int measure_only)
569{
570 struct address_range *adr;
571 int i, l;
572 unsigned int *rp;
573 int rpsize = (naddrc + nsizec) * sizeof(unsigned int);
574
575 rp = (unsigned int *) get_property(np, "reg", &l);
576 if (rp != 0 && l >= rpsize) {
577 i = 0;
578 adr = (struct address_range *) (*mem_start);
579 while ((l -= rpsize) >= 0) {
580 if (!measure_only) {
581 adr[i].space = 0;
582 adr[i].address = rp[naddrc - 1];
583 adr[i].size = rp[naddrc + nsizec - 1];
584 }
585 ++i;
586 rp += naddrc + nsizec;
587 }
588 np->addrs = adr;
589 np->n_addrs = i;
590 (*mem_start) += i * sizeof(struct address_range);
591 }
592
593 return 0;
594}
595
596static int __devinit finish_node(struct device_node *np,
597 unsigned long *mem_start,
598 interpret_func *ifunc,
599 int naddrc, int nsizec,
600 int measure_only)
601{
602 struct device_node *child;
603 int *ip, rc = 0;
604
605 /* get the device addresses and interrupts */
606 if (ifunc != NULL)
607 rc = ifunc(np, mem_start, naddrc, nsizec, measure_only);
608 if (rc)
609 goto out;
610
611 rc = finish_node_interrupts(np, mem_start, measure_only);
612 if (rc)
613 goto out;
614
615 /* Look for #address-cells and #size-cells properties. */
616 ip = (int *) get_property(np, "#address-cells", NULL);
617 if (ip != NULL)
618 naddrc = *ip;
619 ip = (int *) get_property(np, "#size-cells", NULL);
620 if (ip != NULL)
621 nsizec = *ip;
622
623 if (!strcmp(np->name, "device-tree") || np->parent == NULL)
624 ifunc = interpret_root_props;
625 else if (np->type == 0)
626 ifunc = NULL;
627 else if (!strcmp(np->type, "pci") || !strcmp(np->type, "vci"))
628 ifunc = interpret_pci_props;
629 else if (!strcmp(np->type, "dbdma"))
630 ifunc = interpret_dbdma_props;
631 else if (!strcmp(np->type, "mac-io") || ifunc == interpret_macio_props)
632 ifunc = interpret_macio_props;
633 else if (!strcmp(np->type, "isa"))
634 ifunc = interpret_isa_props;
635 else if (!strcmp(np->name, "uni-n") || !strcmp(np->name, "u3"))
636 ifunc = interpret_root_props;
637 else if (!((ifunc == interpret_dbdma_props
638 || ifunc == interpret_macio_props)
639 && (!strcmp(np->type, "escc")
640 || !strcmp(np->type, "media-bay"))))
641 ifunc = NULL;
642
643 for (child = np->child; child != NULL; child = child->sibling) {
644 rc = finish_node(child, mem_start, ifunc,
645 naddrc, nsizec, measure_only);
646 if (rc)
647 goto out;
648 }
649out:
650 return rc;
651}
652
653static void __init scan_interrupt_controllers(void)
654{
655 struct device_node *np;
656 int n = 0;
657 char *name, *ic;
658 int iclen;
659
660 for (np = allnodes; np != NULL; np = np->allnext) {
661 ic = get_property(np, "interrupt-controller", &iclen);
662 name = get_property(np, "name", NULL);
663 /* checking iclen makes sure we don't get a false
664 match on /chosen.interrupt_controller */
665 if ((name != NULL
666 && strcmp(name, "interrupt-controller") == 0)
667 || (ic != NULL && iclen == 0
668 && strcmp(name, "AppleKiwi"))) {
669 if (n == 0)
670 dflt_interrupt_controller = np;
671 ++n;
672 }
673 }
674 num_interrupt_controllers = n;
675}
676
677/**
678 * finish_device_tree is called once things are running normally
679 * (i.e. with text and data mapped to the address they were linked at).
680 * It traverses the device tree and fills in some of the additional
681 * fields in each node, such as {n_}addrs and {n_}intrs; the virtual
682 * interrupt mapping is also initialized at this point.
683 */
684void __init finish_device_tree(void)
685{
686 unsigned long start, end, size = 0;
687
688 DBG(" -> finish_device_tree\n");
689
690#ifdef CONFIG_PPC64
691 /* Initialize virtual IRQ map */
692 virt_irq_init();
693#endif
694 scan_interrupt_controllers();
695
696 /*
697 * Finish the device-tree (pre-parsing some properties, etc.).
698 * We do this in two passes: one with "measure_only" set, which
699 * only measures the amount of memory needed; we then allocate
700 * that memory and call finish_node again. However, we must be
701 * careful, as most routines will now fail when prom_alloc()
702 * returns 0, so we must make sure our first pass doesn't start
703 * at 0. We pre-initialize size to 16 for that reason and then
704 * remove those additional 16 bytes afterwards.
705 */
706 size = 16;
707 finish_node(allnodes, &size, NULL, 0, 0, 1);
708 size -= 16;
709 end = start = (unsigned long) __va(lmb_alloc(size, 128));
710 finish_node(allnodes, &end, NULL, 0, 0, 0);
711 BUG_ON(end != start + size);
712
713 DBG(" <- finish_device_tree\n");
714}
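/*
 * A standalone, user-space sketch (hypothetical names, not kernel code)
 * of the measure/allocate pattern used above: pass 1 runs the same
 * construction code against a dummy cursor purely to learn the total
 * size, then pass 2 re-runs it with a cursor into real memory and
 * actually fills the structures in.
 */
#include <stdio.h>
#include <stdlib.h>

static void *bump(unsigned long *cursor, unsigned long size)
{
        void *p = (void *)*cursor;

        *cursor += size;
        return p;
}

static void build(unsigned long *cursor, int measure_only)
{
        int *a = bump(cursor, 3 * sizeof(int));
        char *b = bump(cursor, 8);

        if (!measure_only) {            /* pass 2 only: pointers are real */
                a[0] = 1; a[1] = 2; a[2] = 3;
                snprintf(b, 8, "hello");
        }
}

int main(void)
{
        unsigned long size = 16, start, end;    /* start non-zero, as above */

        build(&size, 1);                        /* pass 1: measure */
        size -= 16;
        end = start = (unsigned long)malloc(size);
        build(&end, 0);                         /* pass 2: really build */
        printf("used %lu bytes\n", end - start);
        free((void *)start);
        return 0;
}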
715
716static inline char *find_flat_dt_string(u32 offset)
717{
718 return ((char *)initial_boot_params) +
719 initial_boot_params->off_dt_strings + offset;
720}
721
722/**
723 * This function is used to scan the flattened device-tree; it is
724 * used to extract memory information at boot, before we can
725 * unflatten the tree.
726 */
727static int __init scan_flat_dt(int (*it)(unsigned long node,
728 const char *uname, int depth,
729 void *data),
730 void *data)
731{
732 unsigned long p = ((unsigned long)initial_boot_params) +
733 initial_boot_params->off_dt_struct;
734 int rc = 0;
735 int depth = -1;
736
737 do {
738 u32 tag = *((u32 *)p);
739 char *pathp;
740
741 p += 4;
742 if (tag == OF_DT_END_NODE) {
743 depth --;
744 continue;
745 }
746 if (tag == OF_DT_NOP)
747 continue;
748 if (tag == OF_DT_END)
749 break;
750 if (tag == OF_DT_PROP) {
751 u32 sz = *((u32 *)p);
752 p += 8;
753 if (initial_boot_params->version < 0x10)
754 p = _ALIGN(p, sz >= 8 ? 8 : 4);
755 p += sz;
756 p = _ALIGN(p, 4);
757 continue;
758 }
759 if (tag != OF_DT_BEGIN_NODE) {
760 printk(KERN_WARNING "Invalid tag %x scanning flattened"
761 " device tree !\n", tag);
762 return -EINVAL;
763 }
764 depth++;
765 pathp = (char *)p;
766 p = _ALIGN(p + strlen(pathp) + 1, 4);
767 if ((*pathp) == '/') {
768 char *lp, *np;
769 for (lp = NULL, np = pathp; *np; np++)
770 if ((*np) == '/')
771 lp = np+1;
772 if (lp != NULL)
773 pathp = lp;
774 }
775 rc = it(p, pathp, depth, data);
776 if (rc != 0)
777 break;
778 } while(1);
779
780 return rc;
781}
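/*
 * The structure block walked above is a stream of 32-bit tags. A
 * minimal standalone walker over such a stream (hypothetical tag
 * values mirroring OF_DT_*, ignoring the pre-0x10 property alignment
 * quirk) that just computes the maximum node depth:
 */
#include <stdint.h>
#include <string.h>

#define DEMO_BEGIN_NODE 0x1     /* followed by NUL-terminated name, padded */
#define DEMO_END_NODE   0x2
#define DEMO_PROP       0x3     /* followed by length, name offset, value */
#define DEMO_NOP        0x4
#define DEMO_END        0x9

#define DEMO_ALIGN4(x)  (((x) + 3) & ~(uintptr_t)3)

static int demo_max_depth(const uint32_t *p)
{
        int depth = 0, max = 0;

        for (;;) {
                uint32_t tag = *p++;

                if (tag == DEMO_END)
                        return max;
                if (tag == DEMO_NOP)
                        continue;
                if (tag == DEMO_END_NODE) {
                        depth--;
                } else if (tag == DEMO_PROP) {
                        uint32_t len = p[0];

                        p += 2; /* skip the length and name-offset words */
                        p = (const uint32_t *)DEMO_ALIGN4((uintptr_t)p + len);
                } else {        /* DEMO_BEGIN_NODE: skip the padded name */
                        const char *name = (const char *)p;

                        p = (const uint32_t *)
                                DEMO_ALIGN4((uintptr_t)p + strlen(name) + 1);
                        if (++depth > max)
                                max = depth;
                }
        }
}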
782
783/**
784 * This function can be used within a scan_flat_dt callback to get
785 * access to properties.
786 */
787static void* __init get_flat_dt_prop(unsigned long node, const char *name,
788 unsigned long *size)
789{
790 unsigned long p = node;
791
792 do {
793 u32 tag = *((u32 *)p);
794 u32 sz, noff;
795 const char *nstr;
796
797 p += 4;
798 if (tag == OF_DT_NOP)
799 continue;
800 if (tag != OF_DT_PROP)
801 return NULL;
802
803 sz = *((u32 *)p);
804 noff = *((u32 *)(p + 4));
805 p += 8;
806 if (initial_boot_params->version < 0x10)
807 p = _ALIGN(p, sz >= 8 ? 8 : 4);
808
809 nstr = find_flat_dt_string(noff);
810 if (nstr == NULL) {
811 printk(KERN_WARNING "Can't find property index"
812 " name !\n");
813 return NULL;
814 }
815 if (strcmp(name, nstr) == 0) {
816 if (size)
817 *size = sz;
818 return (void *)p;
819 }
820 p += sz;
821 p = _ALIGN(p, 4);
822 } while(1);
823}
824
825static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
826 unsigned long align)
827{
828 void *res;
829
830 *mem = _ALIGN(*mem, align);
831 res = (void *)*mem;
832 *mem += size;
833
834 return res;
835}
836
837static unsigned long __init unflatten_dt_node(unsigned long mem,
838 unsigned long *p,
839 struct device_node *dad,
840 struct device_node ***allnextpp,
841 unsigned long fpsize)
842{
843 struct device_node *np;
844 struct property *pp, **prev_pp = NULL;
845 char *pathp;
846 u32 tag;
847 unsigned int l, allocl;
848 int has_name = 0;
849 int new_format = 0;
850
851 tag = *((u32 *)(*p));
852 if (tag != OF_DT_BEGIN_NODE) {
853 printk("Weird tag at start of node: %x\n", tag);
854 return mem;
855 }
856 *p += 4;
857 pathp = (char *)*p;
858 l = allocl = strlen(pathp) + 1;
859 *p = _ALIGN(*p + l, 4);
860
861	/* Version 0x10 has a more compact unit name here instead of the full
862	 * path. We accumulate the full path size using "fpsize"; we'll rebuild
863 * it later. We detect this because the first character of the name is
864 * not '/'.
865 */
866 if ((*pathp) != '/') {
867 new_format = 1;
868 if (fpsize == 0) {
869			/* Root node: special case. fpsize accounts for the path
870			 * plus the terminating zero. The root node only has '/', so
871			 * fpsize should be 2, but we want to avoid first-level
872			 * nodes having two '/', so we use fpsize 1 here.
873 */
874 fpsize = 1;
875 allocl = 2;
876 } else {
877 /* account for '/' and path size minus terminal 0
878 * already in 'l'
879 */
880 fpsize += l;
881 allocl = fpsize;
882 }
883 }
884
885
886 np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
887 __alignof__(struct device_node));
888 if (allnextpp) {
889 memset(np, 0, sizeof(*np));
890 np->full_name = ((char*)np) + sizeof(struct device_node);
891 if (new_format) {
892 char *p = np->full_name;
893 /* rebuild full path for new format */
894 if (dad && dad->parent) {
895 strcpy(p, dad->full_name);
896#ifdef DEBUG
897 if ((strlen(p) + l + 1) != allocl) {
898 DBG("%s: p: %d, l: %d, a: %d\n",
899 pathp, strlen(p), l, allocl);
900 }
901#endif
902 p += strlen(p);
903 }
904 *(p++) = '/';
905 memcpy(p, pathp, l);
906 } else
907 memcpy(np->full_name, pathp, l);
908 prev_pp = &np->properties;
909 **allnextpp = np;
910 *allnextpp = &np->allnext;
911 if (dad != NULL) {
912 np->parent = dad;
913 /* we temporarily use the next field as `last_child'*/
914 if (dad->next == 0)
915 dad->child = np;
916 else
917 dad->next->sibling = np;
918 dad->next = np;
919 }
920 kref_init(&np->kref);
921 }
922 while(1) {
923 u32 sz, noff;
924 char *pname;
925
926 tag = *((u32 *)(*p));
927 if (tag == OF_DT_NOP) {
928 *p += 4;
929 continue;
930 }
931 if (tag != OF_DT_PROP)
932 break;
933 *p += 4;
934 sz = *((u32 *)(*p));
935 noff = *((u32 *)((*p) + 4));
936 *p += 8;
937 if (initial_boot_params->version < 0x10)
938 *p = _ALIGN(*p, sz >= 8 ? 8 : 4);
939
940 pname = find_flat_dt_string(noff);
941 if (pname == NULL) {
942 printk("Can't find property name in list !\n");
943 break;
944 }
945 if (strcmp(pname, "name") == 0)
946 has_name = 1;
947 l = strlen(pname) + 1;
948 pp = unflatten_dt_alloc(&mem, sizeof(struct property),
949 __alignof__(struct property));
950 if (allnextpp) {
951 if (strcmp(pname, "linux,phandle") == 0) {
952 np->node = *((u32 *)*p);
953 if (np->linux_phandle == 0)
954 np->linux_phandle = np->node;
955 }
956 if (strcmp(pname, "ibm,phandle") == 0)
957 np->linux_phandle = *((u32 *)*p);
958 pp->name = pname;
959 pp->length = sz;
960 pp->value = (void *)*p;
961 *prev_pp = pp;
962 prev_pp = &pp->next;
963 }
964 *p = _ALIGN((*p) + sz, 4);
965 }
966	/* With version 0x10 we may not have the name property; recreate
967	 * it here from the unit name if absent.
968 */
969 if (!has_name) {
970 char *p = pathp, *ps = pathp, *pa = NULL;
971 int sz;
972
973 while (*p) {
974 if ((*p) == '@')
975 pa = p;
976 if ((*p) == '/')
977 ps = p + 1;
978 p++;
979 }
980 if (pa < ps)
981 pa = p;
982 sz = (pa - ps) + 1;
983 pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
984 __alignof__(struct property));
985 if (allnextpp) {
986 pp->name = "name";
987 pp->length = sz;
988 pp->value = (unsigned char *)(pp + 1);
989 *prev_pp = pp;
990 prev_pp = &pp->next;
991 memcpy(pp->value, ps, sz - 1);
992 ((char *)pp->value)[sz - 1] = 0;
993 DBG("fixed up name for %s -> %s\n", pathp, pp->value);
994 }
995 }
996 if (allnextpp) {
997 *prev_pp = NULL;
998 np->name = get_property(np, "name", NULL);
999 np->type = get_property(np, "device_type", NULL);
1000
1001 if (!np->name)
1002 np->name = "<NULL>";
1003 if (!np->type)
1004 np->type = "<NULL>";
1005 }
1006 while (tag == OF_DT_BEGIN_NODE) {
1007 mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
1008 tag = *((u32 *)(*p));
1009 }
1010 if (tag != OF_DT_END_NODE) {
1011 printk("Weird tag at end of node: %x\n", tag);
1012 return mem;
1013 }
1014 *p += 4;
1015 return mem;
1016}
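/*
 * A standalone sketch of the "name" fix-up above: for a unit name such
 * as "/pci@f2000000/mac-io@17", the recreated "name" property is the
 * text between the last '/' and the following '@' ("mac-io"). This is
 * hypothetical demo code, not the kernel helper itself.
 */
#include <stdio.h>
#include <string.h>

static void unit_to_name(const char *path, char *out, size_t outlen)
{
        const char *p = path, *ps = path, *pa = NULL;
        size_t sz;

        while (*p) {
                if (*p == '@')
                        pa = p;
                if (*p == '/')
                        ps = p + 1;
                p++;
        }
        if (pa < ps)            /* no '@' after the last '/' */
                pa = p;
        sz = (size_t)(pa - ps);
        if (sz >= outlen)
                sz = outlen - 1;
        memcpy(out, ps, sz);
        out[sz] = 0;
}

int main(void)
{
        char buf[32];

        unit_to_name("/pci@f2000000/mac-io@17", buf, sizeof(buf));
        printf("%s\n", buf);    /* prints "mac-io" */
        return 0;
}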
1017
1018
1019/**
1020 * Unflattens the device-tree passed by the firmware, creating the
1021 * tree of struct device_node. It also fills in the "name" and "type"
1022 * pointers of the nodes so that the normal device-tree walking
1023 * functions can be used (this used to be done by finish_device_tree).
1024 */
1025void __init unflatten_device_tree(void)
1026{
1027 unsigned long start, mem, size;
1028 struct device_node **allnextp = &allnodes;
1029 char *p = NULL;
1030 int l = 0;
1031
1032 DBG(" -> unflatten_device_tree()\n");
1033
1034 /* First pass, scan for size */
1035 start = ((unsigned long)initial_boot_params) +
1036 initial_boot_params->off_dt_struct;
1037 size = unflatten_dt_node(0, &start, NULL, NULL, 0);
1038 size = (size | 3) + 1;
1039
1040 DBG(" size is %lx, allocating...\n", size);
1041
1042 /* Allocate memory for the expanded device tree */
1043 mem = lmb_alloc(size + 4, __alignof__(struct device_node));
1044 if (!mem) {
1045 DBG("Couldn't allocate memory with lmb_alloc()!\n");
1046 panic("Couldn't allocate memory with lmb_alloc()!\n");
1047 }
1048 mem = (unsigned long) __va(mem);
1049
1050 ((u32 *)mem)[size / 4] = 0xdeadbeef;
1051
1052 DBG(" unflattening %lx...\n", mem);
1053
1054 /* Second pass, do actual unflattening */
1055 start = ((unsigned long)initial_boot_params) +
1056 initial_boot_params->off_dt_struct;
1057 unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
1058 if (*((u32 *)start) != OF_DT_END)
1059 printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start));
1060 if (((u32 *)mem)[size / 4] != 0xdeadbeef)
1061 printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
1062 ((u32 *)mem)[size / 4] );
1063 *allnextp = NULL;
1064
1065 /* Get pointer to OF "/chosen" node for use everywhere */
1066 of_chosen = of_find_node_by_path("/chosen");
1067 if (of_chosen == NULL)
1068 of_chosen = of_find_node_by_path("/chosen@0");
1069
1070	/* Retrieve command line */
1071 if (of_chosen != NULL) {
1072 p = (char *)get_property(of_chosen, "bootargs", &l);
1073 if (p != NULL && l > 0)
1074 strlcpy(cmd_line, p, min(l, COMMAND_LINE_SIZE));
1075 }
1076#ifdef CONFIG_CMDLINE
1077 if (l == 0 || (l == 1 && (*p) == 0))
1078 strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1079#endif /* CONFIG_CMDLINE */
1080
1081 DBG("Command line is: %s\n", cmd_line);
1082
1083 DBG(" <- unflatten_device_tree()\n");
1084}
1085
1086
1087static int __init early_init_dt_scan_cpus(unsigned long node,
1088 const char *uname, int depth, void *data)
1089{
1090 char *type = get_flat_dt_prop(node, "device_type", NULL);
1091 u32 *prop;
1092 unsigned long size = 0;
1093
1094 /* We are scanning "cpu" nodes only */
1095 if (type == NULL || strcmp(type, "cpu") != 0)
1096 return 0;
1097
1098#ifdef CONFIG_PPC_PSERIES
1099	/* On LPAR, look for the first ibm,pft-size property for
1100	 * the hash table size. */
1101 if (systemcfg->platform == PLATFORM_PSERIES_LPAR && ppc64_pft_size == 0) {
1102 u32 *pft_size;
1103 pft_size = get_flat_dt_prop(node, "ibm,pft-size", NULL);
1104 if (pft_size != NULL) {
1105 /* pft_size[0] is the NUMA CEC cookie */
1106 ppc64_pft_size = pft_size[1];
1107 }
1108 }
1109#endif
1110
1111 boot_cpuid = 0;
1112 boot_cpuid_phys = 0;
1113 if (initial_boot_params && initial_boot_params->version >= 2) {
1114		/* Version 2 of the kexec param format adds the phys cpuid
1115		 * of the booted proc.
1116 */
1117 boot_cpuid_phys = initial_boot_params->boot_cpuid_phys;
1118 } else {
1119		/* Check if it's the boot-cpu; if so, set its hw index now */
1120 if (get_flat_dt_prop(node, "linux,boot-cpu", NULL) != NULL) {
1121 prop = get_flat_dt_prop(node, "reg", NULL);
1122 if (prop != NULL)
1123 boot_cpuid_phys = *prop;
1124 }
1125 }
1126 set_hard_smp_processor_id(0, boot_cpuid_phys);
1127
1128#ifdef CONFIG_ALTIVEC
1129	/* Check if we have a VMX unit and, if so, update CPU features */
1130 prop = (u32 *)get_flat_dt_prop(node, "ibm,vmx", &size);
1131 if (prop && (*prop) > 0) {
1132 cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
1133 cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
1134 }
1135
1136 /* Same goes for Apple's "altivec" property */
1137 prop = (u32 *)get_flat_dt_prop(node, "altivec", NULL);
1138 if (prop) {
1139 cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
1140 cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
1141 }
1142#endif /* CONFIG_ALTIVEC */
1143
1144#ifdef CONFIG_PPC_PSERIES
1145 /*
1146 * Check for an SMT capable CPU and set the CPU feature. We do
1147 * this by looking at the size of the ibm,ppc-interrupt-server#s
1148 * property
1149 */
1150 prop = (u32 *)get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s",
1151 &size);
1152 cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
1153 if (prop && ((size / sizeof(u32)) > 1))
1154 cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
1155#endif
1156
1157 return 0;
1158}
1159
1160static int __init early_init_dt_scan_chosen(unsigned long node,
1161 const char *uname, int depth, void *data)
1162{
1163 u32 *prop;
1164 unsigned long *lprop;
1165
1166 DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
1167
1168 if (depth != 1 ||
1169 (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
1170 return 0;
1171
1172 /* get platform type */
1173 prop = (u32 *)get_flat_dt_prop(node, "linux,platform", NULL);
1174 if (prop == NULL)
1175 return 0;
1176#ifdef CONFIG_PPC64
1177 systemcfg->platform = *prop;
1178#else
1179#ifdef CONFIG_PPC_MULTIPLATFORM
1180 _machine = *prop;
1181#endif
1182#endif
1183
1184#ifdef CONFIG_PPC64
1185 /* check if iommu is forced on or off */
1186 if (get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
1187 iommu_is_off = 1;
1188 if (get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
1189 iommu_force_on = 1;
1190#endif
1191
1192 lprop = get_flat_dt_prop(node, "linux,memory-limit", NULL);
1193 if (lprop)
1194 memory_limit = *lprop;
1195
1196#ifdef CONFIG_PPC64
1197 lprop = get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
1198 if (lprop)
1199 tce_alloc_start = *lprop;
1200 lprop = get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
1201 if (lprop)
1202 tce_alloc_end = *lprop;
1203#endif
1204
1205#ifdef CONFIG_PPC_RTAS
1206	/* To help early debugging via the front panel, we retrieve a minimal
1207	 * set of RTAS info now, if available.
1208 */
1209 {
1210 u64 *basep, *entryp;
1211
1212 basep = get_flat_dt_prop(node, "linux,rtas-base", NULL);
1213 entryp = get_flat_dt_prop(node, "linux,rtas-entry", NULL);
1214 prop = get_flat_dt_prop(node, "linux,rtas-size", NULL);
1215 if (basep && entryp && prop) {
1216 rtas.base = *basep;
1217 rtas.entry = *entryp;
1218 rtas.size = *prop;
1219 }
1220 }
1221#endif /* CONFIG_PPC_RTAS */
1222
1223 /* break now */
1224 return 1;
1225}
1226
1227static int __init early_init_dt_scan_root(unsigned long node,
1228 const char *uname, int depth, void *data)
1229{
1230 u32 *prop;
1231
1232 if (depth != 0)
1233 return 0;
1234
1235 prop = get_flat_dt_prop(node, "#size-cells", NULL);
1236 dt_root_size_cells = (prop == NULL) ? 1 : *prop;
1237 DBG("dt_root_size_cells = %x\n", dt_root_size_cells);
1238
1239 prop = get_flat_dt_prop(node, "#address-cells", NULL);
1240 dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
1241 DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);
1242
1243 /* break now */
1244 return 1;
1245}
1246
1247static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
1248{
1249 cell_t *p = *cellp;
1250 unsigned long r;
1251
1252 /* Ignore more than 2 cells */
1253 while (s > sizeof(unsigned long) / 4) {
1254 p++;
1255 s--;
1256 }
1257 r = *p++;
1258#ifdef CONFIG_PPC64
1259 if (s > 1) {
1260 r <<= 32;
1261 r |= *(p++);
1262 s--;
1263 }
1264#endif
1265
1266 *cellp = p;
1267 return r;
1268}
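/*
 * A standalone sketch (hypothetical name, not the kernel helper) of the
 * cell combining above, hard-coded for the 64-bit case: with two or
 * more cells, only the last two are used, the first as the high 32 bits
 * and the second as the low 32 bits.
 */
#include <stdint.h>

static uint64_t demo_next_cell(int s, const uint32_t **cellp)
{
        const uint32_t *p = *cellp;
        uint64_t r;

        while (s > 2) {         /* ignore cells beyond the low 64 bits */
                p++;
                s--;
        }
        r = *p++;
        if (s > 1)              /* second cell present: combine the two */
                r = (r << 32) | *p++;
        *cellp = p;
        return r;
}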
1269
1270
1271static int __init early_init_dt_scan_memory(unsigned long node,
1272 const char *uname, int depth, void *data)
1273{
1274 char *type = get_flat_dt_prop(node, "device_type", NULL);
1275 cell_t *reg, *endp;
1276 unsigned long l;
1277
1278 /* We are scanning "memory" nodes only */
1279 if (type == NULL || strcmp(type, "memory") != 0)
1280 return 0;
1281
1282 reg = (cell_t *)get_flat_dt_prop(node, "reg", &l);
1283 if (reg == NULL)
1284 return 0;
1285
1286 endp = reg + (l / sizeof(cell_t));
1287
1288 DBG("memory scan node %s ..., reg size %ld, data: %x %x %x %x, ...\n",
1289 uname, l, reg[0], reg[1], reg[2], reg[3]);
1290
1291 while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
1292 unsigned long base, size;
1293
1294 base = dt_mem_next_cell(dt_root_addr_cells, &reg);
1295 size = dt_mem_next_cell(dt_root_size_cells, &reg);
1296
1297 if (size == 0)
1298 continue;
1299 DBG(" - %lx , %lx\n", base, size);
1300#ifdef CONFIG_PPC64
1301 if (iommu_is_off) {
1302 if (base >= 0x80000000ul)
1303 continue;
1304 if ((base + size) > 0x80000000ul)
1305 size = 0x80000000ul - base;
1306 }
1307#endif
1308 lmb_add(base, size);
1309 }
1310 return 0;
1311}
1312
1313static void __init early_reserve_mem(void)
1314{
1315 unsigned long base, size;
1316 unsigned long *reserve_map;
1317
1318 reserve_map = (unsigned long *)(((unsigned long)initial_boot_params) +
1319 initial_boot_params->off_mem_rsvmap);
1320 while (1) {
1321 base = *(reserve_map++);
1322 size = *(reserve_map++);
1323 if (size == 0)
1324 break;
1325 DBG("reserving: %lx -> %lx\n", base, size);
1326 lmb_reserve(base, size);
1327 }
1328
1329#if 0
1330 DBG("memory reserved, lmbs :\n");
1331 lmb_dump_all();
1332#endif
1333}
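/*
 * The reserve map walked above is just a flat list of (base, size)
 * pairs terminated by a zero-sized entry. A standalone demo with
 * hypothetical values:
 */
#include <stdio.h>

int main(void)
{
        unsigned long map[] = { 0x4000, 0x1000, 0x1000000, 0x200000, 0, 0 };
        unsigned long *p = map, base, size;

        while (1) {
                base = *p++;
                size = *p++;
                if (size == 0)          /* terminator entry */
                        break;
                printf("reserve %lx..%lx\n", base, base + size - 1);
        }
        return 0;
}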
1334
1335void __init early_init_devtree(void *params)
1336{
1337 DBG(" -> early_init_devtree()\n");
1338
1339 /* Setup flat device-tree pointer */
1340 initial_boot_params = params;
1341
1342	/* Retrieve various information from the /chosen node of the
1343 * device-tree, including the platform type, initrd location and
1344 * size, TCE reserve, and more ...
1345 */
1346 scan_flat_dt(early_init_dt_scan_chosen, NULL);
1347
1348 /* Scan memory nodes and rebuild LMBs */
1349 lmb_init();
1350 scan_flat_dt(early_init_dt_scan_root, NULL);
1351 scan_flat_dt(early_init_dt_scan_memory, NULL);
1352 lmb_enforce_memory_limit(memory_limit);
1353 lmb_analyze();
1354#ifdef CONFIG_PPC64
1355 systemcfg->physicalMemorySize = lmb_phys_mem_size();
1356#endif
1357 lmb_reserve(0, __pa(klimit));
1358
1359 DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
1360
1361 /* Reserve LMB regions used by kernel, initrd, dt, etc... */
1362 early_reserve_mem();
1363
1364 DBG("Scanning CPUs ...\n");
1365
1366	/* Retrieve the hash table size from the flattened tree, plus other
1367	 * CPU-related information (altivec support, boot CPU ID, ...)
1368 */
1369 scan_flat_dt(early_init_dt_scan_cpus, NULL);
1370
1371 DBG(" <- early_init_devtree()\n");
1372}
1373
1374#undef printk
1375
1376int
1377prom_n_addr_cells(struct device_node* np)
1378{
1379 int* ip;
1380 do {
1381 if (np->parent)
1382 np = np->parent;
1383 ip = (int *) get_property(np, "#address-cells", NULL);
1384 if (ip != NULL)
1385 return *ip;
1386 } while (np->parent);
1387 /* No #address-cells property for the root node, default to 1 */
1388 return 1;
1389}
1390
1391int
1392prom_n_size_cells(struct device_node* np)
1393{
1394 int* ip;
1395 do {
1396 if (np->parent)
1397 np = np->parent;
1398 ip = (int *) get_property(np, "#size-cells", NULL);
1399 if (ip != NULL)
1400 return *ip;
1401 } while (np->parent);
1402 /* No #size-cells property for the root node, default to 1 */
1403 return 1;
1404}
1405
1406/**
1407 * Work out the sense (active-low level / active-high edge)
1408 * of each interrupt from the device tree.
1409 */
1410void __init prom_get_irq_senses(unsigned char *senses, int off, int max)
1411{
1412 struct device_node *np;
1413 int i, j;
1414
1415 /* default to level-triggered */
1416 memset(senses, IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE, max - off);
1417
1418 for (np = allnodes; np != 0; np = np->allnext) {
1419 for (j = 0; j < np->n_intrs; j++) {
1420 i = np->intrs[j].line;
1421 if (i >= off && i < max)
1422 senses[i-off] = np->intrs[j].sense;
1423 }
1424 }
1425}
1426
1427/**
1428 * Construct and return a list of the device_nodes with a given name.
1429 */
1430struct device_node *find_devices(const char *name)
1431{
1432 struct device_node *head, **prevp, *np;
1433
1434 prevp = &head;
1435 for (np = allnodes; np != 0; np = np->allnext) {
1436 if (np->name != 0 && strcasecmp(np->name, name) == 0) {
1437 *prevp = np;
1438 prevp = &np->next;
1439 }
1440 }
1441 *prevp = NULL;
1442 return head;
1443}
1444EXPORT_SYMBOL(find_devices);
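/*
 * The "prevp" idiom above builds a singly linked list in one pass
 * without special-casing the head pointer. A standalone demo with a
 * hypothetical node type:
 */
#include <stdio.h>

struct demo_node { int val; struct demo_node *next; };

int main(void)
{
        struct demo_node pool[4] = { {1}, {2}, {3}, {4} };
        struct demo_node *head, **prevp = &head, *np;
        int i;

        for (i = 0; i < 4; i++) {
                np = &pool[i];
                if (np->val % 2 == 0) { /* keep only the matching nodes */
                        *prevp = np;
                        prevp = &np->next;
                }
        }
        *prevp = NULL;          /* terminate; head is NULL if no matches */

        for (np = head; np != NULL; np = np->next)
                printf("%d\n", np->val);        /* prints 2 then 4 */
        return 0;
}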
1445
1446/**
1447 * Construct and return a list of the device_nodes with a given type.
1448 */
1449struct device_node *find_type_devices(const char *type)
1450{
1451 struct device_node *head, **prevp, *np;
1452
1453 prevp = &head;
1454 for (np = allnodes; np != 0; np = np->allnext) {
1455 if (np->type != 0 && strcasecmp(np->type, type) == 0) {
1456 *prevp = np;
1457 prevp = &np->next;
1458 }
1459 }
1460 *prevp = NULL;
1461 return head;
1462}
1463EXPORT_SYMBOL(find_type_devices);
1464
1465/**
1466 * Construct and return a list of all the device_nodes, linked together.
1467 */
1468struct device_node *find_all_nodes(void)
1469{
1470 struct device_node *head, **prevp, *np;
1471
1472 prevp = &head;
1473 for (np = allnodes; np != 0; np = np->allnext) {
1474 *prevp = np;
1475 prevp = &np->next;
1476 }
1477 *prevp = NULL;
1478 return head;
1479}
1480EXPORT_SYMBOL(find_all_nodes);
1481
1482/** Checks if the given "compat" string matches one of the strings in
1483 * the device's "compatible" property
1484 */
1485int device_is_compatible(struct device_node *device, const char *compat)
1486{
1487 const char* cp;
1488 int cplen, l;
1489
1490 cp = (char *) get_property(device, "compatible", &cplen);
1491 if (cp == NULL)
1492 return 0;
1493 while (cplen > 0) {
1494 if (strncasecmp(cp, compat, strlen(compat)) == 0)
1495 return 1;
1496 l = strlen(cp) + 1;
1497 cp += l;
1498 cplen -= l;
1499 }
1500
1501 return 0;
1502}
1503EXPORT_SYMBOL(device_is_compatible);
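/*
 * "compatible" is a sequence of NUL-terminated strings packed back to
 * back, which the loop above walks. Note that it uses strncasecmp with
 * the length of the requested string, i.e. a case-insensitive *prefix*
 * match. A standalone demo with a hypothetical two-entry property:
 */
#include <stdio.h>
#include <string.h>
#include <strings.h>

int main(void)
{
        const char prop[] = "chrp,iic\0pnpPNP,000";     /* two strings */
        int cplen = sizeof(prop);       /* covers both strings and NULs */
        const char *cp = prop;

        while (cplen > 0) {
                int l = strlen(cp) + 1;

                if (strncasecmp(cp, "PNPpnp,000", strlen("PNPpnp,000")) == 0)
                        printf("matched entry: %s\n", cp);
                cp += l;
                cplen -= l;
        }
        return 0;
}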
1504
1505
1506/**
1507 * Indicates whether the root node has a given value in its
1508 * compatible property.
1509 */
1510int machine_is_compatible(const char *compat)
1511{
1512 struct device_node *root;
1513 int rc = 0;
1514
1515 root = of_find_node_by_path("/");
1516 if (root) {
1517 rc = device_is_compatible(root, compat);
1518 of_node_put(root);
1519 }
1520 return rc;
1521}
1522EXPORT_SYMBOL(machine_is_compatible);
1523
1524/**
1525 * Construct and return a list of the device_nodes with a given type
1526 * and compatible property.
1527 */
1528struct device_node *find_compatible_devices(const char *type,
1529 const char *compat)
1530{
1531 struct device_node *head, **prevp, *np;
1532
1533 prevp = &head;
1534 for (np = allnodes; np != 0; np = np->allnext) {
1535 if (type != NULL
1536 && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1537 continue;
1538 if (device_is_compatible(np, compat)) {
1539 *prevp = np;
1540 prevp = &np->next;
1541 }
1542 }
1543 *prevp = NULL;
1544 return head;
1545}
1546EXPORT_SYMBOL(find_compatible_devices);
1547
1548/**
1549 * Find the device_node with a given full_name.
1550 */
1551struct device_node *find_path_device(const char *path)
1552{
1553 struct device_node *np;
1554
1555 for (np = allnodes; np != 0; np = np->allnext)
1556 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0)
1557 return np;
1558 return NULL;
1559}
1560EXPORT_SYMBOL(find_path_device);
1561
1562/*******
1563 *
1564 * New implementation of the OF "find" APIs. These return a
1565 * refcounted object; call of_node_put() when done. The device tree
1566 * and list are protected by a rw_lock.
1567 *
1568 * Note that property management will need some locking as well;
1569 * this isn't dealt with yet.
1570 *
1571 *******/
1572
1573/**
1574 * of_find_node_by_name - Find a node by its "name" property
1575 * @from: The node to start searching from or NULL, the node
1576 * you pass will not be searched, only the next one
1577 * will; typically, you pass what the previous call
1578 * returned. of_node_put() will be called on it
1579 * @name: The name string to match against
1580 *
1581 * Returns a node pointer with refcount incremented, use
1582 * of_node_put() on it when done.
1583 */
1584struct device_node *of_find_node_by_name(struct device_node *from,
1585 const char *name)
1586{
1587 struct device_node *np;
1588
1589 read_lock(&devtree_lock);
1590 np = from ? from->allnext : allnodes;
1591 for (; np != 0; np = np->allnext)
1592 if (np->name != 0 && strcasecmp(np->name, name) == 0
1593 && of_node_get(np))
1594 break;
1595 if (from)
1596 of_node_put(from);
1597 read_unlock(&devtree_lock);
1598 return np;
1599}
1600EXPORT_SYMBOL(of_find_node_by_name);
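/*
 * A typical (hypothetical) caller of the iterator above. Passing the
 * previous node back in makes the loop drop the old reference
 * automatically; only an early break needs to be paired with an
 * explicit of_node_put():
 *
 *	struct device_node *np = NULL;
 *
 *	while ((np = of_find_node_by_name(np, "mac-io")) != NULL) {
 *		if (is_the_one_we_want(np)) {	// hypothetical predicate
 *			do_something_with(np);	// hypothetical consumer
 *			of_node_put(np);	// broke out: drop the ref
 *			break;
 *		}
 *	}
 */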
1601
1602/**
1603 * of_find_node_by_type - Find a node by its "device_type" property
1604 * @from: The node to start searching from or NULL, the node
1605 * you pass will not be searched, only the next one
1606 * will; typically, you pass what the previous call
1607 * returned. of_node_put() will be called on it
1608 *	@type:	The type string to match against
1609 *
1610 * Returns a node pointer with refcount incremented, use
1611 * of_node_put() on it when done.
1612 */
1613struct device_node *of_find_node_by_type(struct device_node *from,
1614 const char *type)
1615{
1616 struct device_node *np;
1617
1618 read_lock(&devtree_lock);
1619 np = from ? from->allnext : allnodes;
1620 for (; np != 0; np = np->allnext)
1621 if (np->type != 0 && strcasecmp(np->type, type) == 0
1622 && of_node_get(np))
1623 break;
1624 if (from)
1625 of_node_put(from);
1626 read_unlock(&devtree_lock);
1627 return np;
1628}
1629EXPORT_SYMBOL(of_find_node_by_type);
1630
1631/**
1632 * of_find_compatible_node - Find a node based on type and one of the
1633 * tokens in its "compatible" property
1634 * @from: The node to start searching from or NULL, the node
1635 * you pass will not be searched, only the next one
1636 * will; typically, you pass what the previous call
1637 * returned. of_node_put() will be called on it
1638 * @type: The type string to match "device_type" or NULL to ignore
1639 * @compatible: The string to match to one of the tokens in the device
1640 * "compatible" list.
1641 *
1642 * Returns a node pointer with refcount incremented, use
1643 * of_node_put() on it when done.
1644 */
1645struct device_node *of_find_compatible_node(struct device_node *from,
1646 const char *type, const char *compatible)
1647{
1648 struct device_node *np;
1649
1650 read_lock(&devtree_lock);
1651 np = from ? from->allnext : allnodes;
1652 for (; np != 0; np = np->allnext) {
1653 if (type != NULL
1654 && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1655 continue;
1656 if (device_is_compatible(np, compatible) && of_node_get(np))
1657 break;
1658 }
1659 if (from)
1660 of_node_put(from);
1661 read_unlock(&devtree_lock);
1662 return np;
1663}
1664EXPORT_SYMBOL(of_find_compatible_node);
1665
1666/**
1667 * of_find_node_by_path - Find a node matching a full OF path
1668 * @path: The full path to match
1669 *
1670 * Returns a node pointer with refcount incremented, use
1671 * of_node_put() on it when done.
1672 */
1673struct device_node *of_find_node_by_path(const char *path)
1674{
1675 struct device_node *np = allnodes;
1676
1677 read_lock(&devtree_lock);
1678 for (; np != 0; np = np->allnext) {
1679 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0
1680 && of_node_get(np))
1681 break;
1682 }
1683 read_unlock(&devtree_lock);
1684 return np;
1685}
1686EXPORT_SYMBOL(of_find_node_by_path);
1687
1688/**
1689 * of_find_node_by_phandle - Find a node given a phandle
1690 * @handle: phandle of the node to find
1691 *
1692 * Returns a node pointer with refcount incremented, use
1693 * of_node_put() on it when done.
1694 */
1695struct device_node *of_find_node_by_phandle(phandle handle)
1696{
1697 struct device_node *np;
1698
1699 read_lock(&devtree_lock);
1700 for (np = allnodes; np != 0; np = np->allnext)
1701 if (np->linux_phandle == handle)
1702 break;
1703 if (np)
1704 of_node_get(np);
1705 read_unlock(&devtree_lock);
1706 return np;
1707}
1708EXPORT_SYMBOL(of_find_node_by_phandle);
1709
1710/**
1711 * of_find_all_nodes - Get next node in global list
1712 * @prev: Previous node or NULL to start iteration
1713 * of_node_put() will be called on it
1714 *
1715 * Returns a node pointer with refcount incremented, use
1716 * of_node_put() on it when done.
1717 */
1718struct device_node *of_find_all_nodes(struct device_node *prev)
1719{
1720 struct device_node *np;
1721
1722 read_lock(&devtree_lock);
1723 np = prev ? prev->allnext : allnodes;
1724 for (; np != 0; np = np->allnext)
1725 if (of_node_get(np))
1726 break;
1727 if (prev)
1728 of_node_put(prev);
1729 read_unlock(&devtree_lock);
1730 return np;
1731}
1732EXPORT_SYMBOL(of_find_all_nodes);
1733
1734/**
1735 * of_get_parent - Get a node's parent if any
1736 *	@node:	Node to get the parent of
1737 *
1738 * Returns a node pointer with refcount incremented, use
1739 * of_node_put() on it when done.
1740 */
1741struct device_node *of_get_parent(const struct device_node *node)
1742{
1743 struct device_node *np;
1744
1745 if (!node)
1746 return NULL;
1747
1748 read_lock(&devtree_lock);
1749 np = of_node_get(node->parent);
1750 read_unlock(&devtree_lock);
1751 return np;
1752}
1753EXPORT_SYMBOL(of_get_parent);
1754
1755/**
1756 *	of_get_next_child - Iterate over a node's children
1757 * @node: parent node
1758 * @prev: previous child of the parent node, or NULL to get first
1759 *
1760 * Returns a node pointer with refcount incremented, use
1761 * of_node_put() on it when done.
1762 */
1763struct device_node *of_get_next_child(const struct device_node *node,
1764 struct device_node *prev)
1765{
1766 struct device_node *next;
1767
1768 read_lock(&devtree_lock);
1769 next = prev ? prev->sibling : node->child;
1770 for (; next != 0; next = next->sibling)
1771 if (of_node_get(next))
1772 break;
1773 if (prev)
1774 of_node_put(prev);
1775 read_unlock(&devtree_lock);
1776 return next;
1777}
1778EXPORT_SYMBOL(of_get_next_child);
1779
1780/**
1781 * of_node_get - Increment refcount of a node
1782 * @node: Node to inc refcount, NULL is supported to
1783 * simplify writing of callers
1784 *
1785 * Returns node.
1786 */
1787struct device_node *of_node_get(struct device_node *node)
1788{
1789 if (node)
1790 kref_get(&node->kref);
1791 return node;
1792}
1793EXPORT_SYMBOL(of_node_get);
1794
1795static inline struct device_node * kref_to_device_node(struct kref *kref)
1796{
1797 return container_of(kref, struct device_node, kref);
1798}
1799
1800/**
1801 * of_node_release - release a dynamically allocated node
1802 * @kref: kref element of the node to be released
1803 *
1804 * In of_node_put() this function is passed to kref_put()
1805 * as the destructor.
1806 */
1807static void of_node_release(struct kref *kref)
1808{
1809 struct device_node *node = kref_to_device_node(kref);
1810 struct property *prop = node->properties;
1811
1812 if (!OF_IS_DYNAMIC(node))
1813 return;
1814 while (prop) {
1815 struct property *next = prop->next;
1816 kfree(prop->name);
1817 kfree(prop->value);
1818 kfree(prop);
1819 prop = next;
1820 }
1821 kfree(node->intrs);
1822 kfree(node->addrs);
1823 kfree(node->full_name);
1824 kfree(node->data);
1825 kfree(node);
1826}
1827
1828/**
1829 * of_node_put - Decrement refcount of a node
1830 * @node: Node to dec refcount, NULL is supported to
1831 * simplify writing of callers
1832 *
1833 */
1834void of_node_put(struct device_node *node)
1835{
1836 if (node)
1837 kref_put(&node->kref, of_node_release);
1838}
1839EXPORT_SYMBOL(of_node_put);
1840
1841/*
1842 * Plug a device node into the tree and global list.
1843 */
1844void of_attach_node(struct device_node *np)
1845{
1846 write_lock(&devtree_lock);
1847 np->sibling = np->parent->child;
1848 np->allnext = allnodes;
1849 np->parent->child = np;
1850 allnodes = np;
1851 write_unlock(&devtree_lock);
1852}
1853
1854/*
1855 * "Unplug" a node from the device tree. The caller must hold
1856 * a reference to the node. The memory associated with the node
1857 * is not freed until its refcount goes to zero.
1858 */
1859void of_detach_node(const struct device_node *np)
1860{
1861 struct device_node *parent;
1862
1863 write_lock(&devtree_lock);
1864
1865 parent = np->parent;
1866
1867 if (allnodes == np)
1868 allnodes = np->allnext;
1869 else {
1870 struct device_node *prev;
1871 for (prev = allnodes;
1872 prev->allnext != np;
1873 prev = prev->allnext)
1874 ;
1875 prev->allnext = np->allnext;
1876 }
1877
1878 if (parent->child == np)
1879 parent->child = np->sibling;
1880 else {
1881 struct device_node *prevsib;
1882 for (prevsib = np->parent->child;
1883 prevsib->sibling != np;
1884 prevsib = prevsib->sibling)
1885 ;
1886 prevsib->sibling = np->sibling;
1887 }
1888
1889 write_unlock(&devtree_lock);
1890}
1891
1892#ifdef CONFIG_PPC_PSERIES
1893/*
1894 * Fix up the uninitialized fields in a new device node:
1895 * name, type, n_addrs, addrs, n_intrs, intrs, and pci-specific fields
1896 *
1897 * A lot of boot-time code is duplicated here, because functions such
1898 * as finish_node_interrupts, interpret_pci_props, etc. cannot use the
1899 * slab allocator.
1900 *
1901 * This should probably be split up into smaller chunks.
1902 */
1903
1904static int of_finish_dynamic_node(struct device_node *node,
1905 unsigned long *unused1, int unused2,
1906 int unused3, int unused4)
1907{
1908 struct device_node *parent = of_get_parent(node);
1909 int err = 0;
1910 phandle *ibm_phandle;
1911
1912 node->name = get_property(node, "name", NULL);
1913 node->type = get_property(node, "device_type", NULL);
1914
1915 if (!parent) {
1916 err = -ENODEV;
1917 goto out;
1918 }
1919
1920	/* We don't support this function on PowerMac, at least
1921	 * not yet. Note that we must go through "out" so that the
1922	 * parent reference taken above gets dropped.
1923	 */
1924	if (systemcfg->platform == PLATFORM_POWERMAC) { err = -ENODEV; goto out; }
1925
1926 /* fix up new node's linux_phandle field */
1927	if ((ibm_phandle = (phandle *)get_property(node, "ibm,phandle", NULL)))
1928 node->linux_phandle = *ibm_phandle;
1929
1930out:
1931 of_node_put(parent);
1932 return err;
1933}
1934
1935static int prom_reconfig_notifier(struct notifier_block *nb,
1936 unsigned long action, void *node)
1937{
1938 int err;
1939
1940 switch (action) {
1941 case PSERIES_RECONFIG_ADD:
1942 err = finish_node(node, NULL, of_finish_dynamic_node, 0, 0, 0);
1943 if (err < 0) {
1944 printk(KERN_ERR "finish_node returned %d\n", err);
1945 err = NOTIFY_BAD;
1946 }
1947 break;
1948 default:
1949 err = NOTIFY_DONE;
1950 break;
1951 }
1952 return err;
1953}
1954
1955static struct notifier_block prom_reconfig_nb = {
1956 .notifier_call = prom_reconfig_notifier,
1957 .priority = 10, /* This one needs to run first */
1958};
1959
1960static int __init prom_reconfig_setup(void)
1961{
1962 return pSeries_reconfig_notifier_register(&prom_reconfig_nb);
1963}
1964__initcall(prom_reconfig_setup);
1965#endif
1966
1967/*
1968 * Find a property with a given name for a given node
1969 * and return the value.
1970 */
1971unsigned char *get_property(struct device_node *np, const char *name,
1972 int *lenp)
1973{
1974 struct property *pp;
1975
1976 for (pp = np->properties; pp != 0; pp = pp->next)
1977 if (strcmp(pp->name, name) == 0) {
1978 if (lenp != 0)
1979 *lenp = pp->length;
1980 return pp->value;
1981 }
1982 return NULL;
1983}
1984EXPORT_SYMBOL(get_property);
1985
1986/*
1987 * Add a property to a node
1988 */
1989void prom_add_property(struct device_node* np, struct property* prop)
1990{
1991 struct property **next = &np->properties;
1992
1993 prop->next = NULL;
1994 while (*next)
1995 next = &(*next)->next;
1996 *next = prop;
1997}
1998
1999/* I quickly hacked this one together; check it against the spec! */
2000static inline unsigned long
2001bus_space_to_resource_flags(unsigned int bus_space)
2002{
2003 u8 space = (bus_space >> 24) & 0xf;
2004 if (space == 0)
2005 space = 0x02;
2006 if (space == 0x02)
2007 return IORESOURCE_MEM;
2008 else if (space == 0x01)
2009 return IORESOURCE_IO;
2010 else {
2011 printk(KERN_WARNING "prom.c: bus_space_to_resource_flags(), space: %x\n",
2012 bus_space);
2013 return 0;
2014 }
2015}
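/*
 * The "space" nibble tested above comes from the top cell (phys.hi) of
 * an OF PCI address; its low two bits encode the space code (0b00
 * config, 0b01 I/O, 0b10 32-bit memory, 0b11 64-bit memory). A
 * standalone demo with a hypothetical assigned-addresses value:
 */
#include <stdio.h>

int main(void)
{
        unsigned int phys_hi = 0x82000810;      /* hypothetical 32-bit MEM BAR */
        unsigned char space = (phys_hi >> 24) & 0xf;

        printf("space code %x -> %s\n", space,
               space == 0x01 ? "IO" : space == 0x02 ? "MEM" : "other");
        return 0;
}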
2016
2017#ifdef CONFIG_PCI
2018static struct resource *find_parent_pci_resource(struct pci_dev* pdev,
2019 struct address_range *range)
2020{
2021 unsigned long mask;
2022 int i;
2023
2024 /* Check this one */
2025 mask = bus_space_to_resource_flags(range->space);
2026 for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
2027 if ((pdev->resource[i].flags & mask) == mask &&
2028 pdev->resource[i].start <= range->address &&
2029 pdev->resource[i].end > range->address) {
2030 if ((range->address + range->size - 1) > pdev->resource[i].end) {
2031 /* Add better message */
2032 printk(KERN_WARNING "PCI/OF resource overlap !\n");
2033 return NULL;
2034 }
2035 break;
2036 }
2037 }
2038 if (i == DEVICE_COUNT_RESOURCE)
2039 return NULL;
2040 return &pdev->resource[i];
2041}
2042
2043/*
2044 * Request an OF device resource. Currently handles children of PCI
2045 * devices, or other nodes attached to the root node. Ultimately, we
2046 * should put a link to the resources in the OF node.
2047 */
2048struct resource *request_OF_resource(struct device_node* node, int index,
2049 const char* name_postfix)
2050{
2051 struct pci_dev* pcidev;
2052 u8 pci_bus, pci_devfn;
2053 unsigned long iomask;
2054 struct device_node* nd;
2055 struct resource* parent;
2056 struct resource *res = NULL;
2057 int nlen, plen;
2058
2059 if (index >= node->n_addrs)
2060 goto fail;
2061
2062 /* Sanity check on bus space */
2063 iomask = bus_space_to_resource_flags(node->addrs[index].space);
2064 if (iomask & IORESOURCE_MEM)
2065 parent = &iomem_resource;
2066 else if (iomask & IORESOURCE_IO)
2067 parent = &ioport_resource;
2068 else
2069 goto fail;
2070
2071 /* Find a PCI parent if any */
2072 nd = node;
2073 pcidev = NULL;
2074 while (nd) {
2075 if (!pci_device_from_OF_node(nd, &pci_bus, &pci_devfn))
2076 pcidev = pci_find_slot(pci_bus, pci_devfn);
2077 if (pcidev) break;
2078 nd = nd->parent;
2079 }
2080 if (pcidev)
2081 parent = find_parent_pci_resource(pcidev, &node->addrs[index]);
2082 if (!parent) {
2083 printk(KERN_WARNING "request_OF_resource(%s), parent not found\n",
2084 node->name);
2085 goto fail;
2086 }
2087
2088 res = __request_region(parent, node->addrs[index].address,
2089 node->addrs[index].size, NULL);
2090 if (!res)
2091 goto fail;
2092 nlen = strlen(node->name);
2093 plen = name_postfix ? strlen(name_postfix) : 0;
2094 res->name = (const char *)kmalloc(nlen+plen+1, GFP_KERNEL);
2095 if (res->name) {
2096 strcpy((char *)res->name, node->name);
2097 if (plen)
2098 strcpy((char *)res->name+nlen, name_postfix);
2099 }
2100 return res;
2101fail:
2102 return NULL;
2103}
2104EXPORT_SYMBOL(request_OF_resource);
2105
2106int release_OF_resource(struct device_node *node, int index)
2107{
2108 struct pci_dev* pcidev;
2109 u8 pci_bus, pci_devfn;
2110 unsigned long iomask, start, end;
2111 struct device_node* nd;
2112 struct resource* parent;
2113 struct resource *res = NULL;
2114
2115 if (index >= node->n_addrs)
2116 return -EINVAL;
2117
2118 /* Sanity check on bus space */
2119 iomask = bus_space_to_resource_flags(node->addrs[index].space);
2120 if (iomask & IORESOURCE_MEM)
2121 parent = &iomem_resource;
2122 else if (iomask & IORESOURCE_IO)
2123 parent = &ioport_resource;
2124 else
2125 return -EINVAL;
2126
2127 /* Find a PCI parent if any */
2128 nd = node;
2129 pcidev = NULL;
2130 while(nd) {
2131 if (!pci_device_from_OF_node(nd, &pci_bus, &pci_devfn))
2132 pcidev = pci_find_slot(pci_bus, pci_devfn);
2133 if (pcidev) break;
2134 nd = nd->parent;
2135 }
2136 if (pcidev)
2137 parent = find_parent_pci_resource(pcidev, &node->addrs[index]);
2138 if (!parent) {
2139 printk(KERN_WARNING "release_OF_resource(%s), parent not found\n",
2140 node->name);
2141 return -ENODEV;
2142 }
2143
2144	/* Find ourselves among the parent's children */
2145 res = parent->child;
2146 start = node->addrs[index].address;
2147 end = start + node->addrs[index].size - 1;
2148 while (res) {
2149 if (res->start == start && res->end == end &&
2150 (res->flags & IORESOURCE_BUSY))
2151 break;
2152 if (res->start <= start && res->end >= end)
2153 res = res->child;
2154 else
2155 res = res->sibling;
2156 }
2157 if (!res)
2158 return -ENODEV;
2159
2160 if (res->name) {
2161 kfree(res->name);
2162 res->name = NULL;
2163 }
2164 release_resource(res);
2165 kfree(res);
2166
2167 return 0;
2168}
2169EXPORT_SYMBOL(release_OF_resource);
2170#endif /* CONFIG_PCI */
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
new file mode 100644
index 000000000000..9750b3cd8ecd
--- /dev/null
+++ b/arch/powerpc/kernel/prom_init.c
@@ -0,0 +1,2109 @@
1/*
2 * Procedures for interfacing to Open Firmware.
3 *
4 * Paul Mackerras August 1996.
5 * Copyright (C) 1996-2005 Paul Mackerras.
6 *
7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8 * {engebret|bergner}@us.ibm.com
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#undef DEBUG_PROM
17
18#include <stdarg.h>
19#include <linux/config.h>
20#include <linux/kernel.h>
21#include <linux/string.h>
22#include <linux/init.h>
23#include <linux/threads.h>
24#include <linux/spinlock.h>
25#include <linux/types.h>
26#include <linux/pci.h>
27#include <linux/proc_fs.h>
28#include <linux/stringify.h>
29#include <linux/delay.h>
30#include <linux/initrd.h>
31#include <linux/bitops.h>
32#include <asm/prom.h>
33#include <asm/rtas.h>
34#include <asm/page.h>
35#include <asm/processor.h>
36#include <asm/irq.h>
37#include <asm/io.h>
38#include <asm/smp.h>
39#include <asm/system.h>
40#include <asm/mmu.h>
41#include <asm/pgtable.h>
42#include <asm/pci.h>
43#include <asm/iommu.h>
44#include <asm/btext.h>
45#include <asm/sections.h>
46#include <asm/machdep.h>
47
48#ifdef CONFIG_LOGO_LINUX_CLUT224
49#include <linux/linux_logo.h>
50extern const struct linux_logo logo_linux_clut224;
51#endif
52
53/*
54 * Properties whose value is longer than this get excluded from our
55 * copy of the device tree. This value does need to be big enough to
56 * ensure that we don't lose things like the interrupt-map property
57 * on a PCI-PCI bridge.
58 */
59#define MAX_PROPERTY_LENGTH (1UL * 1024 * 1024)
60
61/*
62 * Eventually bump that one up
63 */
64#define DEVTREE_CHUNK_SIZE 0x100000
65
66/*
67 * This is the size of the local memory reserve map that gets copied
68 * into the boot params passed to the kernel. That size is totally
69 * flexible as the kernel just reads the list until it encounters an
70 * entry with size 0, so it can be changed without breaking binary
71 * compatibility
72 */
73#define MEM_RESERVE_MAP_SIZE 8
74
75/*
76 * prom_init() is called very early on, before the kernel text
77 * and data have been mapped to KERNELBASE. At this point the code
78 * is running at whatever address it has been loaded at.
79 * On ppc32 we compile with -mrelocatable, which means that references
80 * to extern and static variables get relocated automatically.
81 * On ppc64 we have to relocate the references explicitly with
82 * RELOC. (Note that strings count as static variables.)
83 *
84 * Because OF may have mapped I/O devices into the area starting at
85 * KERNELBASE, particularly on CHRP machines, we can't safely call
86 * OF once the kernel has been mapped to KERNELBASE. Therefore all
87 * OF calls must be done within prom_init().
88 *
89 * ADDR is used in calls to call_prom. The 4th and following
90 * arguments to call_prom should be 32-bit values.
91 * On ppc64, 64 bit values are truncated to 32 bits (and
92 * fortunately don't get interpreted as two arguments).
93 */
94#ifdef CONFIG_PPC64
95#define RELOC(x) (*PTRRELOC(&(x)))
96#define ADDR(x) (u32) add_reloc_offset((unsigned long)(x))
97#else
98#define RELOC(x) (x)
99#define ADDR(x) (u32) (x)
100#endif
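/*
 * A minimal usage sketch of the two macros above, using names from this
 * file. Before relocation, a ppc64 build must not touch a static
 * variable or a string literal directly:
 *
 *	RELOC(prom_entry) = pp;			   not: prom_entry = pp;
 *	if (strcmp(type, RELOC("memory")))	   string literals too
 *		...
 *	call_prom("write", 3, 1, _prom->stdout, ADDR("\r\n"), 2);
 *
 * ADDR() additionally truncates to the 32-bit value OF expects for call
 * arguments. On ppc32 both macros compile away.
 */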
101
102#define PROM_BUG() do { \
103 prom_printf("kernel BUG at %s line 0x%x!\n", \
104 RELOC(__FILE__), __LINE__); \
105 __asm__ __volatile__(".long " BUG_ILLEGAL_INSTR); \
106} while (0)
107
108#ifdef DEBUG_PROM
109#define prom_debug(x...) prom_printf(x)
110#else
111#define prom_debug(x...)
112#endif
113
114#ifdef CONFIG_PPC32
115#define PLATFORM_POWERMAC _MACH_Pmac
116#define PLATFORM_CHRP _MACH_chrp
117#endif
118
119
120typedef u32 prom_arg_t;
121
122struct prom_args {
123 u32 service;
124 u32 nargs;
125 u32 nret;
126 prom_arg_t args[10];
127};
128
129struct prom_t {
130 ihandle root;
131 ihandle chosen;
132 int cpu;
133 ihandle stdout;
134 ihandle mmumap;
135};
136
137struct mem_map_entry {
138 unsigned long base;
139 unsigned long size;
140};
141
142typedef u32 cell_t;
143
144extern void __start(unsigned long r3, unsigned long r4, unsigned long r5);
145
146#ifdef CONFIG_PPC64
147extern int enter_prom(struct prom_args *args, unsigned long entry);
148#else
149static inline int enter_prom(struct prom_args *args, unsigned long entry)
150{
151 return ((int (*)(struct prom_args *))entry)(args);
152}
153#endif
154
155extern void copy_and_flush(unsigned long dest, unsigned long src,
156 unsigned long size, unsigned long offset);
157
158/* prom structure */
159static struct prom_t __initdata prom;
160
161static unsigned long prom_entry __initdata;
162
163#define PROM_SCRATCH_SIZE 256
164
165static char __initdata of_stdout_device[256];
166static char __initdata prom_scratch[PROM_SCRATCH_SIZE];
167
168static unsigned long __initdata dt_header_start;
169static unsigned long __initdata dt_struct_start, dt_struct_end;
170static unsigned long __initdata dt_string_start, dt_string_end;
171
172static unsigned long __initdata prom_initrd_start, prom_initrd_end;
173
174#ifdef CONFIG_PPC64
175static int __initdata iommu_force_on;
176static int __initdata ppc64_iommu_off;
177static unsigned long __initdata prom_tce_alloc_start;
178static unsigned long __initdata prom_tce_alloc_end;
179#endif
180
181static int __initdata of_platform;
182
183static char __initdata prom_cmd_line[COMMAND_LINE_SIZE];
184
185static unsigned long __initdata prom_memory_limit;
186
187static unsigned long __initdata alloc_top;
188static unsigned long __initdata alloc_top_high;
189static unsigned long __initdata alloc_bottom;
190static unsigned long __initdata rmo_top;
191static unsigned long __initdata ram_top;
192
193static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE];
194static int __initdata mem_reserve_cnt;
195
196static cell_t __initdata regbuf[1024];
197
198
199#define MAX_CPU_THREADS 2
200
201/* TO GO */
202#ifdef CONFIG_HMT
203struct {
204 unsigned int pir;
205 unsigned int threadid;
206} hmt_thread_data[NR_CPUS];
207#endif /* CONFIG_HMT */
208
209/*
210 * Error results ... some OF calls will return "-1" on error, some
211 * will return 0, some will return either. To simplify, here are
212 * macros to use with any ihandle or phandle return value to check if
213 * it is valid
214 */
215
216#define PROM_ERROR (-1u)
217#define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR)
218#define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR)
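/*
 * Typical use of the validity macros, matching the calls later in this
 * file; the double test catches both failure styles:
 *
 *	phandle rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
 *	if (!PHANDLE_VALID(rtas))
 *		return;
 */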
219
220
221/* This is the one and *ONLY* place where we actually call open
222 * firmware.
223 */
224
225static int __init call_prom(const char *service, int nargs, int nret, ...)
226{
227 int i;
228 struct prom_args args;
229 va_list list;
230
231 args.service = ADDR(service);
232 args.nargs = nargs;
233 args.nret = nret;
234
235 va_start(list, nret);
236 for (i = 0; i < nargs; i++)
237 args.args[i] = va_arg(list, prom_arg_t);
238 va_end(list);
239
240 for (i = 0; i < nret; i++)
241 args.args[nargs+i] = 0;
242
243 if (enter_prom(&args, RELOC(prom_entry)) < 0)
244 return PROM_ERROR;
245
246 return (nret > 0) ? args.args[nargs] : 0;
247}
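/*
 * A worked example of the argument buffer built above. For
 * prom_getprop(node, "reg", &reg, 4), i.e.
 * call_prom("getprop", 4, 1, node, ADDR("reg"), &reg, 4),
 * enter_prom() is handed:
 *
 *	service = ADDR("getprop"), nargs = 4, nret = 1
 *	args[0..3] = node, ADDR("reg"), &reg, 4		(inputs)
 *	args[4]    = 0					(return slot)
 *
 * and the property length comes back in args[nargs] = args[4].
 */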
248
249static int __init call_prom_ret(const char *service, int nargs, int nret,
250 prom_arg_t *rets, ...)
251{
252 int i;
253 struct prom_args args;
254 va_list list;
255
256 args.service = ADDR(service);
257 args.nargs = nargs;
258 args.nret = nret;
259
260 va_start(list, rets);
261 for (i = 0; i < nargs; i++)
262 args.args[i] = va_arg(list, prom_arg_t);
263 va_end(list);
264
265 for (i = 0; i < nret; i++)
 266		args.args[nargs+i] = 0;	/* zero the return slots, as in call_prom() */
267
268 if (enter_prom(&args, RELOC(prom_entry)) < 0)
269 return PROM_ERROR;
270
271 if (rets != NULL)
272 for (i = 1; i < nret; ++i)
273 rets[i-1] = args.args[nargs+i];
274
275 return (nret > 0) ? args.args[nargs] : 0;
276}
277
278
279static void __init prom_print(const char *msg)
280{
281 const char *p, *q;
282 struct prom_t *_prom = &RELOC(prom);
283
284 if (_prom->stdout == 0)
285 return;
286
287 for (p = msg; *p != 0; p = q) {
288 for (q = p; *q != 0 && *q != '\n'; ++q)
289 ;
290 if (q > p)
291 call_prom("write", 3, 1, _prom->stdout, p, q - p);
292 if (*q == 0)
293 break;
294 ++q;
295 call_prom("write", 3, 1, _prom->stdout, ADDR("\r\n"), 2);
296 }
297}
298
299
300static void __init prom_print_hex(unsigned long val)
301{
302 int i, nibbles = sizeof(val)*2;
303 char buf[sizeof(val)*2+1];
304 struct prom_t *_prom = &RELOC(prom);
305
306 for (i = nibbles-1; i >= 0; i--) {
307 buf[i] = (val & 0xf) + '0';
308 if (buf[i] > '9')
309 buf[i] += ('a'-'0'-10);
310 val >>= 4;
311 }
312 buf[nibbles] = '\0';
313 call_prom("write", 3, 1, _prom->stdout, buf, nibbles);
314}
315
316
317static void __init prom_printf(const char *format, ...)
318{
319 const char *p, *q, *s;
320 va_list args;
321 unsigned long v;
322 struct prom_t *_prom = &RELOC(prom);
323
324 va_start(args, format);
325#ifdef CONFIG_PPC64
326 format = PTRRELOC(format);
327#endif
328 for (p = format; *p != 0; p = q) {
329 for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
330 ;
331 if (q > p)
332 call_prom("write", 3, 1, _prom->stdout, p, q - p);
333 if (*q == 0)
334 break;
335 if (*q == '\n') {
336 ++q;
337 call_prom("write", 3, 1, _prom->stdout,
338 ADDR("\r\n"), 2);
339 continue;
340 }
341 ++q;
342 if (*q == 0)
343 break;
344 switch (*q) {
345 case 's':
346 ++q;
347 s = va_arg(args, const char *);
348 prom_print(s);
349 break;
350 case 'x':
351 ++q;
352 v = va_arg(args, unsigned long);
353 prom_print_hex(v);
354 break;
355 }
356 }
357}
358
359
360static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
361 unsigned long align)
362{
363 int ret;
364 struct prom_t *_prom = &RELOC(prom);
365
366 ret = call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
367 (prom_arg_t)align);
368 if (ret != -1 && _prom->mmumap != 0)
369 /* old pmacs need us to map as well */
370 call_prom("call-method", 6, 1,
371 ADDR("map"), _prom->mmumap, 0, size, virt, virt);
372 return ret;
373}
374
375static void __init __attribute__((noreturn)) prom_panic(const char *reason)
376{
377#ifdef CONFIG_PPC64
378 reason = PTRRELOC(reason);
379#endif
380 prom_print(reason);
381 /* ToDo: should put up an SRC here on p/iSeries */
382 call_prom("exit", 0, 0);
383
384 for (;;) /* should never get here */
385 ;
386}
387
388
389static int __init prom_next_node(phandle *nodep)
390{
391 phandle node;
392
393 if ((node = *nodep) != 0
394 && (*nodep = call_prom("child", 1, 1, node)) != 0)
395 return 1;
396 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
397 return 1;
398 for (;;) {
399 if ((node = call_prom("parent", 1, 1, node)) == 0)
400 return 0;
401 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
402 return 1;
403 }
404}
405
406static int __init prom_getprop(phandle node, const char *pname,
407 void *value, size_t valuelen)
408{
409 return call_prom("getprop", 4, 1, node, ADDR(pname),
410 (u32)(unsigned long) value, (u32) valuelen);
411}
412
413static int __init prom_getproplen(phandle node, const char *pname)
414{
415 return call_prom("getproplen", 2, 1, node, ADDR(pname));
416}
417
418static int __init prom_setprop(phandle node, const char *pname,
419 void *value, size_t valuelen)
420{
421 return call_prom("setprop", 4, 1, node, ADDR(pname),
422 (u32)(unsigned long) value, (u32) valuelen);
423}
424
425/* We can't use the standard versions because of RELOC headaches. */
426#define isxdigit(c) (('0' <= (c) && (c) <= '9') \
427 || ('a' <= (c) && (c) <= 'f') \
428 || ('A' <= (c) && (c) <= 'F'))
429
430#define isdigit(c) ('0' <= (c) && (c) <= '9')
431#define islower(c) ('a' <= (c) && (c) <= 'z')
432#define toupper(c) (islower(c) ? ((c) - 'a' + 'A') : (c))
433
434unsigned long prom_strtoul(const char *cp, const char **endp)
435{
436 unsigned long result = 0, base = 10, value;
437
438 if (*cp == '0') {
439 base = 8;
440 cp++;
441 if (toupper(*cp) == 'X') {
442 cp++;
443 base = 16;
444 }
445 }
446
447 while (isxdigit(*cp) &&
448 (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) {
449 result = result * base + value;
450 cp++;
451 }
452
453 if (endp)
454 *endp = cp;
455
456 return result;
457}
458
459unsigned long prom_memparse(const char *ptr, const char **retptr)
460{
461 unsigned long ret = prom_strtoul(ptr, retptr);
462 int shift = 0;
463
464 /*
465 * We can't use a switch here because GCC *may* generate a
466 * jump table which won't work, because we're not running at
467 * the address we're linked at.
468 */
469 if ('G' == **retptr || 'g' == **retptr)
470 shift = 30;
471
472 if ('M' == **retptr || 'm' == **retptr)
473 shift = 20;
474
475 if ('K' == **retptr || 'k' == **retptr)
476 shift = 10;
477
478 if (shift) {
479 ret <<= shift;
480 (*retptr)++;
481 }
482
483 return ret;
484}
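/*
 * Worked example: for "mem=512M" on the command line,
 * prom_memparse("512M", &p) parses 512 via prom_strtoul(), sees the
 * 'M' suffix and returns 512 << 20 = 0x20000000, leaving p just past
 * the 'M'; "0x4000K" would parse as hex and shift by 10 (16MB).
 */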
485
486/*
487 * Early parsing of the command line passed to the kernel, used for
488 * "mem=x" and the options that affect the iommu
489 */
490static void __init early_cmdline_parse(void)
491{
492 struct prom_t *_prom = &RELOC(prom);
493 char *opt, *p;
494 int l = 0;
495
496 RELOC(prom_cmd_line[0]) = 0;
497 p = RELOC(prom_cmd_line);
498 if ((long)_prom->chosen > 0)
499 l = prom_getprop(_prom->chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
500#ifdef CONFIG_CMDLINE
 501	if (l <= 0) /* dbl check, also covers getprop failure */
502 strlcpy(RELOC(prom_cmd_line),
503 RELOC(CONFIG_CMDLINE), sizeof(prom_cmd_line));
504#endif /* CONFIG_CMDLINE */
505 prom_printf("command line: %s\n", RELOC(prom_cmd_line));
506
507#ifdef CONFIG_PPC64
508 opt = strstr(RELOC(prom_cmd_line), RELOC("iommu="));
509 if (opt) {
510 prom_printf("iommu opt is: %s\n", opt);
511 opt += 6;
512 while (*opt && *opt == ' ')
513 opt++;
514 if (!strncmp(opt, RELOC("off"), 3))
515 RELOC(ppc64_iommu_off) = 1;
516 else if (!strncmp(opt, RELOC("force"), 5))
517 RELOC(iommu_force_on) = 1;
518 }
519#endif
520
521 opt = strstr(RELOC(prom_cmd_line), RELOC("mem="));
522 if (opt) {
523 opt += 4;
524 RELOC(prom_memory_limit) = prom_memparse(opt, (const char **)&opt);
525#ifdef CONFIG_PPC64
526 /* Align to 16 MB == size of ppc64 large page */
527 RELOC(prom_memory_limit) = ALIGN(RELOC(prom_memory_limit), 0x1000000);
528#endif
529 }
530}
531
532#ifdef CONFIG_PPC_PSERIES
533/*
534 * To tell the firmware what our capabilities are, we have to pass
535 * it a fake 32-bit ELF header containing a couple of PT_NOTE sections
536 * that contain structures that contain the actual values.
537 */
538static struct fake_elf {
539 Elf32_Ehdr elfhdr;
540 Elf32_Phdr phdr[2];
541 struct chrpnote {
542 u32 namesz;
543 u32 descsz;
544 u32 type;
545 char name[8]; /* "PowerPC" */
546 struct chrpdesc {
547 u32 real_mode;
548 u32 real_base;
549 u32 real_size;
550 u32 virt_base;
551 u32 virt_size;
552 u32 load_base;
553 } chrpdesc;
554 } chrpnote;
555 struct rpanote {
556 u32 namesz;
557 u32 descsz;
558 u32 type;
559 char name[24]; /* "IBM,RPA-Client-Config" */
560 struct rpadesc {
561 u32 lpar_affinity;
562 u32 min_rmo_size;
563 u32 min_rmo_percent;
564 u32 max_pft_size;
565 u32 splpar;
566 u32 min_load;
567 u32 new_mem_def;
568 u32 ignore_me;
569 } rpadesc;
570 } rpanote;
571} fake_elf = {
572 .elfhdr = {
573 .e_ident = { 0x7f, 'E', 'L', 'F',
574 ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
575 .e_type = ET_EXEC, /* yeah right */
576 .e_machine = EM_PPC,
577 .e_version = EV_CURRENT,
578 .e_phoff = offsetof(struct fake_elf, phdr),
579 .e_phentsize = sizeof(Elf32_Phdr),
580 .e_phnum = 2
581 },
582 .phdr = {
583 [0] = {
584 .p_type = PT_NOTE,
585 .p_offset = offsetof(struct fake_elf, chrpnote),
586 .p_filesz = sizeof(struct chrpnote)
587 }, [1] = {
588 .p_type = PT_NOTE,
589 .p_offset = offsetof(struct fake_elf, rpanote),
590 .p_filesz = sizeof(struct rpanote)
591 }
592 },
593 .chrpnote = {
594 .namesz = sizeof("PowerPC"),
595 .descsz = sizeof(struct chrpdesc),
596 .type = 0x1275,
597 .name = "PowerPC",
598 .chrpdesc = {
599 .real_mode = ~0U, /* ~0 means "don't care" */
600 .real_base = ~0U,
601 .real_size = ~0U,
602 .virt_base = ~0U,
603 .virt_size = ~0U,
604 .load_base = ~0U
605 },
606 },
607 .rpanote = {
608 .namesz = sizeof("IBM,RPA-Client-Config"),
609 .descsz = sizeof(struct rpadesc),
610 .type = 0x12759999,
611 .name = "IBM,RPA-Client-Config",
612 .rpadesc = {
613 .lpar_affinity = 0,
614 .min_rmo_size = 64, /* in megabytes */
615 .min_rmo_percent = 0,
616 .max_pft_size = 48, /* 2^48 bytes max PFT size */
617 .splpar = 1,
618 .min_load = ~0U,
619 .new_mem_def = 0
620 }
621 }
622};
623
624static void __init prom_send_capabilities(void)
625{
626 ihandle elfloader;
627
628 elfloader = call_prom("open", 1, 1, ADDR("/packages/elf-loader"));
629 if (elfloader == 0) {
630 prom_printf("couldn't open /packages/elf-loader\n");
631 return;
632 }
633 call_prom("call-method", 3, 1, ADDR("process-elf-header"),
634 elfloader, ADDR(&fake_elf));
635 call_prom("close", 1, 0, elfloader);
636}
637#endif
638
639/*
640 * Memory allocation strategy... our layout is normally:
641 *
642 * at 14Mb or more we have vmlinux, then a gap and initrd. In some
643 * rare cases, initrd might end up being before the kernel though.
 644 * We assume this won't overwrite the final kernel at 0; we have no
 645 * provision to handle that in this version, but it should hopefully
 646 * never happen.
647 *
 648 * alloc_top is set to the top of the RMO, and may shrink down if the
 649 * TCEs overlap
650 *
651 * alloc_bottom is set to the top of kernel/initrd
652 *
 653 * from there, allocations are done this way: rtas is allocated
 654 * topmost, and the device-tree is allocated from the bottom. We try
 655 * to grow the device-tree allocation as we progress. If we can't,
 656 * then we fail; we don't currently have a facility to restart
 657 * elsewhere, but that shouldn't be necessary.
658 *
659 * Note that calls to reserve_mem have to be done explicitly, memory
660 * allocated with either alloc_up or alloc_down isn't automatically
661 * reserved.
662 */
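/*
 * A sketch of the layout described above (addresses illustrative,
 * not to scale):
 *
 *	0 ... kernel ... initrd | device-tree -->  ...  <-- rtas |
 *	^                       ^                                ^
 *	start of RAM            alloc_bottom          alloc_top (top of
 *	                                              RMO, capped below)
 *
 * TCE tables (ppc64) are carved downward from alloc_top_high, the
 * real top of RAM.
 */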
663
664
665/*
666 * Allocates memory in the RMO upward from the kernel/initrd
667 *
 668 * When align is 0, this is a special case: it means allocating in place
 669 * at the current location of alloc_bottom, or failing (that is, basically
 670 * extending the previous allocation). Used for the device-tree flattening.
671 */
672static unsigned long __init alloc_up(unsigned long size, unsigned long align)
673{
674 unsigned long base = RELOC(alloc_bottom);
675 unsigned long addr = 0;
676
677 if (align)
678 base = _ALIGN_UP(base, align);
679 prom_debug("alloc_up(%x, %x)\n", size, align);
680 if (RELOC(ram_top) == 0)
681 prom_panic("alloc_up() called with mem not initialized\n");
682
683 if (align)
684 base = _ALIGN_UP(RELOC(alloc_bottom), align);
685 else
686 base = RELOC(alloc_bottom);
687
688 for(; (base + size) <= RELOC(alloc_top);
689 base = _ALIGN_UP(base + 0x100000, align)) {
690 prom_debug(" trying: 0x%x\n\r", base);
691 addr = (unsigned long)prom_claim(base, size, 0);
692 if (addr != PROM_ERROR && addr != 0)
693 break;
694 addr = 0;
695 if (align == 0)
696 break;
697 }
698 if (addr == 0)
699 return 0;
700 RELOC(alloc_bottom) = addr;
701
702 prom_debug(" -> %x\n", addr);
703 prom_debug(" alloc_bottom : %x\n", RELOC(alloc_bottom));
704 prom_debug(" alloc_top : %x\n", RELOC(alloc_top));
705 prom_debug(" alloc_top_hi : %x\n", RELOC(alloc_top_high));
706 prom_debug(" rmo_top : %x\n", RELOC(rmo_top));
707 prom_debug(" ram_top : %x\n", RELOC(ram_top));
708
709 return addr;
710}
711
712/*
713 * Allocates memory downward, either from top of RMO, or if highmem
714 * is set, from the top of RAM. Note that this one doesn't handle
715 * failures. It does claim memory if highmem is not set.
716 */
717static unsigned long __init alloc_down(unsigned long size, unsigned long align,
718 int highmem)
719{
720 unsigned long base, addr = 0;
721
722 prom_debug("alloc_down(%x, %x, %s)\n", size, align,
723 highmem ? RELOC("(high)") : RELOC("(low)"));
724 if (RELOC(ram_top) == 0)
725 prom_panic("alloc_down() called with mem not initialized\n");
726
727 if (highmem) {
728 /* Carve out storage for the TCE table. */
729 addr = _ALIGN_DOWN(RELOC(alloc_top_high) - size, align);
730 if (addr <= RELOC(alloc_bottom))
731 return 0;
 732		/* Will we bump into the RMO? If yes, check that we
 733		 * don't overlap existing allocations there; if we do,
 734		 * we are dead: we must be the first allocation in town!
 735		 */
736 if (addr < RELOC(rmo_top)) {
737 /* Good, we are first */
738 if (RELOC(alloc_top) == RELOC(rmo_top))
739 RELOC(alloc_top) = RELOC(rmo_top) = addr;
740 else
741 return 0;
742 }
743 RELOC(alloc_top_high) = addr;
744 goto bail;
745 }
746
747 base = _ALIGN_DOWN(RELOC(alloc_top) - size, align);
748 for (; base > RELOC(alloc_bottom);
749 base = _ALIGN_DOWN(base - 0x100000, align)) {
750 prom_debug(" trying: 0x%x\n\r", base);
751 addr = (unsigned long)prom_claim(base, size, 0);
752 if (addr != PROM_ERROR && addr != 0)
753 break;
754 addr = 0;
755 }
756 if (addr == 0)
757 return 0;
758 RELOC(alloc_top) = addr;
759
760 bail:
761 prom_debug(" -> %x\n", addr);
762 prom_debug(" alloc_bottom : %x\n", RELOC(alloc_bottom));
763 prom_debug(" alloc_top : %x\n", RELOC(alloc_top));
764 prom_debug(" alloc_top_hi : %x\n", RELOC(alloc_top_high));
765 prom_debug(" rmo_top : %x\n", RELOC(rmo_top));
766 prom_debug(" ram_top : %x\n", RELOC(ram_top));
767
768 return addr;
769}
770
771/*
772 * Parse a "reg" cell
773 */
774static unsigned long __init prom_next_cell(int s, cell_t **cellp)
775{
776 cell_t *p = *cellp;
777 unsigned long r = 0;
778
779 /* Ignore more than 2 cells */
780 while (s > sizeof(unsigned long) / 4) {
781 p++;
782 s--;
783 }
784 r = *p++;
785#ifdef CONFIG_PPC64
786 if (s > 1) {
787 r <<= 32;
788 r |= *(p++);
789 }
790#endif
791 *cellp = p;
792 return r;
793}
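/*
 * Worked example: with #address-cells = 2 on ppc64, a "reg" entry
 * starting { 0x00000001, 0x00000000, ... } is consumed as
 * base = (0x1 << 32) | 0x0 = 0x100000000. On ppc32 the loop above
 * skips all but the last cell, so the same entry yields base = 0.
 */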
794
795/*
796 * Very dumb function for adding to the memory reserve list, but
797 * we don't need anything smarter at this point
798 *
 799 * XXX Eventually check for collisions. They should NEVER happen.
 800 * If problems seem to show up, tracking them down here would be
 801 * a good place to start.
802 */
803static void reserve_mem(unsigned long base, unsigned long size)
804{
805 unsigned long top = base + size;
806 unsigned long cnt = RELOC(mem_reserve_cnt);
807
808 if (size == 0)
809 return;
810
811 /* We need to always keep one empty entry so that we
812 * have our terminator with "size" set to 0 since we are
813 * dumb and just copy this entire array to the boot params
814 */
815 base = _ALIGN_DOWN(base, PAGE_SIZE);
816 top = _ALIGN_UP(top, PAGE_SIZE);
817 size = top - base;
818
819 if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
820 prom_panic("Memory reserve map exhausted !\n");
821 RELOC(mem_reserve_map)[cnt].base = base;
822 RELOC(mem_reserve_map)[cnt].size = size;
823 RELOC(mem_reserve_cnt) = cnt + 1;
824}
825
826/*
 827 * Initialize the memory allocation mechanism: parse "memory" nodes to
 828 * obtain the top of memory and of the RMO, and set up our local allocator
829 */
830static void __init prom_init_mem(void)
831{
832 phandle node;
833 char *path, type[64];
834 unsigned int plen;
835 cell_t *p, *endp;
836 struct prom_t *_prom = &RELOC(prom);
837 u32 rac, rsc;
838
839 /*
840 * We iterate the memory nodes to find
841 * 1) top of RMO (first node)
842 * 2) top of memory
843 */
844 rac = 2;
845 prom_getprop(_prom->root, "#address-cells", &rac, sizeof(rac));
846 rsc = 1;
847 prom_getprop(_prom->root, "#size-cells", &rsc, sizeof(rsc));
848 prom_debug("root_addr_cells: %x\n", (unsigned long) rac);
849 prom_debug("root_size_cells: %x\n", (unsigned long) rsc);
850
851 prom_debug("scanning memory:\n");
852 path = RELOC(prom_scratch);
853
854 for (node = 0; prom_next_node(&node); ) {
855 type[0] = 0;
856 prom_getprop(node, "device_type", type, sizeof(type));
857
858 if (type[0] == 0) {
859 /*
860 * CHRP Longtrail machines have no device_type
861 * on the memory node, so check the name instead...
862 */
863 prom_getprop(node, "name", type, sizeof(type));
864 }
865 if (strcmp(type, RELOC("memory")))
866 continue;
867
868 plen = prom_getprop(node, "reg", RELOC(regbuf), sizeof(regbuf));
869 if (plen > sizeof(regbuf)) {
870 prom_printf("memory node too large for buffer !\n");
871 plen = sizeof(regbuf);
872 }
873 p = RELOC(regbuf);
874 endp = p + (plen / sizeof(cell_t));
875
876#ifdef DEBUG_PROM
877 memset(path, 0, PROM_SCRATCH_SIZE);
878 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
879 prom_debug(" node %s :\n", path);
880#endif /* DEBUG_PROM */
881
882 while ((endp - p) >= (rac + rsc)) {
883 unsigned long base, size;
884
885 base = prom_next_cell(rac, &p);
886 size = prom_next_cell(rsc, &p);
887
888 if (size == 0)
889 continue;
890 prom_debug(" %x %x\n", base, size);
891 if (base == 0)
892 RELOC(rmo_top) = size;
893 if ((base + size) > RELOC(ram_top))
894 RELOC(ram_top) = base + size;
895 }
896 }
897
898 RELOC(alloc_bottom) = PAGE_ALIGN((unsigned long)&RELOC(_end) + 0x4000);
899
 900	/* Check if we have an initrd after the kernel; if we do, move
 901	 * our bottom point past it
 902	 */
903 if (RELOC(prom_initrd_start)) {
904 if (RELOC(prom_initrd_end) > RELOC(alloc_bottom))
905 RELOC(alloc_bottom) = PAGE_ALIGN(RELOC(prom_initrd_end));
906 }
907
908 /*
909 * If prom_memory_limit is set we reduce the upper limits *except* for
910 * alloc_top_high. This must be the real top of RAM so we can put
 911	 * TCEs up there.
912 */
913
914 RELOC(alloc_top_high) = RELOC(ram_top);
915
916 if (RELOC(prom_memory_limit)) {
917 if (RELOC(prom_memory_limit) <= RELOC(alloc_bottom)) {
918 prom_printf("Ignoring mem=%x <= alloc_bottom.\n",
919 RELOC(prom_memory_limit));
920 RELOC(prom_memory_limit) = 0;
921 } else if (RELOC(prom_memory_limit) >= RELOC(ram_top)) {
922 prom_printf("Ignoring mem=%x >= ram_top.\n",
923 RELOC(prom_memory_limit));
924 RELOC(prom_memory_limit) = 0;
925 } else {
926 RELOC(ram_top) = RELOC(prom_memory_limit);
927 RELOC(rmo_top) = min(RELOC(rmo_top), RELOC(prom_memory_limit));
928 }
929 }
930
931 /*
 932	 * Set up our top alloc point, that is, the top of the RMO or the top
 933	 * of segment 0 when running non-LPAR.
934 * Some RS64 machines have buggy firmware where claims up at
935 * 1GB fail. Cap at 768MB as a workaround.
936 * Since 768MB is plenty of room, and we need to cap to something
937 * reasonable on 32-bit, cap at 768MB on all machines.
938 */
939 if (!RELOC(rmo_top))
940 RELOC(rmo_top) = RELOC(ram_top);
941 RELOC(rmo_top) = min(0x30000000ul, RELOC(rmo_top));
942 RELOC(alloc_top) = RELOC(rmo_top);
943
944 prom_printf("memory layout at init:\n");
945 prom_printf(" memory_limit : %x (16 MB aligned)\n", RELOC(prom_memory_limit));
946 prom_printf(" alloc_bottom : %x\n", RELOC(alloc_bottom));
947 prom_printf(" alloc_top : %x\n", RELOC(alloc_top));
948 prom_printf(" alloc_top_hi : %x\n", RELOC(alloc_top_high));
949 prom_printf(" rmo_top : %x\n", RELOC(rmo_top));
950 prom_printf(" ram_top : %x\n", RELOC(ram_top));
951}
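/*
 * Worked example of the scan above: with two memory nodes carrying
 * reg = <0x0 0x10000000> and reg = <0x10000000 0x30000000>, the
 * base == 0 entry sets rmo_top to 256MB and the running maximum sets
 * ram_top to 1GB; with "mem=512M" on the command line, ram_top and
 * rmo_top then end up clamped to 512MB and 256MB respectively.
 */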
952
953
954/*
955 * Allocate room for and instantiate RTAS
956 */
957static void __init prom_instantiate_rtas(void)
958{
959 phandle rtas_node;
960 ihandle rtas_inst;
961 u32 base, entry = 0;
962 u32 size = 0;
963
964 prom_debug("prom_instantiate_rtas: start...\n");
965
966 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
967 prom_debug("rtas_node: %x\n", rtas_node);
968 if (!PHANDLE_VALID(rtas_node))
969 return;
970
971 prom_getprop(rtas_node, "rtas-size", &size, sizeof(size));
972 if (size == 0)
973 return;
974
975 base = alloc_down(size, PAGE_SIZE, 0);
976 if (base == 0) {
977 prom_printf("RTAS allocation failed !\n");
978 return;
979 }
980
981 rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
982 if (!IHANDLE_VALID(rtas_inst)) {
983 prom_printf("opening rtas package failed");
984 return;
985 }
986
987 prom_printf("instantiating rtas at 0x%x ...", base);
988
989 if (call_prom_ret("call-method", 3, 2, &entry,
990 ADDR("instantiate-rtas"),
991 rtas_inst, base) == PROM_ERROR
992 || entry == 0) {
993 prom_printf(" failed\n");
994 return;
995 }
996 prom_printf(" done\n");
997
998 reserve_mem(base, size);
999
1000 prom_setprop(rtas_node, "linux,rtas-base", &base, sizeof(base));
1001 prom_setprop(rtas_node, "linux,rtas-entry", &entry, sizeof(entry));
1002
1003 prom_debug("rtas base = 0x%x\n", base);
1004 prom_debug("rtas entry = 0x%x\n", entry);
1005 prom_debug("rtas size = 0x%x\n", (long)size);
1006
1007 prom_debug("prom_instantiate_rtas: end...\n");
1008}
1009
1010#ifdef CONFIG_PPC64
1011/*
1012 * Allocate room for and initialize TCE tables
1013 */
1014static void __init prom_initialize_tce_table(void)
1015{
1016 phandle node;
1017 ihandle phb_node;
1018 char compatible[64], type[64], model[64];
1019 char *path = RELOC(prom_scratch);
1020 u64 base, align;
1021 u32 minalign, minsize;
1022 u64 tce_entry, *tce_entryp;
1023 u64 local_alloc_top, local_alloc_bottom;
1024 u64 i;
1025
1026 if (RELOC(ppc64_iommu_off))
1027 return;
1028
1029 prom_debug("starting prom_initialize_tce_table\n");
1030
1031 /* Cache current top of allocs so we reserve a single block */
1032 local_alloc_top = RELOC(alloc_top_high);
1033 local_alloc_bottom = local_alloc_top;
1034
1035 /* Search all nodes looking for PHBs. */
1036 for (node = 0; prom_next_node(&node); ) {
1037 compatible[0] = 0;
1038 type[0] = 0;
1039 model[0] = 0;
1040 prom_getprop(node, "compatible",
1041 compatible, sizeof(compatible));
1042 prom_getprop(node, "device_type", type, sizeof(type));
1043 prom_getprop(node, "model", model, sizeof(model));
1044
1045 if ((type[0] == 0) || (strstr(type, RELOC("pci")) == NULL))
1046 continue;
1047
 1048		/* Keep the old logic intact to avoid regression. */
1049 if (compatible[0] != 0) {
1050 if ((strstr(compatible, RELOC("python")) == NULL) &&
1051 (strstr(compatible, RELOC("Speedwagon")) == NULL) &&
1052 (strstr(compatible, RELOC("Winnipeg")) == NULL))
1053 continue;
1054 } else if (model[0] != 0) {
1055 if ((strstr(model, RELOC("ython")) == NULL) &&
1056 (strstr(model, RELOC("peedwagon")) == NULL) &&
1057 (strstr(model, RELOC("innipeg")) == NULL))
1058 continue;
1059 }
1060
1061 if (prom_getprop(node, "tce-table-minalign", &minalign,
1062 sizeof(minalign)) == PROM_ERROR)
1063 minalign = 0;
1064 if (prom_getprop(node, "tce-table-minsize", &minsize,
1065 sizeof(minsize)) == PROM_ERROR)
1066 minsize = 4UL << 20;
1067
1068 /*
1069 * Even though we read what OF wants, we just set the table
1070 * size to 4 MB. This is enough to map 2GB of PCI DMA space.
1071 * By doing this, we avoid the pitfalls of trying to DMA to
1072 * MMIO space and the DMA alias hole.
1073 *
1074 * On POWER4, firmware sets the TCE region by assuming
1075 * each TCE table is 8MB. Using this memory for anything
1076 * else will impact performance, so we always allocate 8MB.
1077 * Anton
1078 */
1079 if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p))
1080 minsize = 8UL << 20;
1081 else
1082 minsize = 4UL << 20;
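		/*
		 * Arithmetic check of the comment above: a 4MB table holds
		 * 4MB / 8 bytes = 512K TCEs, each mapping one 4K page, i.e.
		 * 512K * 4K = 2GB of DMA space; the 8MB POWER4 table maps
		 * 4GB. The 0x3 OR'd into each entry further down is assumed
		 * to be the read/write permission bits of a TCE.
		 */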
1083
1084 /* Align to the greater of the align or size */
1085 align = max(minalign, minsize);
1086 base = alloc_down(minsize, align, 1);
1087 if (base == 0)
1088 prom_panic("ERROR, cannot find space for TCE table.\n");
1089 if (base < local_alloc_bottom)
1090 local_alloc_bottom = base;
1091
1092 /* Save away the TCE table attributes for later use. */
1093 prom_setprop(node, "linux,tce-base", &base, sizeof(base));
1094 prom_setprop(node, "linux,tce-size", &minsize, sizeof(minsize));
1095
1096 /* It seems OF doesn't null-terminate the path :-( */
 1097		memset(path, 0, PROM_SCRATCH_SIZE);	/* sizeof(path) is just the pointer size */
1098 /* Call OF to setup the TCE hardware */
1099 if (call_prom("package-to-path", 3, 1, node,
1100 path, PROM_SCRATCH_SIZE-1) == PROM_ERROR) {
1101 prom_printf("package-to-path failed\n");
1102 }
1103
1104 prom_debug("TCE table: %s\n", path);
1105 prom_debug("\tnode = 0x%x\n", node);
1106 prom_debug("\tbase = 0x%x\n", base);
1107 prom_debug("\tsize = 0x%x\n", minsize);
1108
1109 /* Initialize the table to have a one-to-one mapping
1110 * over the allocated size.
1111 */
1112 tce_entryp = (unsigned long *)base;
1113 for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) {
1114 tce_entry = (i << PAGE_SHIFT);
1115 tce_entry |= 0x3;
1116 *tce_entryp = tce_entry;
1117 }
1118
1119 prom_printf("opening PHB %s", path);
1120 phb_node = call_prom("open", 1, 1, path);
1121 if (phb_node == 0)
1122 prom_printf("... failed\n");
1123 else
1124 prom_printf("... done\n");
1125
1126 call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
1127 phb_node, -1, minsize,
1128 (u32) base, (u32) (base >> 32));
1129 call_prom("close", 1, 0, phb_node);
1130 }
1131
1132 reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
1133
1134 if (RELOC(prom_memory_limit)) {
1135 /*
1136 * We align the start to a 16MB boundary so we can map
1137 * the TCE area using large pages if possible.
1138 * The end should be the top of RAM so no need to align it.
1139 */
1140 RELOC(prom_tce_alloc_start) = _ALIGN_DOWN(local_alloc_bottom,
1141 0x1000000);
1142 RELOC(prom_tce_alloc_end) = local_alloc_top;
1143 }
1144
1145 /* Flag the first invalid entry */
1146 prom_debug("ending prom_initialize_tce_table\n");
1147}
1148#endif
1149
1150/*
1151 * With CHRP SMP we need to use the OF to start the other processors.
1152 * We can't wait until smp_boot_cpus (the OF is trashed by then)
1153 * so we have to put the processors into a holding pattern controlled
1154 * by the kernel (not OF) before we destroy the OF.
1155 *
1156 * This uses a chunk of low memory, puts some holding pattern
1157 * code there and sends the other processors off to there until
1158 * smp_boot_cpus tells them to do something. The holding pattern
 1159 * checks that address until its cpu # is there; when it is, that
 1160 * cpu jumps to __secondary_start(). smp_boot_cpus() takes care
1161 * of setting those values.
1162 *
1163 * We also use physical address 0x4 here to tell when a cpu
1164 * is in its holding pattern code.
1165 *
1166 * -- Cort
1167 */
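/*
 * A sketch of the handshake implied above (the secondary side lives
 * in the head_*.S low-memory code; its details are assumptions here):
 *
 *	boot cpu (this code)		secondary cpu
 *	*spinloop = 0
 *	*acknowledge = -1
 *	"start-cpu" ------------------>	enters __secondary_hold,
 *	poll *acknowledge == reg	writes its reg to *acknowledge,
 *					then spins until *spinloop
 *					becomes non-zero
 */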
1168extern void __secondary_hold(void);
1169extern unsigned long __secondary_hold_spinloop;
1170extern unsigned long __secondary_hold_acknowledge;
1171
1172/*
1173 * We want to reference the copy of __secondary_hold_* in the
1174 * 0 - 0x100 address range
1175 */
1176#define LOW_ADDR(x) (((unsigned long) &(x)) & 0xff)
1177
1178static void __init prom_hold_cpus(void)
1179{
1180 unsigned long i;
1181 unsigned int reg;
1182 phandle node;
1183 char type[64];
1184 int cpuid = 0;
1185 unsigned int interrupt_server[MAX_CPU_THREADS];
1186 unsigned int cpu_threads, hw_cpu_num;
1187 int propsize;
1188 struct prom_t *_prom = &RELOC(prom);
1189 unsigned long *spinloop
1190 = (void *) LOW_ADDR(__secondary_hold_spinloop);
1191 unsigned long *acknowledge
1192 = (void *) LOW_ADDR(__secondary_hold_acknowledge);
1193#ifdef CONFIG_PPC64
1194 /* __secondary_hold is actually a descriptor, not the text address */
1195 unsigned long secondary_hold
1196 = __pa(*PTRRELOC((unsigned long *)__secondary_hold));
1197#else
1198 unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
1199#endif
1200
1201 prom_debug("prom_hold_cpus: start...\n");
1202 prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop);
1203 prom_debug(" 1) *spinloop = 0x%x\n", *spinloop);
1204 prom_debug(" 1) acknowledge = 0x%x\n",
1205 (unsigned long)acknowledge);
1206 prom_debug(" 1) *acknowledge = 0x%x\n", *acknowledge);
1207 prom_debug(" 1) secondary_hold = 0x%x\n", secondary_hold);
1208
1209 /* Set the common spinloop variable, so all of the secondary cpus
1210 * will block when they are awakened from their OF spinloop.
1211 * This must occur for both SMP and non SMP kernels, since OF will
1212 * be trashed when we move the kernel.
1213 */
1214 *spinloop = 0;
1215
1216#ifdef CONFIG_HMT
1217 for (i = 0; i < NR_CPUS; i++)
1218 RELOC(hmt_thread_data)[i].pir = 0xdeadbeef;
1219#endif
1220 /* look for cpus */
1221 for (node = 0; prom_next_node(&node); ) {
1222 type[0] = 0;
1223 prom_getprop(node, "device_type", type, sizeof(type));
1224 if (strcmp(type, RELOC("cpu")) != 0)
1225 continue;
1226
1227 /* Skip non-configured cpus. */
1228 if (prom_getprop(node, "status", type, sizeof(type)) > 0)
1229 if (strcmp(type, RELOC("okay")) != 0)
1230 continue;
1231
1232 reg = -1;
1233 prom_getprop(node, "reg", &reg, sizeof(reg));
1234
1235 prom_debug("\ncpuid = 0x%x\n", cpuid);
1236 prom_debug("cpu hw idx = 0x%x\n", reg);
1237
1238 /* Init the acknowledge var which will be reset by
1239 * the secondary cpu when it awakens from its OF
1240 * spinloop.
1241 */
1242 *acknowledge = (unsigned long)-1;
1243
1244 propsize = prom_getprop(node, "ibm,ppc-interrupt-server#s",
1245 &interrupt_server,
1246 sizeof(interrupt_server));
1247 if (propsize < 0) {
1248 /* no property. old hardware has no SMT */
1249 cpu_threads = 1;
1250 interrupt_server[0] = reg; /* fake it with phys id */
1251 } else {
1252 /* We have a threaded processor */
1253 cpu_threads = propsize / sizeof(u32);
1254 if (cpu_threads > MAX_CPU_THREADS) {
1255 prom_printf("SMT: too many threads!\n"
1256 "SMT: found %x, max is %x\n",
1257 cpu_threads, MAX_CPU_THREADS);
1258 cpu_threads = 1; /* ToDo: panic? */
1259 }
1260 }
1261
1262 hw_cpu_num = interrupt_server[0];
1263 if (hw_cpu_num != _prom->cpu) {
1264 /* Primary Thread of non-boot cpu */
1265 prom_printf("%x : starting cpu hw idx %x... ", cpuid, reg);
1266 call_prom("start-cpu", 3, 0, node,
1267 secondary_hold, reg);
1268
1269 for (i = 0; (i < 100000000) &&
1270 (*acknowledge == ((unsigned long)-1)); i++ )
1271 mb();
1272
1273 if (*acknowledge == reg)
1274 prom_printf("done\n");
1275 else
1276 prom_printf("failed: %x\n", *acknowledge);
1277 }
1278#ifdef CONFIG_SMP
1279 else
1280 prom_printf("%x : boot cpu %x\n", cpuid, reg);
1281#endif /* CONFIG_SMP */
1282
1283 /* Reserve cpu #s for secondary threads. They start later. */
1284 cpuid += cpu_threads;
1285 }
1286#ifdef CONFIG_HMT
1287 /* Only enable HMT on processors that provide support. */
1288 if (__is_processor(PV_PULSAR) ||
1289 __is_processor(PV_ICESTAR) ||
1290 __is_processor(PV_SSTAR)) {
1291 prom_printf(" starting secondary threads\n");
1292
1293 for (i = 0; i < NR_CPUS; i += 2) {
1294 if (!cpu_online(i))
1295 continue;
1296
1297 if (i == 0) {
1298 unsigned long pir = mfspr(SPRN_PIR);
1299 if (__is_processor(PV_PULSAR)) {
1300 RELOC(hmt_thread_data)[i].pir =
1301 pir & 0x1f;
1302 } else {
1303 RELOC(hmt_thread_data)[i].pir =
1304 pir & 0x3ff;
1305 }
1306 }
1307 }
1308 } else {
1309 prom_printf("Processor is not HMT capable\n");
1310 }
1311#endif
1312
1313 if (cpuid > NR_CPUS)
1314 prom_printf("WARNING: maximum CPUs (" __stringify(NR_CPUS)
1315 ") exceeded: ignoring extras\n");
1316
1317 prom_debug("prom_hold_cpus: end...\n");
1318}
1319
1320
1321static void __init prom_init_client_services(unsigned long pp)
1322{
1323 struct prom_t *_prom = &RELOC(prom);
1324
1325 /* Get a handle to the prom entry point before anything else */
1326 RELOC(prom_entry) = pp;
1327
1328 /* get a handle for the stdout device */
1329 _prom->chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
1330 if (!PHANDLE_VALID(_prom->chosen))
1331 prom_panic("cannot find chosen"); /* msg won't be printed :( */
1332
1333 /* get device tree root */
1334 _prom->root = call_prom("finddevice", 1, 1, ADDR("/"));
1335 if (!PHANDLE_VALID(_prom->root))
1336 prom_panic("cannot find device tree root"); /* msg won't be printed :( */
1337
1338 _prom->mmumap = 0;
1339}
1340
1341#ifdef CONFIG_PPC32
1342/*
1343 * For really old powermacs, we need to map things we claim.
1344 * For that, we need the ihandle of the mmu.
1345 */
1346static void __init prom_find_mmu(void)
1347{
1348 struct prom_t *_prom = &RELOC(prom);
1349 phandle oprom;
1350 char version[64];
1351
1352 oprom = call_prom("finddevice", 1, 1, ADDR("/openprom"));
1353 if (!PHANDLE_VALID(oprom))
1354 return;
1355 if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
1356 return;
1357 version[sizeof(version) - 1] = 0;
1358 prom_printf("OF version is '%s'\n", version);
1359 /* XXX might need to add other versions here */
1360 if (strcmp(version, "Open Firmware, 1.0.5") != 0)
1361 return;
1362 prom_getprop(_prom->chosen, "mmu", &_prom->mmumap,
1363 sizeof(_prom->mmumap));
1364}
1365#else
1366#define prom_find_mmu()
1367#endif
1368
1369static void __init prom_init_stdout(void)
1370{
1371 struct prom_t *_prom = &RELOC(prom);
1372 char *path = RELOC(of_stdout_device);
1373 char type[16];
1374 u32 val;
1375
1376 if (prom_getprop(_prom->chosen, "stdout", &val, sizeof(val)) <= 0)
1377 prom_panic("cannot find stdout");
1378
1379 _prom->stdout = val;
1380
1381 /* Get the full OF pathname of the stdout device */
1382 memset(path, 0, 256);
1383 call_prom("instance-to-path", 3, 1, _prom->stdout, path, 255);
1384 val = call_prom("instance-to-package", 1, 1, _prom->stdout);
1385 prom_setprop(_prom->chosen, "linux,stdout-package", &val, sizeof(val));
1386 prom_printf("OF stdout device is: %s\n", RELOC(of_stdout_device));
1387 prom_setprop(_prom->chosen, "linux,stdout-path",
1388 RELOC(of_stdout_device), strlen(RELOC(of_stdout_device))+1);
1389
1390 /* If it's a display, note it */
1391 memset(type, 0, sizeof(type));
1392 prom_getprop(val, "device_type", type, sizeof(type));
1393 if (strcmp(type, RELOC("display")) == 0)
1394 prom_setprop(val, "linux,boot-display", NULL, 0);
1395}
1396
1397static void __init prom_close_stdin(void)
1398{
1399 struct prom_t *_prom = &RELOC(prom);
1400 ihandle val;
1401
1402 if (prom_getprop(_prom->chosen, "stdin", &val, sizeof(val)) > 0)
1403 call_prom("close", 1, 0, val);
1404}
1405
1406static int __init prom_find_machine_type(void)
1407{
1408 struct prom_t *_prom = &RELOC(prom);
1409 char compat[256];
1410 int len, i = 0;
1411 phandle rtas;
1412
1413 len = prom_getprop(_prom->root, "compatible",
1414 compat, sizeof(compat)-1);
1415 if (len > 0) {
1416 compat[len] = 0;
1417 while (i < len) {
1418 char *p = &compat[i];
1419 int sl = strlen(p);
1420 if (sl == 0)
1421 break;
1422 if (strstr(p, RELOC("Power Macintosh")) ||
1423 strstr(p, RELOC("MacRISC")))
1424 return PLATFORM_POWERMAC;
1425#ifdef CONFIG_PPC64
1426 if (strstr(p, RELOC("Momentum,Maple")))
1427 return PLATFORM_MAPLE;
1428#endif
1429 i += sl + 1;
1430 }
1431 }
1432#ifdef CONFIG_PPC64
1433 /* Default to pSeries. We need to know if we are running LPAR */
1434 rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1435 if (PHANDLE_VALID(rtas)) {
1436 int x = prom_getproplen(rtas, "ibm,hypertas-functions");
1437 if (x != PROM_ERROR) {
1438 prom_printf("Hypertas detected, assuming LPAR !\n");
1439 return PLATFORM_PSERIES_LPAR;
1440 }
1441 }
1442 return PLATFORM_PSERIES;
1443#else
1444 return PLATFORM_CHRP;
1445#endif
1446}
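/*
 * Illustration of the loop above: "compatible" is a list of
 * NUL-separated strings, e.g. "PowerMac3,1\0MacRISC\0Power Macintosh\0",
 * so the code steps through it with i += strlen(p) + 1 and matches any
 * entry by substring.
 */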
1447
1448static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
1449{
1450 return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
1451}
1452
1453/*
1454 * If we have a display that we don't know how to drive,
1455 * we will want to try to execute OF's open method for it
 1456 * later. However, OF will probably fall over if we do that after
 1457 * we've taken over the MMU.
1458 * So we check whether we will need to open the display,
1459 * and if so, open it now.
1460 */
1461static void __init prom_check_displays(void)
1462{
1463 char type[16], *path;
1464 phandle node;
1465 ihandle ih;
1466 int i;
1467
1468 static unsigned char default_colors[] = {
1469 0x00, 0x00, 0x00,
1470 0x00, 0x00, 0xaa,
1471 0x00, 0xaa, 0x00,
1472 0x00, 0xaa, 0xaa,
1473 0xaa, 0x00, 0x00,
1474 0xaa, 0x00, 0xaa,
1475 0xaa, 0xaa, 0x00,
1476 0xaa, 0xaa, 0xaa,
1477 0x55, 0x55, 0x55,
1478 0x55, 0x55, 0xff,
1479 0x55, 0xff, 0x55,
1480 0x55, 0xff, 0xff,
1481 0xff, 0x55, 0x55,
1482 0xff, 0x55, 0xff,
1483 0xff, 0xff, 0x55,
1484 0xff, 0xff, 0xff
1485 };
1486 const unsigned char *clut;
1487
1488 prom_printf("Looking for displays\n");
1489 for (node = 0; prom_next_node(&node); ) {
1490 memset(type, 0, sizeof(type));
1491 prom_getprop(node, "device_type", type, sizeof(type));
1492 if (strcmp(type, RELOC("display")) != 0)
1493 continue;
1494
1495 /* It seems OF doesn't null-terminate the path :-( */
1496 path = RELOC(prom_scratch);
1497 memset(path, 0, PROM_SCRATCH_SIZE);
1498
1499 /*
1500 * leave some room at the end of the path for appending extra
1501 * arguments
1502 */
1503 if (call_prom("package-to-path", 3, 1, node, path,
1504 PROM_SCRATCH_SIZE-10) == PROM_ERROR)
1505 continue;
1506 prom_printf("found display : %s, opening ... ", path);
1507
1508 ih = call_prom("open", 1, 1, path);
1509 if (ih == 0) {
1510 prom_printf("failed\n");
1511 continue;
1512 }
1513
1514 /* Success */
1515 prom_printf("done\n");
1516 prom_setprop(node, "linux,opened", NULL, 0);
1517
 1518		/* Set up a usable color table when the appropriate
 1519		 * method is available. Should update this to use set-colors. */
1520 clut = RELOC(default_colors);
1521 for (i = 0; i < 32; i++, clut += 3)
1522 if (prom_set_color(ih, i, clut[0], clut[1],
1523 clut[2]) != 0)
1524 break;
1525
1526#ifdef CONFIG_LOGO_LINUX_CLUT224
1527 clut = PTRRELOC(RELOC(logo_linux_clut224.clut));
1528 for (i = 0; i < RELOC(logo_linux_clut224.clutsize); i++, clut += 3)
1529 if (prom_set_color(ih, i + 32, clut[0], clut[1],
1530 clut[2]) != 0)
1531 break;
1532#endif /* CONFIG_LOGO_LINUX_CLUT224 */
1533 }
1534}
1535
1536
1537/* Return (relocated) pointer to this much memory: moves initrd if reqd. */
1538static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
1539 unsigned long needed, unsigned long align)
1540{
1541 void *ret;
1542
1543 *mem_start = _ALIGN(*mem_start, align);
1544 while ((*mem_start + needed) > *mem_end) {
1545 unsigned long room, chunk;
1546
1547 prom_debug("Chunk exhausted, claiming more at %x...\n",
1548 RELOC(alloc_bottom));
1549 room = RELOC(alloc_top) - RELOC(alloc_bottom);
1550 if (room > DEVTREE_CHUNK_SIZE)
1551 room = DEVTREE_CHUNK_SIZE;
1552 if (room < PAGE_SIZE)
1553 prom_panic("No memory for flatten_device_tree (no room)");
1554 chunk = alloc_up(room, 0);
1555 if (chunk == 0)
1556 prom_panic("No memory for flatten_device_tree (claim failed)");
1557 *mem_end = RELOC(alloc_top);
1558 }
1559
1560 ret = (void *)*mem_start;
1561 *mem_start += needed;
1562
1563 return ret;
1564}
1565
1566#define dt_push_token(token, mem_start, mem_end) \
1567 do { *((u32 *)make_room(mem_start, mem_end, 4, 4)) = token; } while(0)
1568
1569static unsigned long __init dt_find_string(char *str)
1570{
1571 char *s, *os;
1572
1573 s = os = (char *)RELOC(dt_string_start);
1574 s += 4;
1575 while (s < (char *)RELOC(dt_string_end)) {
1576 if (strcmp(s, str) == 0)
1577 return s - os;
1578 s += strlen(s) + 1;
1579 }
1580 return 0;
1581}
1582
1583/*
 1584 * The Open Firmware 1275 specification states properties must be 31 bytes
 1585 * or less; however, not all firmwares obey this. Make it 64 bytes to be safe.
1586 */
1587#define MAX_PROPERTY_NAME 64
1588
1589static void __init scan_dt_build_strings(phandle node,
1590 unsigned long *mem_start,
1591 unsigned long *mem_end)
1592{
1593 char *prev_name, *namep, *sstart;
1594 unsigned long soff;
1595 phandle child;
1596
1597 sstart = (char *)RELOC(dt_string_start);
1598
1599 /* get and store all property names */
1600 prev_name = RELOC("");
1601 for (;;) {
1602 /* 64 is max len of name including nul. */
1603 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
1604 if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
 1605			/* No more properties: unwind alloc */
1606 *mem_start = (unsigned long)namep;
1607 break;
1608 }
1609
1610 /* skip "name" */
1611 if (strcmp(namep, RELOC("name")) == 0) {
1612 *mem_start = (unsigned long)namep;
1613 prev_name = RELOC("name");
1614 continue;
1615 }
1616 /* get/create string entry */
1617 soff = dt_find_string(namep);
1618 if (soff != 0) {
1619 *mem_start = (unsigned long)namep;
1620 namep = sstart + soff;
1621 } else {
1622 /* Trim off some if we can */
1623 *mem_start = (unsigned long)namep + strlen(namep) + 1;
1624 RELOC(dt_string_end) = *mem_start;
1625 }
1626 prev_name = namep;
1627 }
1628
1629 /* do all our children */
1630 child = call_prom("child", 1, 1, node);
1631 while (child != 0) {
1632 scan_dt_build_strings(child, mem_start, mem_end);
1633 child = call_prom("peer", 1, 1, child);
1634 }
1635}
1636
1637static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
1638 unsigned long *mem_end)
1639{
1640 phandle child;
1641 char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
1642 unsigned long soff;
1643 unsigned char *valp;
1644 static char pname[MAX_PROPERTY_NAME];
1645 int l, room;
1646
1647 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);
1648
1649 /* get the node's full name */
1650 namep = (char *)*mem_start;
1651 room = *mem_end - *mem_start;
1652 if (room > 255)
1653 room = 255;
1654 l = call_prom("package-to-path", 3, 1, node, namep, room);
1655 if (l >= 0) {
1656 /* Didn't fit? Get more room. */
1657 if (l >= room) {
1658 if (l >= *mem_end - *mem_start)
1659 namep = make_room(mem_start, mem_end, l+1, 1);
1660 call_prom("package-to-path", 3, 1, node, namep, l);
1661 }
1662 namep[l] = '\0';
1663
1664 /* Fixup an Apple bug where they have bogus \0 chars in the
1665 * middle of the path in some properties, and extract
1666 * the unit name (everything after the last '/').
1667 */
1668 for (lp = p = namep, ep = namep + l; p < ep; p++) {
1669 if (*p == '/')
1670 lp = namep;
1671 else if (*p != 0)
1672 *lp++ = *p;
1673 }
1674 *lp = 0;
1675 *mem_start = _ALIGN((unsigned long)lp + 1, 4);
1676 }
1677
1678 /* get it again for debugging */
1679 path = RELOC(prom_scratch);
1680 memset(path, 0, PROM_SCRATCH_SIZE);
1681 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
1682
1683 /* get and store all properties */
1684 prev_name = RELOC("");
1685 sstart = (char *)RELOC(dt_string_start);
1686 for (;;) {
1687 if (call_prom("nextprop", 3, 1, node, prev_name,
1688 RELOC(pname)) != 1)
1689 break;
1690
1691 /* skip "name" */
1692 if (strcmp(RELOC(pname), RELOC("name")) == 0) {
1693 prev_name = RELOC("name");
1694 continue;
1695 }
1696
1697 /* find string offset */
1698 soff = dt_find_string(RELOC(pname));
1699 if (soff == 0) {
1700 prom_printf("WARNING: Can't find string index for"
1701 " <%s>, node %s\n", RELOC(pname), path);
1702 break;
1703 }
1704 prev_name = sstart + soff;
1705
1706 /* get length */
1707 l = call_prom("getproplen", 2, 1, node, RELOC(pname));
1708
1709 /* sanity checks */
1710 if (l == PROM_ERROR)
1711 continue;
1712 if (l > MAX_PROPERTY_LENGTH) {
1713 prom_printf("WARNING: ignoring large property ");
1714 /* It seems OF doesn't null-terminate the path :-( */
1715 prom_printf("[%s] ", path);
1716 prom_printf("%s length 0x%x\n", RELOC(pname), l);
1717 continue;
1718 }
1719
1720 /* push property head */
1721 dt_push_token(OF_DT_PROP, mem_start, mem_end);
1722 dt_push_token(l, mem_start, mem_end);
1723 dt_push_token(soff, mem_start, mem_end);
1724
1725 /* push property content */
1726 valp = make_room(mem_start, mem_end, l, 4);
1727 call_prom("getprop", 4, 1, node, RELOC(pname), valp, l);
1728 *mem_start = _ALIGN(*mem_start, 4);
1729 }
1730
1731 /* Add a "linux,phandle" property. */
1732 soff = dt_find_string(RELOC("linux,phandle"));
1733 if (soff == 0)
1734 prom_printf("WARNING: Can't find string index for"
 1735			    " <linux,phandle>, node %s\n", path);
1736 else {
1737 dt_push_token(OF_DT_PROP, mem_start, mem_end);
1738 dt_push_token(4, mem_start, mem_end);
1739 dt_push_token(soff, mem_start, mem_end);
1740 valp = make_room(mem_start, mem_end, 4, 4);
1741 *(u32 *)valp = node;
1742 }
1743
1744 /* do all our children */
1745 child = call_prom("child", 1, 1, node);
1746 while (child != 0) {
1747 scan_dt_build_struct(child, mem_start, mem_end);
1748 child = call_prom("peer", 1, 1, child);
1749 }
1750
1751 dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
1752}
1753
1754static void __init flatten_device_tree(void)
1755{
1756 phandle root;
1757 unsigned long mem_start, mem_end, room;
1758 struct boot_param_header *hdr;
1759 struct prom_t *_prom = &RELOC(prom);
1760 char *namep;
1761 u64 *rsvmap;
1762
1763 /*
1764 * Check how much room we have between alloc top & bottom (+/- a
 1765	 * few pages), crop to 1Mb, as this is our "chunk" size
1766 */
1767 room = RELOC(alloc_top) - RELOC(alloc_bottom) - 0x4000;
1768 if (room > DEVTREE_CHUNK_SIZE)
1769 room = DEVTREE_CHUNK_SIZE;
1770 prom_debug("starting device tree allocs at %x\n", RELOC(alloc_bottom));
1771
1772 /* Now try to claim that */
1773 mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
1774 if (mem_start == 0)
1775 prom_panic("Can't allocate initial device-tree chunk\n");
1776 mem_end = RELOC(alloc_top);
1777
1778 /* Get root of tree */
1779 root = call_prom("peer", 1, 1, (phandle)0);
1780 if (root == (phandle)0)
1781 prom_panic ("couldn't get device tree root\n");
1782
1783 /* Build header and make room for mem rsv map */
1784 mem_start = _ALIGN(mem_start, 4);
1785 hdr = make_room(&mem_start, &mem_end,
1786 sizeof(struct boot_param_header), 4);
1787 RELOC(dt_header_start) = (unsigned long)hdr;
1788 rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);
1789
1790 /* Start of strings */
1791 mem_start = PAGE_ALIGN(mem_start);
1792 RELOC(dt_string_start) = mem_start;
1793 mem_start += 4; /* hole */
1794
1795 /* Add "linux,phandle" in there, we'll need it */
1796 namep = make_room(&mem_start, &mem_end, 16, 1);
1797 strcpy(namep, RELOC("linux,phandle"));
1798 mem_start = (unsigned long)namep + strlen(namep) + 1;
1799
1800 /* Build string array */
1801 prom_printf("Building dt strings...\n");
1802 scan_dt_build_strings(root, &mem_start, &mem_end);
1803 RELOC(dt_string_end) = mem_start;
1804
1805 /* Build structure */
1806 mem_start = PAGE_ALIGN(mem_start);
1807 RELOC(dt_struct_start) = mem_start;
1808 prom_printf("Building dt structure...\n");
1809 scan_dt_build_struct(root, &mem_start, &mem_end);
1810 dt_push_token(OF_DT_END, &mem_start, &mem_end);
1811 RELOC(dt_struct_end) = PAGE_ALIGN(mem_start);
1812
1813 /* Finish header */
1814 hdr->boot_cpuid_phys = _prom->cpu;
1815 hdr->magic = OF_DT_HEADER;
1816 hdr->totalsize = RELOC(dt_struct_end) - RELOC(dt_header_start);
1817 hdr->off_dt_struct = RELOC(dt_struct_start) - RELOC(dt_header_start);
1818 hdr->off_dt_strings = RELOC(dt_string_start) - RELOC(dt_header_start);
1819 hdr->dt_strings_size = RELOC(dt_string_end) - RELOC(dt_string_start);
1820 hdr->off_mem_rsvmap = ((unsigned long)rsvmap) - RELOC(dt_header_start);
1821 hdr->version = OF_DT_VERSION;
1822 /* Version 16 is not backward compatible */
1823 hdr->last_comp_version = 0x10;
1824
1825 /* Reserve the whole thing and copy the reserve map in, we
1826 * also bump mem_reserve_cnt to cause further reservations to
1827 * fail since it's too late.
1828 */
1829 reserve_mem(RELOC(dt_header_start), hdr->totalsize);
1830 memcpy(rsvmap, RELOC(mem_reserve_map), sizeof(mem_reserve_map));
1831
1832#ifdef DEBUG_PROM
1833 {
1834 int i;
1835 prom_printf("reserved memory map:\n");
1836 for (i = 0; i < RELOC(mem_reserve_cnt); i++)
1837 prom_printf(" %x - %x\n",
1838 RELOC(mem_reserve_map)[i].base,
1839 RELOC(mem_reserve_map)[i].size);
1840 }
1841#endif
1842 RELOC(mem_reserve_cnt) = MEM_RESERVE_MAP_SIZE;
1843
1844 prom_printf("Device tree strings 0x%x -> 0x%x\n",
1845 RELOC(dt_string_start), RELOC(dt_string_end));
1846 prom_printf("Device tree struct 0x%x -> 0x%x\n",
1847 RELOC(dt_struct_start), RELOC(dt_struct_end));
1848
1849}
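/*
 * A sketch of the blob laid out above, per the header fields just
 * filled in:
 *
 *	dt_header_start: struct boot_param_header (magic OF_DT_HEADER)
 *	off_mem_rsvmap:  copy of mem_reserve_map, (base, size) u64
 *	                 pairs terminated by a size-0 entry
 *	off_dt_strings:  shared property-name strings (dt_find_string)
 *	off_dt_struct:   token stream, per node:
 *	                   OF_DT_BEGIN_NODE <unit name>
 *	                   OF_DT_PROP <len> <string offset> <value>
 *	                   ... children ...
 *	                   OF_DT_END_NODE
 *	                 with a final OF_DT_END after the root node.
 */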
1850
1851
1852static void __init fixup_device_tree(void)
1853{
1854#if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
1855 phandle u3, i2c, mpic;
1856 u32 u3_rev;
1857 u32 interrupts[2];
1858 u32 parent;
1859
1860 /* Some G5s have a missing interrupt definition, fix it up here */
1861 u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
1862 if (!PHANDLE_VALID(u3))
1863 return;
1864 i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
1865 if (!PHANDLE_VALID(i2c))
1866 return;
1867 mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
1868 if (!PHANDLE_VALID(mpic))
1869 return;
1870
1871 /* check if proper rev of u3 */
1872 if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
1873 == PROM_ERROR)
1874 return;
1875 if (u3_rev != 0x35 && u3_rev != 0x37)
1876 return;
1877 /* does it need fixup ? */
1878 if (prom_getproplen(i2c, "interrupts") > 0)
1879 return;
1880
1881 prom_printf("fixing up bogus interrupts for u3 i2c...\n");
1882
1883 /* interrupt on this revision of u3 is number 0 and level */
1884 interrupts[0] = 0;
1885 interrupts[1] = 1;
1886 prom_setprop(i2c, "interrupts", &interrupts, sizeof(interrupts));
1887 parent = (u32)mpic;
1888 prom_setprop(i2c, "interrupt-parent", &parent, sizeof(parent));
1889#endif
1890}
1891
1892
1893static void __init prom_find_boot_cpu(void)
1894{
1895 struct prom_t *_prom = &RELOC(prom);
1896 u32 getprop_rval;
1897 ihandle prom_cpu;
1898 phandle cpu_pkg;
1899
1900 _prom->cpu = 0;
1901 if (prom_getprop(_prom->chosen, "cpu", &prom_cpu, sizeof(prom_cpu)) <= 0)
1902 return;
1903
1904 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
1905
1906 prom_getprop(cpu_pkg, "reg", &getprop_rval, sizeof(getprop_rval));
1907 _prom->cpu = getprop_rval;
1908
1909 prom_debug("Booting CPU hw index = 0x%x\n", _prom->cpu);
1910}
1911
1912static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
1913{
1914#ifdef CONFIG_BLK_DEV_INITRD
1915 struct prom_t *_prom = &RELOC(prom);
1916
1917 if (r3 && r4 && r4 != 0xdeadbeef) {
1918 unsigned long val;
1919
1920 RELOC(prom_initrd_start) = (r3 >= KERNELBASE) ? __pa(r3) : r3;
1921 RELOC(prom_initrd_end) = RELOC(prom_initrd_start) + r4;
1922
1923 val = RELOC(prom_initrd_start);
1924 prom_setprop(_prom->chosen, "linux,initrd-start", &val,
1925 sizeof(val));
1926 val = RELOC(prom_initrd_end);
1927 prom_setprop(_prom->chosen, "linux,initrd-end", &val,
1928 sizeof(val));
1929
1930 reserve_mem(RELOC(prom_initrd_start),
1931 RELOC(prom_initrd_end) - RELOC(prom_initrd_start));
1932
1933 prom_debug("initrd_start=0x%x\n", RELOC(prom_initrd_start));
1934 prom_debug("initrd_end=0x%x\n", RELOC(prom_initrd_end));
1935 }
1936#endif /* CONFIG_BLK_DEV_INITRD */
1937}
1938
1939/*
1940 * We enter here early on, when the Open Firmware prom is still
 1941 * handling exceptions and managing the MMU hash table for us.
1942 */
1943
1944unsigned long __init prom_init(unsigned long r3, unsigned long r4,
1945 unsigned long pp,
1946 unsigned long r6, unsigned long r7)
1947{
1948 struct prom_t *_prom;
1949 unsigned long hdr;
1950 u32 getprop_rval;
1951 unsigned long offset = reloc_offset();
1952
1953#ifdef CONFIG_PPC32
1954 reloc_got2(offset);
1955#endif
1956
1957 _prom = &RELOC(prom);
1958
1959 /*
1960 * First zero the BSS
1961 */
1962 memset(&RELOC(__bss_start), 0, __bss_stop - __bss_start);
1963
1964 /*
1965 * Init interface to Open Firmware, get some node references,
1966 * like /chosen
1967 */
1968 prom_init_client_services(pp);
1969
1970 /*
1971 * Init prom stdout device
1972 */
1973 prom_init_stdout();
1974
1975 /*
1976 * See if this OF is old enough that we need to do explicit maps
1977 */
1978 prom_find_mmu();
1979
1980 /*
1981 * Check for an initrd
1982 */
1983 prom_check_initrd(r3, r4);
1984
1985 /*
1986 * Get default machine type. At this point, we do not differentiate
1987 * between pSeries SMP and pSeries LPAR
1988 */
1989 RELOC(of_platform) = prom_find_machine_type();
1990 getprop_rval = RELOC(of_platform);
1991 prom_setprop(_prom->chosen, "linux,platform",
1992 &getprop_rval, sizeof(getprop_rval));
1993
1994#ifdef CONFIG_PPC_PSERIES
1995 /*
1996 * On pSeries, inform the firmware about our capabilities
1997 */
1998 if (RELOC(of_platform) & PLATFORM_PSERIES)
1999 prom_send_capabilities();
2000#endif
2001
2002 /*
2003 * On pSeries and BPA, copy the CPU hold code
2004 */
2005 if (RELOC(of_platform) != PLATFORM_POWERMAC)
2006 copy_and_flush(0, KERNELBASE + offset, 0x100, 0);
2007
2008 /*
2009 * Do early parsing of command line
2010 */
2011 early_cmdline_parse();
2012
2013 /*
2014 * Initialize memory management within prom_init
2015 */
2016 prom_init_mem();
2017
2018 /*
2019 * Determine which cpu is actually running right _now_
2020 */
2021 prom_find_boot_cpu();
2022
2023 /*
2024 * Initialize display devices
2025 */
2026 prom_check_displays();
2027
2028#ifdef CONFIG_PPC64
2029 /*
2030 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else
 2031	 * that uses the allocator; we need to make sure we get the top of memory
2032 * available for us here...
2033 */
2034 if (RELOC(of_platform) == PLATFORM_PSERIES)
2035 prom_initialize_tce_table();
2036#endif
2037
2038 /*
2039 * On non-powermacs, try to instantiate RTAS and put all CPUs
2040 * in spin-loops. PowerMacs don't have a working RTAS and use
2041 * a different way to spin CPUs.
2042 */
2043 if (RELOC(of_platform) != PLATFORM_POWERMAC) {
2044 prom_instantiate_rtas();
2045 prom_hold_cpus();
2046 }
2047
2048 /*
2049 * Fill in some info for use by the kernel later on
2050 */
2051 if (RELOC(prom_memory_limit))
2052 prom_setprop(_prom->chosen, "linux,memory-limit",
2053 &RELOC(prom_memory_limit),
2054 sizeof(prom_memory_limit));
2055#ifdef CONFIG_PPC64
2056 if (RELOC(ppc64_iommu_off))
2057 prom_setprop(_prom->chosen, "linux,iommu-off", NULL, 0);
2058
2059 if (RELOC(iommu_force_on))
2060 prom_setprop(_prom->chosen, "linux,iommu-force-on", NULL, 0);
2061
2062 if (RELOC(prom_tce_alloc_start)) {
2063 prom_setprop(_prom->chosen, "linux,tce-alloc-start",
2064 &RELOC(prom_tce_alloc_start),
2065 sizeof(prom_tce_alloc_start));
2066 prom_setprop(_prom->chosen, "linux,tce-alloc-end",
2067 &RELOC(prom_tce_alloc_end),
2068 sizeof(prom_tce_alloc_end));
2069 }
2070#endif
2071
2072 /*
2073 * Fixup any known bugs in the device-tree
2074 */
2075 fixup_device_tree();
2076
2077 /*
2078 * Now finally create the flattened device-tree
2079 */
2080 prom_printf("copying OF device tree ...\n");
2081 flatten_device_tree();
2082
2083 /* in case stdin is USB and still active on IBM machines... */
2084 prom_close_stdin();
2085
2086 /*
2087 * Call OF "quiesce" method to shut down pending DMAs from
2088 * devices etc...
2089 */
2090 prom_printf("Calling quiesce ...\n");
2091 call_prom("quiesce", 0, 0);
2092
2093 /*
2094 * And finally, call the kernel passing it the flattened device
2095 * tree and NULL as r5, thus triggering the new entry point which
2096 * is common to us and kexec
2097 */
2098 hdr = RELOC(dt_header_start);
2099 prom_printf("returning from prom_init\n");
2100 prom_debug("->dt_header_start=0x%x\n", hdr);
2101
2102#ifdef CONFIG_PPC32
2103 reloc_got2(-offset);
2104#endif
2105
2106 __start(hdr, KERNELBASE + offset, 0);
2107
2108 return 0;
2109}
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
new file mode 100644
index 000000000000..568ea335d616
--- /dev/null
+++ b/arch/powerpc/kernel/ptrace.c
@@ -0,0 +1,613 @@
1/*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Derived from "arch/m68k/kernel/ptrace.c"
6 * Copyright (C) 1994 by Hamish Macdonald
7 * Taken from linux/kernel/ptrace.c and modified for M680x0.
8 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
9 *
10 * Modified by Cort Dougan (cort@hq.fsmlabs.com)
11 * and Paul Mackerras (paulus@samba.org).
12 *
13 * This file is subject to the terms and conditions of the GNU General
14 * Public License. See the file README.legal in the main directory of
15 * this archive for more details.
16 */
17
18#include <linux/config.h>
19#include <linux/kernel.h>
20#include <linux/sched.h>
21#include <linux/mm.h>
22#include <linux/smp.h>
23#include <linux/smp_lock.h>
24#include <linux/errno.h>
25#include <linux/ptrace.h>
26#include <linux/user.h>
27#include <linux/security.h>
28#include <linux/signal.h>
29#include <linux/seccomp.h>
30#include <linux/audit.h>
31#ifdef CONFIG_PPC32
32#include <linux/module.h>
33#endif
34
35#include <asm/uaccess.h>
36#include <asm/page.h>
37#include <asm/pgtable.h>
38#include <asm/system.h>
39#ifdef CONFIG_PPC64
40#include <asm/ptrace-common.h>
41#endif
42
43#ifdef CONFIG_PPC32
44/*
45 * Set of msr bits that gdb can change on behalf of a process.
46 */
47#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
48#define MSR_DEBUGCHANGE 0
49#else
50#define MSR_DEBUGCHANGE (MSR_SE | MSR_BE)
51#endif
52#endif /* CONFIG_PPC32 */
53
54/*
55 * This does not yet catch signals sent when the child dies;
56 * that should be handled in exit.c or in signal.c.
57 */
58
59#ifdef CONFIG_PPC32
60/*
61 * Get contents of register REGNO in task TASK.
62 */
63static inline unsigned long get_reg(struct task_struct *task, int regno)
64{
65 if (regno < sizeof(struct pt_regs) / sizeof(unsigned long)
66 && task->thread.regs != NULL)
67 return ((unsigned long *)task->thread.regs)[regno];
68 return (0);
69}
70
71/*
72 * Write contents of register REGNO in task TASK.
73 */
74static inline int put_reg(struct task_struct *task, int regno,
75 unsigned long data)
76{
77 if (regno <= PT_MQ && task->thread.regs != NULL) {
78 if (regno == PT_MSR)
79 data = (data & MSR_DEBUGCHANGE)
80 | (task->thread.regs->msr & ~MSR_DEBUGCHANGE);
81 ((unsigned long *)task->thread.regs)[regno] = data;
82 return 0;
83 }
84 return -EIO;
85}
86
87#ifdef CONFIG_ALTIVEC
88/*
89 * Get contents of AltiVec register state in task TASK
90 */
91static inline int get_vrregs(unsigned long __user *data, struct task_struct *task)
92{
93 int i, j;
94
95 if (!access_ok(VERIFY_WRITE, data, 133 * sizeof(unsigned long)))
96 return -EFAULT;
97
98 /* copy AltiVec registers VR[0] .. VR[31] */
99 for (i = 0; i < 32; i++)
100 for (j = 0; j < 4; j++, data++)
101 if (__put_user(task->thread.vr[i].u[j], data))
102 return -EFAULT;
103
104 /* copy VSCR */
105 for (i = 0; i < 4; i++, data++)
106 if (__put_user(task->thread.vscr.u[i], data))
107 return -EFAULT;
108
109 /* copy VRSAVE */
110 if (__put_user(task->thread.vrsave, data))
111 return -EFAULT;
112
113 return 0;
114}
115
116/*
117 * Write contents of AltiVec register state into task TASK.
118 */
119static inline int set_vrregs(struct task_struct *task, unsigned long __user *data)
120{
121 int i, j;
122
123 if (!access_ok(VERIFY_READ, data, 133 * sizeof(unsigned long)))
124 return -EFAULT;
125
126 /* copy AltiVec registers VR[0] .. VR[31] */
127 for (i = 0; i < 32; i++)
128 for (j = 0; j < 4; j++, data++)
129 if (__get_user(task->thread.vr[i].u[j], data))
130 return -EFAULT;
131
132 /* copy VSCR */
133 for (i = 0; i < 4; i++, data++)
134 if (__get_user(task->thread.vscr.u[i], data))
135 return -EFAULT;
136
137 /* copy VRSAVE */
138 if (__get_user(task->thread.vrsave, data))
139 return -EFAULT;
140
141 return 0;
142}
143#endif
144
145#ifdef CONFIG_SPE
146
147/*
148 * For get_evrregs/set_evrregs functions 'data' has the following layout:
149 *
150 * struct {
151 * u32 evr[32];
152 * u64 acc;
153 * u32 spefscr;
154 * }
155 */
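/*
 * Index arithmetic (explanatory note, not part of the original
 * commit): evr[32] fills 32-bit word indices 0..31 of 'data', the
 * 64-bit acc spans indices 32..33, and spefscr therefore sits at
 * data[34] -- which is why the helpers below address &data[34] and
 * why access_ok() checks 35 words.
 */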
156
157/*
158 * Get contents of SPE register state in task TASK.
159 */
160static inline int get_evrregs(unsigned long *data, struct task_struct *task)
161{
162 int i;
163
164 if (!access_ok(VERIFY_WRITE, data, 35 * sizeof(unsigned long)))
165 return -EFAULT;
166
167 /* copy SPEFSCR */
168 if (__put_user(task->thread.spefscr, &data[34]))
169 return -EFAULT;
170
171 /* copy SPE registers EVR[0] .. EVR[31] */
172 for (i = 0; i < 32; i++, data++)
173 if (__put_user(task->thread.evr[i], data))
174 return -EFAULT;
175
176 /* copy ACC */
177 if (__put_user64(task->thread.acc, (unsigned long long *)data))
178 return -EFAULT;
179
180 return 0;
181}
182
183/*
184 * Write contents of SPE register state into task TASK.
185 */
186static inline int set_evrregs(struct task_struct *task, unsigned long *data)
187{
188 int i;
189
190 if (!access_ok(VERIFY_READ, data, 35 * sizeof(unsigned long)))
191 return -EFAULT;
192
193 /* copy SPEFSCR */
194 if (__get_user(task->thread.spefscr, &data[34]))
195 return -EFAULT;
196
197 /* copy SPE registers EVR[0] .. EVR[31] */
198 for (i = 0; i < 32; i++, data++)
199 if (__get_user(task->thread.evr[i], data))
200 return -EFAULT;
201 /* copy ACC */
202 if (__get_user64(task->thread.acc, (unsigned long long*)data))
203 return -EFAULT;
204
205 return 0;
206}
207#endif /* CONFIG_SPE */
208
209static inline void
210set_single_step(struct task_struct *task)
211{
212 struct pt_regs *regs = task->thread.regs;
213
214 if (regs != NULL) {
215#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
216 task->thread.dbcr0 = DBCR0_IDM | DBCR0_IC;
217 regs->msr |= MSR_DE;
218#else
219 regs->msr |= MSR_SE;
220#endif
221 }
222}
223
224static inline void
225clear_single_step(struct task_struct *task)
226{
227 struct pt_regs *regs = task->thread.regs;
228
229 if (regs != NULL) {
230#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
231 task->thread.dbcr0 = 0;
232 regs->msr &= ~MSR_DE;
233#else
234 regs->msr &= ~MSR_SE;
235#endif
236 }
237}
238#endif /* CONFIG_PPC32 */
239
240/*
241 * Called by kernel/ptrace.c when detaching..
242 *
243 * Make sure single step bits etc are not set.
244 */
245void ptrace_disable(struct task_struct *child)
246{
247 /* make sure the single step bit is not set. */
248 clear_single_step(child);
249}
250
251long sys_ptrace(long request, long pid, long addr, long data)
252{
253 struct task_struct *child;
254 int ret = -EPERM;
255
256 lock_kernel();
257 if (request == PTRACE_TRACEME) {
258 /* are we already being traced? */
259 if (current->ptrace & PT_PTRACED)
260 goto out;
261 ret = security_ptrace(current->parent, current);
262 if (ret)
263 goto out;
264 /* set the ptrace bit in the process flags. */
265 current->ptrace |= PT_PTRACED;
266 ret = 0;
267 goto out;
268 }
269 ret = -ESRCH;
270 read_lock(&tasklist_lock);
271 child = find_task_by_pid(pid);
272 if (child)
273 get_task_struct(child);
274 read_unlock(&tasklist_lock);
275 if (!child)
276 goto out;
277
278 ret = -EPERM;
279 if (pid == 1) /* you may not mess with init */
280 goto out_tsk;
281
282 if (request == PTRACE_ATTACH) {
283 ret = ptrace_attach(child);
284 goto out_tsk;
285 }
286
287 ret = ptrace_check_attach(child, request == PTRACE_KILL);
288 if (ret < 0)
289 goto out_tsk;
290
291 switch (request) {
292 /* when I and D space are separate, these will need to be fixed. */
293 case PTRACE_PEEKTEXT: /* read word at location addr. */
294 case PTRACE_PEEKDATA: {
295 unsigned long tmp;
296 int copied;
297
298 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
299 ret = -EIO;
300 if (copied != sizeof(tmp))
301 break;
302 ret = put_user(tmp,(unsigned long __user *) data);
303 break;
304 }
305
306 /* read the word at location addr in the USER area. */
307 case PTRACE_PEEKUSR: {
308 unsigned long index, tmp;
309
310 ret = -EIO;
311 /* convert to index and check */
312#ifdef CONFIG_PPC32
313 index = (unsigned long) addr >> 2;
314 if ((addr & 3) || (index > PT_FPSCR)
315 || (child->thread.regs == NULL))
316#else
317 index = (unsigned long) addr >> 3;
318 if ((addr & 7) || (index > PT_FPSCR))
319#endif
320 break;
321
322#ifdef CONFIG_PPC32
323 CHECK_FULL_REGS(child->thread.regs);
324#endif
325 if (index < PT_FPR0) {
326 tmp = get_reg(child, (int) index);
327 } else {
328 flush_fp_to_thread(child);
329 tmp = ((unsigned long *)child->thread.fpr)[index - PT_FPR0];
330 }
331 ret = put_user(tmp,(unsigned long __user *) data);
332 break;
333 }
334
335 /* If I and D space are separate, this will have to be fixed. */
336 case PTRACE_POKETEXT: /* write the word at location addr. */
337 case PTRACE_POKEDATA:
338 ret = 0;
339 if (access_process_vm(child, addr, &data, sizeof(data), 1)
340 == sizeof(data))
341 break;
342 ret = -EIO;
343 break;
344
345 /* write the word at location addr in the USER area */
346 case PTRACE_POKEUSR: {
347 unsigned long index;
348
349 ret = -EIO;
350 /* convert to index and check */
351#ifdef CONFIG_PPC32
352 index = (unsigned long) addr >> 2;
353 if ((addr & 3) || (index > PT_FPSCR)
354 || (child->thread.regs == NULL))
355#else
356 index = (unsigned long) addr >> 3;
357 if ((addr & 7) || (index > PT_FPSCR))
358#endif
359 break;
360
361#ifdef CONFIG_PPC32
362 CHECK_FULL_REGS(child->thread.regs);
363#endif
364 if (index == PT_ORIG_R3)
365 break;
366 if (index < PT_FPR0) {
367 ret = put_reg(child, index, data);
368 } else {
369 flush_fp_to_thread(child);
370 ((unsigned long *)child->thread.fpr)[index - PT_FPR0] = data;
371 ret = 0;
372 }
373 break;
374 }
375
376 case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
377 case PTRACE_CONT: { /* restart after signal. */
378 ret = -EIO;
379 if (!valid_signal(data))
380 break;
381 if (request == PTRACE_SYSCALL)
382 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
383 else
384 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
385 child->exit_code = data;
386 /* make sure the single step bit is not set. */
387 clear_single_step(child);
388 wake_up_process(child);
389 ret = 0;
390 break;
391 }
392
393/*
394 * Make the child exit. The best we can do is send it a SIGKILL;
395 * perhaps it should be put in the status that it wants to
396 * exit.
397 */
398 case PTRACE_KILL: {
399 ret = 0;
400 if (child->exit_state == EXIT_ZOMBIE) /* already dead */
401 break;
402 child->exit_code = SIGKILL;
403 /* make sure the single step bit is not set. */
404 clear_single_step(child);
405 wake_up_process(child);
406 break;
407 }
408
409 case PTRACE_SINGLESTEP: { /* set the trap flag. */
410 ret = -EIO;
411 if (!valid_signal(data))
412 break;
413 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
414 set_single_step(child);
415 child->exit_code = data;
416 /* give it a chance to run. */
417 wake_up_process(child);
418 ret = 0;
419 break;
420 }
421
422#ifdef CONFIG_PPC64
423 case PTRACE_GET_DEBUGREG: {
424 ret = -EINVAL;
425 /* We only support one DABR and no IABRS at the moment */
426 if (addr > 0)
427 break;
428 ret = put_user(child->thread.dabr,
429 (unsigned long __user *)data);
430 break;
431 }
432
433 case PTRACE_SET_DEBUGREG:
434 ret = ptrace_set_debugreg(child, addr, data);
435 break;
436#endif
437
438 case PTRACE_DETACH:
439 ret = ptrace_detach(child, data);
440 break;
441
442#ifdef CONFIG_PPC64
443 case PPC_PTRACE_GETREGS: { /* Get GPRs 0 - 31. */
444 int i;
445 unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
446 unsigned long __user *tmp = (unsigned long __user *)addr;
447
448 for (i = 0; i < 32; i++) {
449 ret = put_user(*reg, tmp);
450 if (ret)
451 break;
452 reg++;
453 tmp++;
454 }
455 break;
456 }
457
458 case PPC_PTRACE_SETREGS: { /* Set GPRs 0 - 31. */
459 int i;
460 unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
461 unsigned long __user *tmp = (unsigned long __user *)addr;
462
463 for (i = 0; i < 32; i++) {
464 ret = get_user(*reg, tmp);
465 if (ret)
466 break;
467 reg++;
468 tmp++;
469 }
470 break;
471 }
472
473 case PPC_PTRACE_GETFPREGS: { /* Get FPRs 0 - 31. */
474 int i;
475 unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
476 unsigned long __user *tmp = (unsigned long __user *)addr;
477
478 flush_fp_to_thread(child);
479
480 for (i = 0; i < 32; i++) {
481 ret = put_user(*reg, tmp);
482 if (ret)
483 break;
484 reg++;
485 tmp++;
486 }
487 break;
488 }
489
490	case PPC_PTRACE_SETFPREGS: { /* Set FPRs 0 - 31. */
491 int i;
492 unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
493 unsigned long __user *tmp = (unsigned long __user *)addr;
494
495 flush_fp_to_thread(child);
496
497 for (i = 0; i < 32; i++) {
498 ret = get_user(*reg, tmp);
499 if (ret)
500 break;
501 reg++;
502 tmp++;
503 }
504 break;
505 }
506#endif /* CONFIG_PPC64 */
507
508#ifdef CONFIG_ALTIVEC
509 case PTRACE_GETVRREGS:
510 /* Get the child altivec register state. */
511 flush_altivec_to_thread(child);
512 ret = get_vrregs((unsigned long __user *)data, child);
513 break;
514
515 case PTRACE_SETVRREGS:
516 /* Set the child altivec register state. */
517 flush_altivec_to_thread(child);
518 ret = set_vrregs(child, (unsigned long __user *)data);
519 break;
520#endif
521#ifdef CONFIG_SPE
522 case PTRACE_GETEVRREGS:
523 /* Get the child spe register state. */
524 if (child->thread.regs->msr & MSR_SPE)
525 giveup_spe(child);
526 ret = get_evrregs((unsigned long __user *)data, child);
527 break;
528
529 case PTRACE_SETEVRREGS:
530 /* Set the child spe register state. */
531 /* this is to clear the MSR_SPE bit to force a reload
532 * of register state from memory */
533 if (child->thread.regs->msr & MSR_SPE)
534 giveup_spe(child);
535 ret = set_evrregs(child, (unsigned long __user *)data);
536 break;
537#endif
538
539 default:
540 ret = ptrace_request(child, request, addr, data);
541 break;
542 }
543out_tsk:
544 put_task_struct(child);
545out:
546 unlock_kernel();
547 return ret;
548}
549
550static void do_syscall_trace(void)
551{
552 /* the 0x80 provides a way for the tracing parent to distinguish
553 between a syscall stop and SIGTRAP delivery */
554 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
555 ? 0x80 : 0));
556
557 /*
558 * this isn't the same as continuing with a signal, but it will do
559 * for normal use. strace only continues with a signal if the
560 * stopping signal is not SIGTRAP. -brl
561 */
562 if (current->exit_code) {
563 send_sig(current->exit_code, current, 1);
564 current->exit_code = 0;
565 }
566}
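
/*
 * Tracer-side view (illustrative note, not part of the original
 * commit): a parent that set the PTRACE_O_TRACESYSGOOD option will see
 * the child stop with WSTOPSIG(status) == (SIGTRAP | 0x80) at syscall
 * entry and exit, letting it tell those stops apart from delivery of
 * a genuine SIGTRAP.
 */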
567
568void do_syscall_trace_enter(struct pt_regs *regs)
569{
570#ifdef CONFIG_PPC64
571 secure_computing(regs->gpr[0]);
572#endif
573
574 if (test_thread_flag(TIF_SYSCALL_TRACE)
575 && (current->ptrace & PT_PTRACED))
576 do_syscall_trace();
577
578 if (unlikely(current->audit_context))
579 audit_syscall_entry(current,
580#ifdef CONFIG_PPC32
581 AUDIT_ARCH_PPC,
582#else
583 test_thread_flag(TIF_32BIT)?AUDIT_ARCH_PPC:AUDIT_ARCH_PPC64,
584#endif
585 regs->gpr[0],
586 regs->gpr[3], regs->gpr[4],
587 regs->gpr[5], regs->gpr[6]);
588}
589
590void do_syscall_trace_leave(struct pt_regs *regs)
591{
592#ifdef CONFIG_PPC32
593 secure_computing(regs->gpr[0]);
594#endif
595
596 if (unlikely(current->audit_context))
597 audit_syscall_exit(current,
598 (regs->ccr&0x1000)?AUDITSC_FAILURE:AUDITSC_SUCCESS,
599 regs->result);
600
601 if ((test_thread_flag(TIF_SYSCALL_TRACE)
602#ifdef CONFIG_PPC64
603 || test_thread_flag(TIF_SINGLESTEP)
604#endif
605 )
606 && (current->ptrace & PT_PTRACED))
607 do_syscall_trace();
608}
609
610#ifdef CONFIG_PPC32
611EXPORT_SYMBOL(do_syscall_trace_enter);
612EXPORT_SYMBOL(do_syscall_trace_leave);
613#endif
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
new file mode 100644
index 000000000000..91eb952e0293
--- /dev/null
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -0,0 +1,450 @@
1/*
2 * ptrace for 32-bit processes running on a 64-bit kernel.
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Derived from "arch/m68k/kernel/ptrace.c"
8 * Copyright (C) 1994 by Hamish Macdonald
9 * Taken from linux/kernel/ptrace.c and modified for M680x0.
10 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
11 *
12 * Modified by Cort Dougan (cort@hq.fsmlabs.com)
13 * and Paul Mackerras (paulus@samba.org).
14 *
15 * This file is subject to the terms and conditions of the GNU General
16 * Public License. See the file COPYING in the main directory of
17 * this archive for more details.
18 */
19
20#include <linux/config.h>
21#include <linux/kernel.h>
22#include <linux/sched.h>
23#include <linux/mm.h>
24#include <linux/smp.h>
25#include <linux/smp_lock.h>
26#include <linux/errno.h>
27#include <linux/ptrace.h>
28#include <linux/user.h>
29#include <linux/security.h>
30#include <linux/signal.h>
31
32#include <asm/uaccess.h>
33#include <asm/page.h>
34#include <asm/pgtable.h>
35#include <asm/system.h>
36#include <asm/ptrace-common.h>
37
38/*
39 * This does not yet catch signals sent when the child dies;
40 * that should be handled in exit.c or in signal.c.
41 */
42
43long compat_sys_ptrace(int request, int pid, unsigned long addr,
44 unsigned long data)
45{
46 struct task_struct *child;
47 int ret = -EPERM;
48
49 lock_kernel();
50 if (request == PTRACE_TRACEME) {
51 /* are we already being traced? */
52 if (current->ptrace & PT_PTRACED)
53 goto out;
54 ret = security_ptrace(current->parent, current);
55 if (ret)
56 goto out;
57 /* set the ptrace bit in the process flags. */
58 current->ptrace |= PT_PTRACED;
59 ret = 0;
60 goto out;
61 }
62 ret = -ESRCH;
63 read_lock(&tasklist_lock);
64 child = find_task_by_pid(pid);
65 if (child)
66 get_task_struct(child);
67 read_unlock(&tasklist_lock);
68 if (!child)
69 goto out;
70
71 ret = -EPERM;
72 if (pid == 1) /* you may not mess with init */
73 goto out_tsk;
74
75 if (request == PTRACE_ATTACH) {
76 ret = ptrace_attach(child);
77 goto out_tsk;
78 }
79
80 ret = ptrace_check_attach(child, request == PTRACE_KILL);
81 if (ret < 0)
82 goto out_tsk;
83
84 switch (request) {
85 /* when I and D space are separate, these will need to be fixed. */
86 case PTRACE_PEEKTEXT: /* read word at location addr. */
87 case PTRACE_PEEKDATA: {
88 unsigned int tmp;
89 int copied;
90
91 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
92 ret = -EIO;
93 if (copied != sizeof(tmp))
94 break;
95 ret = put_user(tmp, (u32 __user *)data);
96 break;
97 }
98
99 /*
100 * Read 4 bytes of the other process' storage
101 * data is a pointer specifying where the user wants the
102 * 4 bytes copied into
103 * addr is a pointer in the user's storage that contains an 8 byte
104 * address in the other process of the 4 bytes that are to be read
105 * (this is run in a 32-bit process looking at a 64-bit process)
106 * when I and D space are separate, these will need to be fixed.
107 */
108 case PPC_PTRACE_PEEKTEXT_3264:
109 case PPC_PTRACE_PEEKDATA_3264: {
110 u32 tmp;
111 int copied;
112 u32 __user * addrOthers;
113
114 ret = -EIO;
115
116 /* Get the addr in the other process that we want to read */
117 if (get_user(addrOthers, (u32 __user * __user *)addr) != 0)
118 break;
119
120 copied = access_process_vm(child, (u64)addrOthers, &tmp,
121 sizeof(tmp), 0);
122 if (copied != sizeof(tmp))
123 break;
124 ret = put_user(tmp, (u32 __user *)data);
125 break;
126 }
127
128 /* Read a register (specified by ADDR) out of the "user area" */
129 case PTRACE_PEEKUSR: {
130 int index;
131 unsigned long tmp;
132
133 ret = -EIO;
134 /* convert to index and check */
135 index = (unsigned long) addr >> 2;
136 if ((addr & 3) || (index > PT_FPSCR32))
137 break;
138
139 if (index < PT_FPR0) {
140 tmp = get_reg(child, index);
141 } else {
142 flush_fp_to_thread(child);
143 /*
144 * the user space code considers the floating point
145 * to be an array of unsigned int (32 bits) - the
146 * index passed in is based on this assumption.
147 */
148 tmp = ((unsigned int *)child->thread.fpr)[index - PT_FPR0];
149 }
150 ret = put_user((unsigned int)tmp, (u32 __user *)data);
151 break;
152 }
153
154 /*
155 * Read 4 bytes out of the other process' pt_regs area
156 * data is a pointer specifying where the user wants the
157 * 4 bytes copied into
158 * addr is the offset into the other process' pt_regs structure
159 * that is to be read
160 * (this is run in a 32-bit process looking at a 64-bit process)
161 */
162 case PPC_PTRACE_PEEKUSR_3264: {
163 u32 index;
164 u32 reg32bits;
165 u64 tmp;
166 u32 numReg;
167 u32 part;
168
169 ret = -EIO;
170 /* Determine which register the user wants */
171 index = (u64)addr >> 2;
172 numReg = index / 2;
173 /* Determine which part of the register the user wants */
174 if (index % 2)
175 part = 1; /* want the 2nd half of the register (right-most). */
176 else
177 part = 0; /* want the 1st half of the register (left-most). */
178
179 /* Validate the input - check to see if address is on the wrong boundary or beyond the end of the user area */
180 if ((addr & 3) || numReg > PT_FPSCR)
181 break;
182
183 if (numReg >= PT_FPR0) {
184 flush_fp_to_thread(child);
185 tmp = ((unsigned long int *)child->thread.fpr)[numReg - PT_FPR0];
186 } else { /* register within PT_REGS struct */
187 tmp = get_reg(child, numReg);
188 }
189 reg32bits = ((u32*)&tmp)[part];
190 ret = put_user(reg32bits, (u32 __user *)data);
191 break;
192 }
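	/*
	 * Worked example (illustrative, not part of the original
	 * commit): addr 0 gives index 0, numReg 0, part 0 -- the
	 * most-significant word of the 64-bit GPR0 on this big-endian
	 * layout -- while addr 4 gives part 1, the least-significant
	 * word of the same register.
	 */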
193
194 /* If I and D space are separate, this will have to be fixed. */
195 case PTRACE_POKETEXT: /* write the word at location addr. */
196 case PTRACE_POKEDATA: {
197 unsigned int tmp;
198 tmp = data;
199 ret = 0;
200 if (access_process_vm(child, addr, &tmp, sizeof(tmp), 1)
201 == sizeof(tmp))
202 break;
203 ret = -EIO;
204 break;
205 }
206
207 /*
208 * Write 4 bytes into the other process' storage
209 * data is the 4 bytes that the user wants written
210 * addr is a pointer in the user's storage that contains an
211 * 8 byte address in the other process where the 4 bytes
212 * are to be written
213 * (this is run in a 32-bit process looking at a 64-bit process)
214 * when I and D space are separate, these will need to be fixed.
215 */
216 case PPC_PTRACE_POKETEXT_3264:
217 case PPC_PTRACE_POKEDATA_3264: {
218 u32 tmp = data;
219 u32 __user * addrOthers;
220
221 /* Get the addr in the other process that we want to write into */
222 ret = -EIO;
223 if (get_user(addrOthers, (u32 __user * __user *)addr) != 0)
224 break;
225 ret = 0;
226 if (access_process_vm(child, (u64)addrOthers, &tmp,
227 sizeof(tmp), 1) == sizeof(tmp))
228 break;
229 ret = -EIO;
230 break;
231 }
232
233 /* write the word at location addr in the USER area */
234 case PTRACE_POKEUSR: {
235 unsigned long index;
236
237 ret = -EIO;
238 /* convert to index and check */
239 index = (unsigned long) addr >> 2;
240 if ((addr & 3) || (index > PT_FPSCR32))
241 break;
242
243 if (index == PT_ORIG_R3)
244 break;
245 if (index < PT_FPR0) {
246 ret = put_reg(child, index, data);
247 } else {
248 flush_fp_to_thread(child);
249 /*
250 * the user space code considers the floating point
251 * to be an array of unsigned int (32 bits) - the
252 * index passed in is based on this assumption.
253 */
254 ((unsigned int *)child->thread.fpr)[index - PT_FPR0] = data;
255 ret = 0;
256 }
257 break;
258 }
259
260 /*
261 * Write 4 bytes into the other process' pt_regs area
262 * data is the 4 bytes that the user wants written
263 * addr is the offset into the other process' pt_regs structure
264 * that is to be written into
265 * (this is run in a 32-bit process looking at a 64-bit process)
266 */
267 case PPC_PTRACE_POKEUSR_3264: {
268 u32 index;
269 u32 numReg;
270
271 ret = -EIO;
272 /* Determine which register the user wants */
273 index = (u64)addr >> 2;
274 numReg = index / 2;
275 /*
276 * Validate the input - check to see if address is on the
277 * wrong boundary or beyond the end of the user area
278 */
279 if ((addr & 3) || (numReg > PT_FPSCR))
280 break;
281		/* Ensure it is a register we let them change */
282 if ((numReg == PT_ORIG_R3)
283 || ((numReg > PT_CCR) && (numReg < PT_FPR0)))
284 break;
285 if (numReg >= PT_FPR0) {
286 flush_fp_to_thread(child);
287 }
288 if (numReg == PT_MSR)
289 data = (data & MSR_DEBUGCHANGE)
290 | (child->thread.regs->msr & ~MSR_DEBUGCHANGE);
291 ((u32*)child->thread.regs)[index] = data;
292 ret = 0;
293 break;
294 }
295
296 case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
297 case PTRACE_CONT: { /* restart after signal. */
298 ret = -EIO;
299 if (!valid_signal(data))
300 break;
301 if (request == PTRACE_SYSCALL)
302 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
303 else
304 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
305 child->exit_code = data;
306 /* make sure the single step bit is not set. */
307 clear_single_step(child);
308 wake_up_process(child);
309 ret = 0;
310 break;
311 }
312
313 /*
314 * make the child exit. Best I can do is send it a sigkill.
315 * perhaps it should be put in the status that it wants to
316 * exit.
317 */
318 case PTRACE_KILL: {
319 ret = 0;
320 if (child->exit_state == EXIT_ZOMBIE) /* already dead */
321 break;
322 child->exit_code = SIGKILL;
323 /* make sure the single step bit is not set. */
324 clear_single_step(child);
325 wake_up_process(child);
326 break;
327 }
328
329 case PTRACE_SINGLESTEP: { /* set the trap flag. */
330 ret = -EIO;
331 if (!valid_signal(data))
332 break;
333 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
334 set_single_step(child);
335 child->exit_code = data;
336 /* give it a chance to run. */
337 wake_up_process(child);
338 ret = 0;
339 break;
340 }
341
342 case PTRACE_GET_DEBUGREG: {
343 ret = -EINVAL;
344 /* We only support one DABR and no IABRS at the moment */
345 if (addr > 0)
346 break;
347 ret = put_user(child->thread.dabr, (u32 __user *)data);
348 break;
349 }
350
351 case PTRACE_SET_DEBUGREG:
352 ret = ptrace_set_debugreg(child, addr, data);
353 break;
354
355 case PTRACE_DETACH:
356 ret = ptrace_detach(child, data);
357 break;
358
359 case PPC_PTRACE_GETREGS: { /* Get GPRs 0 - 31. */
360 int i;
361 unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
362 unsigned int __user *tmp = (unsigned int __user *)addr;
363
364 for (i = 0; i < 32; i++) {
365 ret = put_user(*reg, tmp);
366 if (ret)
367 break;
368 reg++;
369 tmp++;
370 }
371 break;
372 }
373
374 case PPC_PTRACE_SETREGS: { /* Set GPRs 0 - 31. */
375 int i;
376 unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
377 unsigned int __user *tmp = (unsigned int __user *)addr;
378
379 for (i = 0; i < 32; i++) {
380 ret = get_user(*reg, tmp);
381 if (ret)
382 break;
383 reg++;
384 tmp++;
385 }
386 break;
387 }
388
389 case PPC_PTRACE_GETFPREGS: { /* Get FPRs 0 - 31. */
390 int i;
391 unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
392 unsigned int __user *tmp = (unsigned int __user *)addr;
393
394 flush_fp_to_thread(child);
395
396 for (i = 0; i < 32; i++) {
397 ret = put_user(*reg, tmp);
398 if (ret)
399 break;
400 reg++;
401 tmp++;
402 }
403 break;
404 }
405
406	case PPC_PTRACE_SETFPREGS: { /* Set FPRs 0 - 31. */
407 int i;
408 unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
409 unsigned int __user *tmp = (unsigned int __user *)addr;
410
411 flush_fp_to_thread(child);
412
413 for (i = 0; i < 32; i++) {
414 ret = get_user(*reg, tmp);
415 if (ret)
416 break;
417 reg++;
418 tmp++;
419 }
420 break;
421 }
422
423 case PTRACE_GETEVENTMSG:
424 ret = put_user(child->ptrace_message, (unsigned int __user *) data);
425 break;
426
427#ifdef CONFIG_ALTIVEC
428 case PTRACE_GETVRREGS:
429 /* Get the child altivec register state. */
430 flush_altivec_to_thread(child);
431 ret = get_vrregs((unsigned long __user *)data, child);
432 break;
433
434 case PTRACE_SETVRREGS:
435 /* Set the child altivec register state. */
436 flush_altivec_to_thread(child);
437 ret = set_vrregs(child, (unsigned long __user *)data);
438 break;
439#endif
440
441 default:
442 ret = ptrace_request(child, request, addr, data);
443 break;
444 }
445out_tsk:
446 put_task_struct(child);
447out:
448 unlock_kernel();
449 return ret;
450}
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
new file mode 100644
index 000000000000..4d22eeeeb91d
--- /dev/null
+++ b/arch/powerpc/kernel/rtas.c
@@ -0,0 +1,680 @@
1/*
2 *
3 * Procedures for interfacing to the RTAS on CHRP machines.
4 *
5 * Peter Bergner, IBM March 2001.
6 * Copyright (C) 2001 IBM.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14#include <stdarg.h>
15#include <linux/kernel.h>
16#include <linux/types.h>
17#include <linux/spinlock.h>
18#include <linux/module.h>
19#include <linux/init.h>
20
21#include <asm/prom.h>
22#include <asm/rtas.h>
23#include <asm/semaphore.h>
24#include <asm/machdep.h>
25#include <asm/page.h>
26#include <asm/param.h>
27#include <asm/system.h>
28#include <asm/delay.h>
29#include <asm/uaccess.h>
30#include <asm/lmb.h>
31#ifdef CONFIG_PPC64
32#include <asm/systemcfg.h>
33#endif
34
35struct rtas_t rtas = {
36 .lock = SPIN_LOCK_UNLOCKED
37};
38
39EXPORT_SYMBOL(rtas);
40
41DEFINE_SPINLOCK(rtas_data_buf_lock);
42char rtas_data_buf[RTAS_DATA_BUF_SIZE] __cacheline_aligned;
43unsigned long rtas_rmo_buf;
44
45/*
46 * call_rtas_display_status and call_rtas_display_status_delay
47 * are designed only for very early low-level debugging, which
48 * is why the token is hard-coded to 10.
49 */
50void call_rtas_display_status(unsigned char c)
51{
52 struct rtas_args *args = &rtas.args;
53 unsigned long s;
54
55 if (!rtas.base)
56 return;
57 spin_lock_irqsave(&rtas.lock, s);
58
59 args->token = 10;
60 args->nargs = 1;
61 args->nret = 1;
62 args->rets = (rtas_arg_t *)&(args->args[1]);
63 args->args[0] = (int)c;
64
65 enter_rtas(__pa(args));
66
67 spin_unlock_irqrestore(&rtas.lock, s);
68}
69
70void call_rtas_display_status_delay(unsigned char c)
71{
72 static int pending_newline = 0; /* did last write end with unprinted newline? */
73 static int width = 16;
74
75 if (c == '\n') {
76 while (width-- > 0)
77 call_rtas_display_status(' ');
78 width = 16;
79 udelay(500000);
80 pending_newline = 1;
81 } else {
82 if (pending_newline) {
83 call_rtas_display_status('\r');
84 call_rtas_display_status('\n');
85 }
86 pending_newline = 0;
87 if (width--) {
88 call_rtas_display_status(c);
89 udelay(10000);
90 }
91 }
92}
93
94void rtas_progress(char *s, unsigned short hex)
95{
96 struct device_node *root;
97 int width, *p;
98 char *os;
99 static int display_character, set_indicator;
100 static int display_width, display_lines, *row_width, form_feed;
101 static DEFINE_SPINLOCK(progress_lock);
102 static int current_line;
103 static int pending_newline = 0; /* did last write end with unprinted newline? */
104
105 if (!rtas.base)
106 return;
107
108 if (display_width == 0) {
109 display_width = 0x10;
110 if ((root = find_path_device("/rtas"))) {
111 if ((p = (unsigned int *)get_property(root,
112 "ibm,display-line-length", NULL)))
113 display_width = *p;
114 if ((p = (unsigned int *)get_property(root,
115 "ibm,form-feed", NULL)))
116 form_feed = *p;
117 if ((p = (unsigned int *)get_property(root,
118 "ibm,display-number-of-lines", NULL)))
119 display_lines = *p;
120 row_width = (unsigned int *)get_property(root,
121 "ibm,display-truncation-length", NULL);
122 }
123 display_character = rtas_token("display-character");
124 set_indicator = rtas_token("set-indicator");
125 }
126
127 if (display_character == RTAS_UNKNOWN_SERVICE) {
128 /* use hex display if available */
129 if (set_indicator != RTAS_UNKNOWN_SERVICE)
130 rtas_call(set_indicator, 3, 1, NULL, 6, 0, hex);
131 return;
132 }
133
134 spin_lock(&progress_lock);
135
136 /*
137 * Last write ended with newline, but we didn't print it since
138 * it would just clear the bottom line of output. Print it now
139 * instead.
140 *
141 * If no newline is pending and form feed is supported, clear the
142 * display with a form feed; otherwise, print a CR to start output
143 * at the beginning of the line.
144 */
145 if (pending_newline) {
146 rtas_call(display_character, 1, 1, NULL, '\r');
147 rtas_call(display_character, 1, 1, NULL, '\n');
148 pending_newline = 0;
149 } else {
150 current_line = 0;
151 if (form_feed)
152 rtas_call(display_character, 1, 1, NULL,
153 (char)form_feed);
154 else
155 rtas_call(display_character, 1, 1, NULL, '\r');
156 }
157
158 if (row_width)
159 width = row_width[current_line];
160 else
161 width = display_width;
162 os = s;
163 while (*os) {
164 if (*os == '\n' || *os == '\r') {
165 /* If newline is the last character, save it
166 * until next call to avoid bumping up the
167 * display output.
168 */
169 if (*os == '\n' && !os[1]) {
170 pending_newline = 1;
171 current_line++;
172 if (current_line > display_lines-1)
173 current_line = display_lines-1;
174 spin_unlock(&progress_lock);
175 return;
176 }
177
178 /* RTAS wants CR-LF, not just LF */
179
180 if (*os == '\n') {
181 rtas_call(display_character, 1, 1, NULL, '\r');
182 rtas_call(display_character, 1, 1, NULL, '\n');
183 } else {
184 /* CR might be used to re-draw a line, so we'll
185 * leave it alone and not add LF.
186 */
187 rtas_call(display_character, 1, 1, NULL, *os);
188 }
189
190 if (row_width)
191 width = row_width[current_line];
192 else
193 width = display_width;
194 } else {
195 width--;
196 rtas_call(display_character, 1, 1, NULL, *os);
197 }
198
199 os++;
200
201		/* if we have overrun the line width, skip the rest of this line */
202 if (width <= 0)
203 while ((*os != 0) && (*os != '\n') && (*os != '\r'))
204 os++;
205 }
206
207 spin_unlock(&progress_lock);
208}
209
210int rtas_token(const char *service)
211{
212 int *tokp;
213 if (rtas.dev == NULL)
214 return RTAS_UNKNOWN_SERVICE;
215 tokp = (int *) get_property(rtas.dev, service, NULL);
216 return tokp ? *tokp : RTAS_UNKNOWN_SERVICE;
217}
218
219#ifdef CONFIG_RTAS_ERROR_LOGGING
220/*
221 * Return the firmware-specified size of the error log buffer
222 * for all rtas calls that require an error buffer argument.
223 * This includes 'check-exception' and 'rtas-last-error'.
224 */
225int rtas_get_error_log_max(void)
226{
227 static int rtas_error_log_max;
228 if (rtas_error_log_max)
229 return rtas_error_log_max;
230
231 rtas_error_log_max = rtas_token ("rtas-error-log-max");
232 if ((rtas_error_log_max == RTAS_UNKNOWN_SERVICE) ||
233 (rtas_error_log_max > RTAS_ERROR_LOG_MAX)) {
234 printk (KERN_WARNING "RTAS: bad log buffer size %d\n",
235 rtas_error_log_max);
236 rtas_error_log_max = RTAS_ERROR_LOG_MAX;
237 }
238 return rtas_error_log_max;
239}
240EXPORT_SYMBOL(rtas_get_error_log_max);
241
242
243char rtas_err_buf[RTAS_ERROR_LOG_MAX];
244int rtas_last_error_token;
245
246/** Return a copy of the detailed error text associated with the
247 * most recent failed call to rtas. Because the error text
248 * might go stale if there are any other intervening rtas calls,
249 * this routine must be called atomically with whatever produced
250 * the error (i.e. with rtas.lock still held from the previous call).
251 */
252static char *__fetch_rtas_last_error(char *altbuf)
253{
254 struct rtas_args err_args, save_args;
255 u32 bufsz;
256 char *buf = NULL;
257
258 if (rtas_last_error_token == -1)
259 return NULL;
260
261 bufsz = rtas_get_error_log_max();
262
263 err_args.token = rtas_last_error_token;
264 err_args.nargs = 2;
265 err_args.nret = 1;
266 err_args.args[0] = (rtas_arg_t)__pa(rtas_err_buf);
267 err_args.args[1] = bufsz;
268 err_args.args[2] = 0;
269
270 save_args = rtas.args;
271 rtas.args = err_args;
272
273 enter_rtas(__pa(&rtas.args));
274
275 err_args = rtas.args;
276 rtas.args = save_args;
277
278 /* Log the error in the unlikely case that there was one. */
279 if (unlikely(err_args.args[2] == 0)) {
280 if (altbuf) {
281 buf = altbuf;
282 } else {
283 buf = rtas_err_buf;
284 if (mem_init_done)
285 buf = kmalloc(RTAS_ERROR_LOG_MAX, GFP_ATOMIC);
286 }
287 if (buf)
288 memcpy(buf, rtas_err_buf, RTAS_ERROR_LOG_MAX);
289 }
290
291 return buf;
292}
293
294#define get_errorlog_buffer() kmalloc(RTAS_ERROR_LOG_MAX, GFP_KERNEL)
295
296#else /* CONFIG_RTAS_ERROR_LOGGING */
297#define __fetch_rtas_last_error(x) NULL
298#define get_errorlog_buffer() NULL
299#endif
300
301int rtas_call(int token, int nargs, int nret, int *outputs, ...)
302{
303 va_list list;
304 int i;
305 unsigned long s;
306 struct rtas_args *rtas_args;
307 char *buff_copy = NULL;
308 int ret;
309
310 if (token == RTAS_UNKNOWN_SERVICE)
311 return -1;
312
313 /* Gotta do something different here, use global lock for now... */
314 spin_lock_irqsave(&rtas.lock, s);
315 rtas_args = &rtas.args;
316
317 rtas_args->token = token;
318 rtas_args->nargs = nargs;
319 rtas_args->nret = nret;
320 rtas_args->rets = (rtas_arg_t *)&(rtas_args->args[nargs]);
321 va_start(list, outputs);
322 for (i = 0; i < nargs; ++i)
323 rtas_args->args[i] = va_arg(list, rtas_arg_t);
324 va_end(list);
325
326 for (i = 0; i < nret; ++i)
327 rtas_args->rets[i] = 0;
328
329 enter_rtas(__pa(rtas_args));
330
331 /* A -1 return code indicates that the last command couldn't
332 be completed due to a hardware error. */
333 if (rtas_args->rets[0] == -1)
334 buff_copy = __fetch_rtas_last_error(NULL);
335
336 if (nret > 1 && outputs != NULL)
337 for (i = 0; i < nret-1; ++i)
338 outputs[i] = rtas_args->rets[i+1];
339 ret = (nret > 0)? rtas_args->rets[0]: 0;
340
341 /* Gotta do something different here, use global lock for now... */
342 spin_unlock_irqrestore(&rtas.lock, s);
343
344 if (buff_copy) {
345 log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0);
346 if (mem_init_done)
347 kfree(buff_copy);
348 }
349 return ret;
350}
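
/*
 * Usage sketch (illustrative, not part of the original commit): a
 * service taking one input and returning a status word plus one
 * output value can be called as
 *
 *	int level;
 *	int rc = rtas_call(rtas_token("get-power-level"), 1, 2,
 *			   &level, powerdomain);
 *
 * rets[0] comes back as the return value of rtas_call() and
 * rets[1..nret-1] are copied into the outputs array.
 */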
351
352/* Given an RTAS status code of 990n compute the hinted delay of 10^n
353 * (last digit) milliseconds. For now we bound at n=5 (100 sec).
354 */
355unsigned int rtas_extended_busy_delay_time(int status)
356{
357 int order = status - 9900;
358 unsigned long ms;
359
360 if (order < 0)
361 order = 0; /* RTC depends on this for -2 clock busy */
362 else if (order > 5)
363 order = 5; /* bound */
364
365 /* Use microseconds for reasonable accuracy */
366 for (ms = 1; order > 0; order--)
367 ms *= 10;
368
369 return ms;
370}
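
/*
 * Worked example (explanatory, not part of the original commit): a
 * status of 9903 gives order = 3, so the loop computes ms = 10^3 =
 * 1000; the caller should wait roughly one second before retrying.
 */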
371
372int rtas_error_rc(int rtas_rc)
373{
374 int rc;
375
376 switch (rtas_rc) {
377 case -1: /* Hardware Error */
378 rc = -EIO;
379 break;
380 case -3: /* Bad indicator/domain/etc */
381 rc = -EINVAL;
382 break;
383 case -9000: /* Isolation error */
384 rc = -EFAULT;
385 break;
386 case -9001: /* Outstanding TCE/PTE */
387 rc = -EEXIST;
388 break;
389 case -9002: /* No usable slot */
390 rc = -ENODEV;
391 break;
392 default:
393 printk(KERN_ERR "%s: unexpected RTAS error %d\n",
394 __FUNCTION__, rtas_rc);
395 rc = -ERANGE;
396 break;
397 }
398 return rc;
399}
400
401int rtas_get_power_level(int powerdomain, int *level)
402{
403 int token = rtas_token("get-power-level");
404 int rc;
405
406 if (token == RTAS_UNKNOWN_SERVICE)
407 return -ENOENT;
408
409 while ((rc = rtas_call(token, 1, 2, level, powerdomain)) == RTAS_BUSY)
410 udelay(1);
411
412 if (rc < 0)
413 return rtas_error_rc(rc);
414 return rc;
415}
416
417int rtas_set_power_level(int powerdomain, int level, int *setlevel)
418{
419 int token = rtas_token("set-power-level");
420 unsigned int wait_time;
421 int rc;
422
423 if (token == RTAS_UNKNOWN_SERVICE)
424 return -ENOENT;
425
426 while (1) {
427 rc = rtas_call(token, 2, 2, setlevel, powerdomain, level);
428 if (rc == RTAS_BUSY)
429 udelay(1);
430 else if (rtas_is_extended_busy(rc)) {
431 wait_time = rtas_extended_busy_delay_time(rc);
432 udelay(wait_time * 1000);
433 } else
434 break;
435 }
436
437 if (rc < 0)
438 return rtas_error_rc(rc);
439 return rc;
440}
441
442int rtas_get_sensor(int sensor, int index, int *state)
443{
444 int token = rtas_token("get-sensor-state");
445 unsigned int wait_time;
446 int rc;
447
448 if (token == RTAS_UNKNOWN_SERVICE)
449 return -ENOENT;
450
451 while (1) {
452 rc = rtas_call(token, 2, 2, state, sensor, index);
453 if (rc == RTAS_BUSY)
454 udelay(1);
455 else if (rtas_is_extended_busy(rc)) {
456 wait_time = rtas_extended_busy_delay_time(rc);
457 udelay(wait_time * 1000);
458 } else
459 break;
460 }
461
462 if (rc < 0)
463 return rtas_error_rc(rc);
464 return rc;
465}
466
467int rtas_set_indicator(int indicator, int index, int new_value)
468{
469 int token = rtas_token("set-indicator");
470 unsigned int wait_time;
471 int rc;
472
473 if (token == RTAS_UNKNOWN_SERVICE)
474 return -ENOENT;
475
476 while (1) {
477 rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value);
478 if (rc == RTAS_BUSY)
479 udelay(1);
480 else if (rtas_is_extended_busy(rc)) {
481 wait_time = rtas_extended_busy_delay_time(rc);
482 udelay(wait_time * 1000);
483 }
484 else
485 break;
486 }
487
488 if (rc < 0)
489 return rtas_error_rc(rc);
490 return rc;
491}
492
493void rtas_restart(char *cmd)
494{
495 printk("RTAS system-reboot returned %d\n",
496 rtas_call(rtas_token("system-reboot"), 0, 1, NULL));
497 for (;;);
498}
499
500void rtas_power_off(void)
501{
502 /* allow power on only with power button press */
503 printk("RTAS power-off returned %d\n",
504 rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1));
505 for (;;);
506}
507
508void rtas_halt(void)
509{
510 rtas_power_off();
511}
512
513/* Must be in the RMO region, so we place it here */
514static char rtas_os_term_buf[2048];
515
516void rtas_os_term(char *str)
517{
518 int status;
519
520 if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term"))
521 return;
522
523 snprintf(rtas_os_term_buf, 2048, "OS panic: %s", str);
524
525 do {
526 status = rtas_call(rtas_token("ibm,os-term"), 1, 1, NULL,
527 __pa(rtas_os_term_buf));
528
529 if (status == RTAS_BUSY)
530 udelay(1);
531 else if (status != 0)
532 printk(KERN_EMERG "ibm,os-term call failed %d\n",
533 status);
534 } while (status == RTAS_BUSY);
535}
536
537
538asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
539{
540 struct rtas_args args;
541 unsigned long flags;
542 char *buff_copy, *errbuf = NULL;
543 int nargs;
544
545 if (!capable(CAP_SYS_ADMIN))
546 return -EPERM;
547
548 if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0)
549 return -EFAULT;
550
551 nargs = args.nargs;
552 if (nargs > ARRAY_SIZE(args.args)
553 || args.nret > ARRAY_SIZE(args.args)
554 || nargs + args.nret > ARRAY_SIZE(args.args))
555 return -EINVAL;
556
557 /* Copy in args. */
558 if (copy_from_user(args.args, uargs->args,
559 nargs * sizeof(rtas_arg_t)) != 0)
560 return -EFAULT;
561
562 buff_copy = get_errorlog_buffer();
563
564 spin_lock_irqsave(&rtas.lock, flags);
565
566 rtas.args = args;
567 enter_rtas(__pa(&rtas.args));
568 args = rtas.args;
569
570 args.rets = &args.args[nargs];
571
572 /* A -1 return code indicates that the last command couldn't
573 be completed due to a hardware error. */
574 if (args.rets[0] == -1)
575 errbuf = __fetch_rtas_last_error(buff_copy);
576
577 spin_unlock_irqrestore(&rtas.lock, flags);
578
579 if (buff_copy) {
580 if (errbuf)
581 log_error(errbuf, ERR_TYPE_RTAS_LOG, 0);
582 kfree(buff_copy);
583 }
584
585 /* Copy out args. */
586 if (copy_to_user(uargs->args + nargs,
587 args.args + nargs,
588 args.nret * sizeof(rtas_arg_t)) != 0)
589 return -EFAULT;
590
591 return 0;
592}
593
594#ifdef CONFIG_SMP
595/* This version can't take the spinlock, because it never returns */
596
597struct rtas_args rtas_stop_self_args = {
598 /* The token is initialized for real in setup_system() */
599 .token = RTAS_UNKNOWN_SERVICE,
600 .nargs = 0,
601 .nret = 1,
602 .rets = &rtas_stop_self_args.args[0],
603};
604
605void rtas_stop_self(void)
606{
607 struct rtas_args *rtas_args = &rtas_stop_self_args;
608
609 local_irq_disable();
610
611 BUG_ON(rtas_args->token == RTAS_UNKNOWN_SERVICE);
612
613 printk("cpu %u (hwid %u) Ready to die...\n",
614 smp_processor_id(), hard_smp_processor_id());
615 enter_rtas(__pa(rtas_args));
616
617 panic("Alas, I survived.\n");
618}
619#endif
620
621/*
622 * Call early during boot, before mem init or bootmem, to retrieve the RTAS
623 * information from the device-tree and allocate the RMO buffer for userland
624 * accesses.
625 */
626void __init rtas_initialize(void)
627{
628 unsigned long rtas_region = RTAS_INSTANTIATE_MAX;
629
630	/* Get RTAS dev node and fill in our "rtas" structure with info
631 * about it.
632 */
633 rtas.dev = of_find_node_by_name(NULL, "rtas");
634 if (rtas.dev) {
635 u32 *basep, *entryp;
636 u32 *sizep;
637
638 basep = (u32 *)get_property(rtas.dev, "linux,rtas-base", NULL);
639 sizep = (u32 *)get_property(rtas.dev, "rtas-size", NULL);
640 if (basep != NULL && sizep != NULL) {
641 rtas.base = *basep;
642 rtas.size = *sizep;
643 entryp = (u32 *)get_property(rtas.dev, "linux,rtas-entry", NULL);
644 if (entryp == NULL) /* Ugh */
645 rtas.entry = rtas.base;
646 else
647 rtas.entry = *entryp;
648 } else
649 rtas.dev = NULL;
650 }
651 if (!rtas.dev)
652 return;
653
654 /* If RTAS was found, allocate the RMO buffer for it and look for
655 * the stop-self token if any
656 */
657#ifdef CONFIG_PPC64
658 if (systemcfg->platform == PLATFORM_PSERIES_LPAR)
659 rtas_region = min(lmb.rmo_size, RTAS_INSTANTIATE_MAX);
660#endif
661 rtas_rmo_buf = lmb_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region);
662
663#ifdef CONFIG_HOTPLUG_CPU
664 rtas_stop_self_args.token = rtas_token("stop-self");
665#endif /* CONFIG_HOTPLUG_CPU */
666#ifdef CONFIG_RTAS_ERROR_LOGGING
667 rtas_last_error_token = rtas_token("rtas-last-error");
668#endif
669}
670
671
672EXPORT_SYMBOL(rtas_token);
673EXPORT_SYMBOL(rtas_call);
674EXPORT_SYMBOL(rtas_data_buf);
675EXPORT_SYMBOL(rtas_data_buf_lock);
676EXPORT_SYMBOL(rtas_extended_busy_delay_time);
677EXPORT_SYMBOL(rtas_get_sensor);
678EXPORT_SYMBOL(rtas_get_power_level);
679EXPORT_SYMBOL(rtas_set_power_level);
680EXPORT_SYMBOL(rtas_set_indicator);
diff --git a/arch/powerpc/kernel/semaphore.c b/arch/powerpc/kernel/semaphore.c
new file mode 100644
index 000000000000..2f8c3c951394
--- /dev/null
+++ b/arch/powerpc/kernel/semaphore.c
@@ -0,0 +1,135 @@
1/*
2 * PowerPC-specific semaphore code.
3 *
4 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * April 2001 - Reworked by Paul Mackerras <paulus@samba.org>
12 * to eliminate the SMP races in the old version between the updates
13 * of `count' and `waking'. Now we use negative `count' values to
14 * indicate that some process(es) are waiting for the semaphore.
15 */
16
17#include <linux/sched.h>
18#include <linux/init.h>
19#include <linux/module.h>
20
21#include <asm/atomic.h>
22#include <asm/semaphore.h>
23#include <asm/errno.h>
24
25/*
26 * Atomically update sem->count.
27 * This does the equivalent of the following:
28 *
29 * old_count = sem->count;
30 * tmp = MAX(old_count, 0) + incr;
31 * sem->count = tmp;
32 * return old_count;
33 */
34static inline int __sem_update_count(struct semaphore *sem, int incr)
35{
36 int old_count, tmp;
37
38 __asm__ __volatile__("\n"
39"1: lwarx %0,0,%3\n"
40" srawi %1,%0,31\n"
41" andc %1,%0,%1\n"
42" add %1,%1,%4\n"
43 PPC405_ERR77(0,%3)
44" stwcx. %1,0,%3\n"
45" bne 1b"
46 : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
47 : "r" (&sem->count), "r" (incr), "m" (sem->count)
48 : "cc");
49
50 return old_count;
51}
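
/*
 * How the asm implements MAX(old_count, 0) (explanatory note, not part
 * of the original commit): srawi replicates the sign bit of old_count
 * across tmp (all ones when negative, zero otherwise), andc then masks
 * old_count to 0 in the negative case before incr is added, and the
 * stwcx./bne pair retries the store if another cpu updated count in
 * the meantime.
 */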
52
53void __up(struct semaphore *sem)
54{
55 /*
56 * Note that we incremented count in up() before we came here,
57 * but that was ineffective since the result was <= 0, and
58 * any negative value of count is equivalent to 0.
59 * This ends up setting count to 1, unless count is now > 0
60 * (i.e. because some other cpu has called up() in the meantime),
61 * in which case we just increment count.
62 */
63 __sem_update_count(sem, 1);
64 wake_up(&sem->wait);
65}
66EXPORT_SYMBOL(__up);
67
68/*
69 * Note that when we come in to __down or __down_interruptible,
70 * we have already decremented count, but that decrement was
71 * ineffective since the result was < 0, and any negative value
72 * of count is equivalent to 0.
73 * Thus it is only when we decrement count from some value > 0
74 * that we have actually got the semaphore.
75 */
76void __sched __down(struct semaphore *sem)
77{
78 struct task_struct *tsk = current;
79 DECLARE_WAITQUEUE(wait, tsk);
80
81 __set_task_state(tsk, TASK_UNINTERRUPTIBLE);
82 add_wait_queue_exclusive(&sem->wait, &wait);
83
84 /*
85 * Try to get the semaphore. If the count is > 0, then we've
86 * got the semaphore; we decrement count and exit the loop.
87 * If the count is 0 or negative, we set it to -1, indicating
88 * that we are asleep, and then sleep.
89 */
90 while (__sem_update_count(sem, -1) <= 0) {
91 schedule();
92 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
93 }
94 remove_wait_queue(&sem->wait, &wait);
95 __set_task_state(tsk, TASK_RUNNING);
96
97 /*
98 * If there are any more sleepers, wake one of them up so
99 * that it can either get the semaphore, or set count to -1
100 * indicating that there are still processes sleeping.
101 */
102 wake_up(&sem->wait);
103}
104EXPORT_SYMBOL(__down);
105
106int __sched __down_interruptible(struct semaphore * sem)
107{
108 int retval = 0;
109 struct task_struct *tsk = current;
110 DECLARE_WAITQUEUE(wait, tsk);
111
112 __set_task_state(tsk, TASK_INTERRUPTIBLE);
113 add_wait_queue_exclusive(&sem->wait, &wait);
114
115 while (__sem_update_count(sem, -1) <= 0) {
116 if (signal_pending(current)) {
117 /*
118 * A signal is pending - give up trying.
119 * Set sem->count to 0 if it is negative,
120 * since we are no longer sleeping.
121 */
122 __sem_update_count(sem, 0);
123 retval = -EINTR;
124 break;
125 }
126 schedule();
127 set_task_state(tsk, TASK_INTERRUPTIBLE);
128 }
129 remove_wait_queue(&sem->wait, &wait);
130 __set_task_state(tsk, TASK_RUNNING);
131
132 wake_up(&sem->wait);
133 return retval;
134}
135EXPORT_SYMBOL(__down_interruptible);
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
new file mode 100644
index 000000000000..1292460fcde2
--- /dev/null
+++ b/arch/powerpc/kernel/setup-common.c
@@ -0,0 +1,410 @@
1/*
2 * Common boot and setup code for both 32-bit and 64-bit.
3 * Extracted from arch/powerpc/kernel/setup_64.c.
4 *
5 * Copyright (C) 2001 PPC64 Team, IBM Corp
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12#include <linux/config.h>
13#include <linux/module.h>
14#include <linux/string.h>
15#include <linux/sched.h>
16#include <linux/init.h>
17#include <linux/kernel.h>
18#include <linux/reboot.h>
19#include <linux/delay.h>
20#include <linux/initrd.h>
21#include <linux/ide.h>
22#include <linux/seq_file.h>
23#include <linux/ioport.h>
24#include <linux/console.h>
25#include <linux/utsname.h>
26#include <linux/tty.h>
27#include <linux/root_dev.h>
28#include <linux/notifier.h>
29#include <linux/cpu.h>
30#include <linux/unistd.h>
31#include <linux/serial.h>
32#include <linux/serial_8250.h>
33#include <asm/io.h>
34#include <asm/prom.h>
35#include <asm/processor.h>
36#include <asm/pgtable.h>
37#include <asm/smp.h>
38#include <asm/elf.h>
39#include <asm/machdep.h>
40#include <asm/time.h>
41#include <asm/cputable.h>
42#include <asm/sections.h>
43#include <asm/btext.h>
44#include <asm/nvram.h>
45#include <asm/setup.h>
46#include <asm/system.h>
47#include <asm/rtas.h>
48#include <asm/iommu.h>
49#include <asm/serial.h>
50#include <asm/cache.h>
51#include <asm/page.h>
52#include <asm/mmu.h>
53#include <asm/lmb.h>
54
55#undef DEBUG
56
57#ifdef DEBUG
58#define DBG(fmt...) udbg_printf(fmt)
59#else
60#define DBG(fmt...)
61#endif
62
63/*
64 * This still seems to be needed... -- paulus
65 */
66struct screen_info screen_info = {
67 .orig_x = 0,
68 .orig_y = 25,
69 .orig_video_cols = 80,
70 .orig_video_lines = 25,
71 .orig_video_isVGA = 1,
72 .orig_video_points = 16
73};
74
75#ifdef __DO_IRQ_CANON
76/* XXX should go elsewhere eventually */
77int ppc_do_canonicalize_irqs;
78EXPORT_SYMBOL(ppc_do_canonicalize_irqs);
79#endif
80
81/* also used by kexec */
82void machine_shutdown(void)
83{
84 if (ppc_md.nvram_sync)
85 ppc_md.nvram_sync();
86}
87
88void machine_restart(char *cmd)
89{
90 machine_shutdown();
91 ppc_md.restart(cmd);
92#ifdef CONFIG_SMP
93 smp_send_stop();
94#endif
95 printk(KERN_EMERG "System Halted, OK to turn off power\n");
96 local_irq_disable();
97 while (1) ;
98}
99
100void machine_power_off(void)
101{
102 machine_shutdown();
103 ppc_md.power_off();
104#ifdef CONFIG_SMP
105 smp_send_stop();
106#endif
107 printk(KERN_EMERG "System Halted, OK to turn off power\n");
108 local_irq_disable();
109 while (1) ;
110}
111/* Used by the G5 thermal driver */
112EXPORT_SYMBOL_GPL(machine_power_off);
113
114void (*pm_power_off)(void) = machine_power_off;
115EXPORT_SYMBOL_GPL(pm_power_off);
116
117void machine_halt(void)
118{
119 machine_shutdown();
120 ppc_md.halt();
121#ifdef CONFIG_SMP
122 smp_send_stop();
123#endif
124 printk(KERN_EMERG "System Halted, OK to turn off power\n");
125 local_irq_disable();
126 while (1) ;
127}
128
129
130#ifdef CONFIG_TAU
131extern u32 cpu_temp(unsigned long cpu);
132extern u32 cpu_temp_both(unsigned long cpu);
133#endif /* CONFIG_TAU */
134
135#ifdef CONFIG_SMP
136DEFINE_PER_CPU(unsigned int, pvr);
137#endif
138
139static int show_cpuinfo(struct seq_file *m, void *v)
140{
141 unsigned long cpu_id = (unsigned long)v - 1;
142 unsigned int pvr;
143 unsigned short maj;
144 unsigned short min;
145
146 if (cpu_id == NR_CPUS) {
147#if defined(CONFIG_SMP) && defined(CONFIG_PPC32)
148 unsigned long bogosum = 0;
149 int i;
150 for (i = 0; i < NR_CPUS; ++i)
151 if (cpu_online(i))
152 bogosum += loops_per_jiffy;
153 seq_printf(m, "total bogomips\t: %lu.%02lu\n",
154 bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
155#endif /* CONFIG_SMP && CONFIG_PPC32 */
156 seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);
157
158 if (ppc_md.show_cpuinfo != NULL)
159 ppc_md.show_cpuinfo(m);
160
161 return 0;
162 }
163
164 /* We only show online cpus: disable preempt (overzealous, I
165	 * know) to prevent cpu going down. */
166 preempt_disable();
167 if (!cpu_online(cpu_id)) {
168 preempt_enable();
169 return 0;
170 }
171
172#ifdef CONFIG_SMP
173#ifdef CONFIG_PPC64 /* XXX for now */
174 pvr = per_cpu(pvr, cpu_id);
175#else
176 pvr = cpu_data[cpu_id].pvr;
177#endif
178#else
179 pvr = mfspr(SPRN_PVR);
180#endif
181 maj = (pvr >> 8) & 0xFF;
182 min = pvr & 0xFF;
183
184 seq_printf(m, "processor\t: %lu\n", cpu_id);
185 seq_printf(m, "cpu\t\t: ");
186
187 if (cur_cpu_spec->pvr_mask)
188 seq_printf(m, "%s", cur_cpu_spec->cpu_name);
189 else
190 seq_printf(m, "unknown (%08x)", pvr);
191
192#ifdef CONFIG_ALTIVEC
193 if (cpu_has_feature(CPU_FTR_ALTIVEC))
194 seq_printf(m, ", altivec supported");
195#endif /* CONFIG_ALTIVEC */
196
197 seq_printf(m, "\n");
198
199#ifdef CONFIG_TAU
200 if (cur_cpu_spec->cpu_features & CPU_FTR_TAU) {
201#ifdef CONFIG_TAU_AVERAGE
202 /* more straightforward, but potentially misleading */
203 seq_printf(m, "temperature \t: %u C (uncalibrated)\n",
204 cpu_temp(cpu_id));
205#else
206 /* show the actual temp sensor range */
207 u32 temp;
208 temp = cpu_temp_both(cpu_id);
209 seq_printf(m, "temperature \t: %u-%u C (uncalibrated)\n",
210 temp & 0xff, temp >> 16);
211#endif
212 }
213#endif /* CONFIG_TAU */
214
215 /*
216 * Assume here that all clock rates are the same in a
217 * smp system. -- Cort
218 */
219 if (ppc_proc_freq)
220 seq_printf(m, "clock\t\t: %lu.%06luMHz\n",
221 ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
222
223 if (ppc_md.show_percpuinfo != NULL)
224 ppc_md.show_percpuinfo(m, cpu_id);
225
226 /* If we are a Freescale core, do a simple check so
227 * we don't have to keep adding cases in the future */
228 if (PVR_VER(pvr) & 0x8000) {
229 maj = PVR_MAJ(pvr);
230 min = PVR_MIN(pvr);
231 } else {
232 switch (PVR_VER(pvr)) {
233 case 0x0020: /* 403 family */
234 maj = PVR_MAJ(pvr) + 1;
235 min = PVR_MIN(pvr);
236 break;
237 case 0x1008: /* 740P/750P ?? */
238 maj = ((pvr >> 8) & 0xFF) - 1;
239 min = pvr & 0xFF;
240 break;
241 default:
242 maj = (pvr >> 8) & 0xFF;
243 min = pvr & 0xFF;
244 break;
245 }
246 }
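/*
 * Worked example of the PVR decoding above (values illustrative):
 * the 32-bit PVR holds the version in its top half and the revision
 * in its bottom half. A 750 reporting pvr 0x00080202 has no 0x8000
 * bit in PVR_VER, takes the default branch, and yields
 * maj = (pvr >> 8) & 0xFF = 2 and min = pvr & 0xFF = 2, printed
 * below as "revision: 2.2 (pvr 0008 0202)". Freescale cores set
 * bit 0x8000 in PVR_VER and are decoded with PVR_MAJ/PVR_MIN.
 */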
247
248 seq_printf(m, "revision\t: %hd.%hd (pvr %04x %04x)\n",
249 maj, min, PVR_VER(pvr), PVR_REV(pvr));
250
251#ifdef CONFIG_PPC32
252 seq_printf(m, "bogomips\t: %lu.%02lu\n",
253 loops_per_jiffy / (500000/HZ),
254 (loops_per_jiffy / (5000/HZ)) % 100);
255#endif
256
257#ifdef CONFIG_SMP
258 seq_printf(m, "\n");
259#endif
260
261 preempt_enable();
262 return 0;
263}
264
265static void *c_start(struct seq_file *m, loff_t *pos)
266{
267 unsigned long i = *pos;
268
269 return i <= NR_CPUS ? (void *)(i + 1) : NULL;
270}
271
272static void *c_next(struct seq_file *m, void *v, loff_t *pos)
273{
274 ++*pos;
275 return c_start(m, pos);
276}
277
278static void c_stop(struct seq_file *m, void *v)
279{
280}
281
282struct seq_operations cpuinfo_op = {
283 .start = c_start,
284 .next = c_next,
285 .stop = c_stop,
286 .show = show_cpuinfo,
287};
288
289#ifdef CONFIG_PPC_MULTIPLATFORM
290static int __init set_preferred_console(void)
291{
292 struct device_node *prom_stdout = NULL;
293 char *name;
294 u32 *spd;
295 int offset = 0;
296
297 DBG(" -> set_preferred_console()\n");
298
299 /* The user has requested a console so this is already set up. */
300 if (strstr(saved_command_line, "console=")) {
301 DBG(" console was specified !\n");
302 return -EBUSY;
303 }
304
305 if (!of_chosen) {
306 DBG(" of_chosen is NULL !\n");
307 return -ENODEV;
308 }
309 /* We are getting a weird phandle from OF,
310 * so use the full path instead */
311 name = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
312 if (name == NULL) {
313 DBG(" no linux,stdout-path !\n");
314 return -ENODEV;
315 }
316 prom_stdout = of_find_node_by_path(name);
317 if (!prom_stdout) {
318 DBG(" can't find stdout package %s !\n", name);
319 return -ENODEV;
320 }
321 DBG("stdout is %s\n", prom_stdout->full_name);
322
323 name = (char *)get_property(prom_stdout, "name", NULL);
324 if (!name) {
325 DBG(" stdout package has no name !\n");
326 goto not_found;
327 }
328 spd = (u32 *)get_property(prom_stdout, "current-speed", NULL);
329
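/* dummy branch so the #ifdef'ed "else if" chain below always has a head */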
330 if (0)
331 ;
332#ifdef CONFIG_SERIAL_8250_CONSOLE
333 else if (strcmp(name, "serial") == 0) {
334 int i;
335 u32 *reg = (u32 *)get_property(prom_stdout, "reg", &i);
336 if (i > 8) {
337 switch (reg[1]) {
338 case 0x3f8:
339 offset = 0;
340 break;
341 case 0x2f8:
342 offset = 1;
343 break;
344 case 0x898:
345 offset = 2;
346 break;
347 case 0x890:
348 offset = 3;
349 break;
350 default:
351 /* We don't recognise the serial port */
352 goto not_found;
353 }
354 }
355 }
356#endif /* CONFIG_SERIAL_8250_CONSOLE */
357#ifdef CONFIG_PPC_PSERIES
358 else if (strcmp(name, "vty") == 0) {
359 u32 *reg = (u32 *)get_property(prom_stdout, "reg", NULL);
360 char *compat = (char *)get_property(prom_stdout, "compatible", NULL);
361
362 if (reg && compat && (strcmp(compat, "hvterm-protocol") == 0)) {
363 /* Host Virtual Serial Interface */
364 switch (reg[0]) {
365 case 0x30000000:
366 offset = 0;
367 break;
368 case 0x30000001:
369 offset = 1;
370 break;
371 default:
372 goto not_found;
373 }
374 of_node_put(prom_stdout);
375 DBG("Found hvsi console at offset %d\n", offset);
376 return add_preferred_console("hvsi", offset, NULL);
377 } else {
378 /* pSeries LPAR virtual console */
379 of_node_put(prom_stdout);
380 DBG("Found hvc console\n");
381 return add_preferred_console("hvc", 0, NULL);
382 }
383 }
384#endif /* CONFIG_PPC_PSERIES */
385#ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE
386 else if (strcmp(name, "ch-a") == 0)
387 offset = 0;
388 else if (strcmp(name, "ch-b") == 0)
389 offset = 1;
390#endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */
391 else
392 goto not_found;
393 of_node_put(prom_stdout);
394
395 DBG("Found serial console at ttyS%d\n", offset);
396
397 if (spd) {
398 static char __initdata opt[16];
399 sprintf(opt, "%d", *spd);
400 return add_preferred_console("ttyS", offset, opt);
401 } else
402 return add_preferred_console("ttyS", offset, NULL);
403
404 not_found:
405 DBG("No preferred console found !\n");
406 of_node_put(prom_stdout);
407 return -ENODEV;
408}
409console_initcall(set_preferred_console);
410#endif /* CONFIG_PPC_MULTIPLATFORM */
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
new file mode 100644
index 000000000000..9680ae99b084
--- /dev/null
+++ b/arch/powerpc/kernel/setup_32.c
@@ -0,0 +1,372 @@
1/*
2 * Common prep/pmac/chrp boot and setup code.
3 */
4
5#include <linux/config.h>
6#include <linux/module.h>
7#include <linux/string.h>
8#include <linux/sched.h>
9#include <linux/init.h>
10#include <linux/kernel.h>
11#include <linux/reboot.h>
12#include <linux/delay.h>
13#include <linux/initrd.h>
14#include <linux/ide.h>
15#include <linux/tty.h>
16#include <linux/bootmem.h>
17#include <linux/seq_file.h>
18#include <linux/root_dev.h>
19#include <linux/cpu.h>
20#include <linux/console.h>
21
22#include <asm/residual.h>
23#include <asm/io.h>
24#include <asm/prom.h>
25#include <asm/processor.h>
26#include <asm/pgtable.h>
27#include <asm/setup.h>
28#include <asm/amigappc.h>
29#include <asm/smp.h>
30#include <asm/elf.h>
31#include <asm/cputable.h>
32#include <asm/bootx.h>
33#include <asm/btext.h>
34#include <asm/machdep.h>
35#include <asm/uaccess.h>
36#include <asm/system.h>
37#include <asm/pmac_feature.h>
38#include <asm/sections.h>
39#include <asm/nvram.h>
40#include <asm/xmon.h>
41#include <asm/time.h>
42
43#define DBG(fmt...)
44
45#if defined(CONFIG_KGDB)
46#include <asm/kgdb.h>
47#endif
48
49extern void platform_init(void);
50extern void bootx_init(unsigned long r4, unsigned long phys);
51
52extern void ppc6xx_idle(void);
53extern void power4_idle(void);
54
55boot_infos_t *boot_infos;
56struct ide_machdep_calls ppc_ide_md;
57
58/* XXX should go elsewhere */
59int __irq_offset_value;
60EXPORT_SYMBOL(__irq_offset_value);
61
62int boot_cpuid;
63EXPORT_SYMBOL_GPL(boot_cpuid);
64int boot_cpuid_phys;
65
66unsigned long ISA_DMA_THRESHOLD;
67unsigned int DMA_MODE_READ;
68unsigned int DMA_MODE_WRITE;
69
70int have_of = 1;
71
72#ifdef CONFIG_PPC_MULTIPLATFORM
73int _machine = 0;
74
75extern void prep_init(void);
76extern void pmac_init(void);
77extern void chrp_init(void);
78
79dev_t boot_dev;
80#endif /* CONFIG_PPC_MULTIPLATFORM */
81
82#ifdef CONFIG_MAGIC_SYSRQ
83unsigned long SYSRQ_KEY = 0x54;
84#endif /* CONFIG_MAGIC_SYSRQ */
85
86#ifdef CONFIG_VGA_CONSOLE
87unsigned long vgacon_remap_base;
88#endif
89
90struct machdep_calls ppc_md;
91EXPORT_SYMBOL(ppc_md);
92
93/*
94 * These are used in binfmt_elf.c to put aux entries on the stack
95 * for each elf executable being started.
96 */
97int dcache_bsize;
98int icache_bsize;
99int ucache_bsize;
100
101/*
102 * We're called here very early in the boot. We determine the machine
103 * type and call the appropriate low-level setup functions.
104 * -- Cort <cort@fsmlabs.com>
105 *
106 * Note that the kernel may be running at an address which is different
107 * from the address that it was linked at, so we must use RELOC/PTRRELOC
108 * to access static data (including strings). -- paulus
109 */
110unsigned long __init early_init(unsigned long dt_ptr)
111{
112 unsigned long offset = reloc_offset();
113
114 /* First zero the BSS -- use memset_io, some platforms don't have
115 * caches on yet */
116 memset_io(PTRRELOC(&__bss_start), 0, _end - __bss_start);
117
118 /*
119 * Identify the CPU type and fix up code sections
120 * that depend on which cpu we have.
121 */
122 identify_cpu(offset, 0);
123 do_cpu_ftr_fixups(offset);
124
125 return KERNELBASE + offset;
126}
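/*
 * Illustrative note on the RELOC/PTRRELOC access mentioned in the
 * comment above (a sketch only; some_static is a hypothetical
 * variable, not from this file): while the kernel still runs at an
 * address different from its link address, a static object must be
 * reached through the relocation offset, along the lines of:
 *
 *	unsigned long offset = reloc_offset();
 *	int *p = PTRRELOC(&some_static);   (i.e. &some_static + offset)
 */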
127
128#ifdef CONFIG_PPC_MULTIPLATFORM
129/*
130 * The PPC_MULTIPLATFORM version of platform_init...
131 */
132void __init platform_init(void)
133{
134 /* if we didn't get any bootinfo telling us what we are... */
135 if (_machine == 0) {
136 /* prep boot loader tells us if we're prep or not */
137 if ( *(unsigned long *)(KERNELBASE) == (0xdeadc0de) )
138 _machine = _MACH_prep;
139 }
140
141#ifdef CONFIG_PPC_PREP
142 /* not much more to do here, if prep */
143 if (_machine == _MACH_prep) {
144 prep_init();
145 return;
146 }
147#endif
148
149#ifdef CONFIG_ADB
150 if (strstr(cmd_line, "adb_sync")) {
151 extern int __adb_probe_sync;
152 __adb_probe_sync = 1;
153 }
154#endif /* CONFIG_ADB */
155
156 switch (_machine) {
157#ifdef CONFIG_PPC_PMAC
158 case _MACH_Pmac:
159 pmac_init();
160 break;
161#endif
162#ifdef CONFIG_PPC_CHRP
163 case _MACH_chrp:
164 chrp_init();
165 break;
166#endif
167 }
168}
169#endif
170
171/*
172 * Find out what kind of machine we're on and save any data we need
173 * from the early boot process (devtree is copied on pmac by prom_init()).
174 * This is called very early in the boot process, after a minimal
175 * MMU environment has been set up but before MMU_init is called.
176 */
177void __init machine_init(unsigned long dt_ptr, unsigned long phys)
178{
179 early_init_devtree(__va(dt_ptr));
180
181#ifdef CONFIG_CMDLINE
182 strlcpy(cmd_line, CONFIG_CMDLINE, sizeof(cmd_line));
183#endif /* CONFIG_CMDLINE */
184
185 platform_init();
186
187#ifdef CONFIG_6xx
188 ppc_md.power_save = ppc6xx_idle;
189#endif
190
191 if (ppc_md.progress)
192 ppc_md.progress("id mach(): done", 0x200);
193}
194
195#ifdef CONFIG_BOOKE_WDT
196/* Checks the wdt=x and wdt_period=xx command-line options */
197int __init early_parse_wdt(char *p)
198{
199 if (p && strncmp(p, "0", 1) != 0)
200 booke_wdt_enabled = 1;
201
202 return 0;
203}
204early_param("wdt", early_parse_wdt);
205
206int __init early_parse_wdt_period (char *p)
207{
208 if (p)
209 booke_wdt_period = simple_strtoul(p, NULL, 0);
210
211 return 0;
212}
213early_param("wdt_period", early_parse_wdt_period);
214#endif /* CONFIG_BOOKE_WDT */
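/*
 * Usage example (hypothetical command line): booting with
 * "wdt=1 wdt_period=30" sets booke_wdt_enabled and programs
 * booke_wdt_period to 30 via the two early_param hooks above.
 */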
215
216/* Checks "l2cr=xxxx" command-line option */
217int __init ppc_setup_l2cr(char *str)
218{
219 if (cpu_has_feature(CPU_FTR_L2CR)) {
220 unsigned long val = simple_strtoul(str, NULL, 0);
221 printk(KERN_INFO "l2cr set to %lx\n", val);
222 _set_L2CR(0); /* force invalidate by disable cache */
223 _set_L2CR(val); /* and enable it */
224 }
225 return 1;
226}
227__setup("l2cr=", ppc_setup_l2cr);
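/*
 * Usage example: booting with "l2cr=0x80000000" would invalidate and
 * then re-enable the L2 cache on a CPU with CPU_FTR_L2CR; the L2CR
 * bit layout is CPU-specific, so the value is purely illustrative.
 */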
228
229#ifdef CONFIG_GENERIC_NVRAM
230
231/* Generic nvram hooks used by drivers/char/gen_nvram.c */
232unsigned char nvram_read_byte(int addr)
233{
234 if (ppc_md.nvram_read_val)
235 return ppc_md.nvram_read_val(addr);
236 return 0xff;
237}
238EXPORT_SYMBOL(nvram_read_byte);
239
240void nvram_write_byte(unsigned char val, int addr)
241{
242 if (ppc_md.nvram_write_val)
243 ppc_md.nvram_write_val(addr, val);
244}
245EXPORT_SYMBOL(nvram_write_byte);
246
247void nvram_sync(void)
248{
249 if (ppc_md.nvram_sync)
250 ppc_md.nvram_sync();
251}
252EXPORT_SYMBOL(nvram_sync);
253
254#endif /* CONFIG_GENERIC_NVRAM */
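/*
 * Sketch of how a platform would typically wire these hooks up (the
 * myplat_* names and nvram base are hypothetical, not from this
 * file):
 *
 *	static u8 __iomem *myplat_nvram_base;
 *
 *	static unsigned char myplat_nvram_read_val(int addr)
 *	{
 *		return in_8(myplat_nvram_base + addr);
 *	}
 *
 *	...
 *	ppc_md.nvram_read_val = myplat_nvram_read_val;
 *	ppc_md.nvram_write_val = myplat_nvram_write_val;
 */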
255
256static struct cpu cpu_devices[NR_CPUS];
257
258int __init ppc_init(void)
259{
260 int i;
261
262 /* clear the progress line */
263 if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff);
264
265 /* register CPU devices */
266 for (i = 0; i < NR_CPUS; i++)
267 if (cpu_possible(i))
268 register_cpu(&cpu_devices[i], i, NULL);
269
270 /* call platform init */
271 if (ppc_md.init != NULL) {
272 ppc_md.init();
273 }
274 return 0;
275}
276
277arch_initcall(ppc_init);
278
279/* Warning, IO base is not yet inited */
280void __init setup_arch(char **cmdline_p)
281{
282 extern char *klimit;
283 extern void do_init_bootmem(void);
284
285 /* so udelay does something sensible, assume <= 1000 bogomips */
286 loops_per_jiffy = 500000000 / HZ;
287
288 unflatten_device_tree();
289 finish_device_tree();
290
291#ifdef CONFIG_BOOTX_TEXT
292 init_boot_display();
293#endif
294
295#ifdef CONFIG_PPC_PMAC
296 /* This could be called "early setup arch"; it must be done
297 * now because xmon needs it
298 */
299 if (_machine == _MACH_Pmac)
300 pmac_feature_init(); /* New cool way */
301#endif
302
303#ifdef CONFIG_XMON
304 xmon_map_scc();
305 if (strstr(cmd_line, "xmon")) {
306 xmon_init(1);
307 debugger(NULL);
308 }
309#endif /* CONFIG_XMON */
310 if ( ppc_md.progress ) ppc_md.progress("setup_arch: enter", 0x3eab);
311
312#if defined(CONFIG_KGDB)
313 if (ppc_md.kgdb_map_scc)
314 ppc_md.kgdb_map_scc();
315 set_debug_traps();
316 if (strstr(cmd_line, "gdb")) {
317 if (ppc_md.progress)
318 ppc_md.progress("setup_arch: kgdb breakpoint", 0x4000);
319 printk("kgdb breakpoint activated\n");
320 breakpoint();
321 }
322#endif
323
324 /*
325 * Set cache line size based on type of cpu as a default.
326 * Systems with OF can look in the properties on the cpu node(s)
327 * for a possibly more accurate value.
328 */
329 if (cpu_has_feature(CPU_FTR_SPLIT_ID_CACHE)) {
330 dcache_bsize = cur_cpu_spec->dcache_bsize;
331 icache_bsize = cur_cpu_spec->icache_bsize;
332 ucache_bsize = 0;
333 } else
334 ucache_bsize = dcache_bsize = icache_bsize
335 = cur_cpu_spec->dcache_bsize;
336
337 /* reboot on panic */
338 panic_timeout = 180;
339
340 init_mm.start_code = PAGE_OFFSET;
341 init_mm.end_code = (unsigned long) _etext;
342 init_mm.end_data = (unsigned long) _edata;
343 init_mm.brk = (unsigned long) klimit;
344
345 /* Save unparsed command line copy for /proc/cmdline */
346 strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
347 *cmdline_p = cmd_line;
348
349 parse_early_param();
350
351 /* set up the bootmem stuff with available memory */
352 do_init_bootmem();
353 if ( ppc_md.progress ) ppc_md.progress("setup_arch: bootmem", 0x3eab);
354
355#ifdef CONFIG_PPC_OCP
356 /* Initialize OCP device list */
357 ocp_early_init();
358 if ( ppc_md.progress ) ppc_md.progress("ocp: exit", 0x3eab);
359#endif
360
361#ifdef CONFIG_DUMMY_CONSOLE
362 conswitchp = &dummy_con;
363#endif
364
365 ppc_md.setup_arch();
366 if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab);
367
368 paging_init();
369
370 /* this is for modules since _machine can be a define -- Cort */
371 ppc_md.ppc_machine = _machine;
372}
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
new file mode 100644
index 000000000000..40c48100bf1b
--- /dev/null
+++ b/arch/powerpc/kernel/setup_64.c
@@ -0,0 +1,1028 @@
1/*
2 *
3 * Common boot and setup code.
4 *
5 * Copyright (C) 2001 PPC64 Team, IBM Corp
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#undef DEBUG
14
15#include <linux/config.h>
16#include <linux/module.h>
17#include <linux/string.h>
18#include <linux/sched.h>
19#include <linux/init.h>
20#include <linux/kernel.h>
21#include <linux/reboot.h>
22#include <linux/delay.h>
23#include <linux/initrd.h>
24#include <linux/ide.h>
25#include <linux/seq_file.h>
26#include <linux/ioport.h>
27#include <linux/console.h>
28#include <linux/utsname.h>
29#include <linux/tty.h>
30#include <linux/root_dev.h>
31#include <linux/notifier.h>
32#include <linux/cpu.h>
33#include <linux/unistd.h>
34#include <linux/serial.h>
35#include <linux/serial_8250.h>
36#include <asm/io.h>
37#include <asm/prom.h>
38#include <asm/processor.h>
39#include <asm/pgtable.h>
40#include <asm/smp.h>
41#include <asm/elf.h>
42#include <asm/machdep.h>
43#include <asm/paca.h>
44#include <asm/ppcdebug.h>
45#include <asm/time.h>
46#include <asm/cputable.h>
47#include <asm/sections.h>
48#include <asm/btext.h>
49#include <asm/nvram.h>
50#include <asm/setup.h>
51#include <asm/system.h>
52#include <asm/rtas.h>
53#include <asm/iommu.h>
54#include <asm/serial.h>
55#include <asm/cache.h>
56#include <asm/page.h>
57#include <asm/mmu.h>
58#include <asm/lmb.h>
59#include <asm/iSeries/ItLpNaca.h>
60#include <asm/firmware.h>
61#include <asm/systemcfg.h>
62#include <asm/xmon.h>
63
64#ifdef DEBUG
65#define DBG(fmt...) udbg_printf(fmt)
66#else
67#define DBG(fmt...)
68#endif
69
70/*
71 * Here are some early debugging facilities. You can enable one
72 * but your kernel will not boot on anything else if you do so
73 */
74
75/* This one is for use on LPAR machines that support an HVC console
76 * on vterm 0
77 */
78extern void udbg_init_debug_lpar(void);
79/* This one is for use on Apple G5 machines
80 */
81extern void udbg_init_pmac_realmode(void);
82/* That's RTAS panel debug */
83extern void call_rtas_display_status_delay(unsigned char c);
84/* Here's maple real mode debug */
85extern void udbg_init_maple_realmode(void);
86
87#define EARLY_DEBUG_INIT() do {} while(0)
88
89#if 0
90#define EARLY_DEBUG_INIT() udbg_init_debug_lpar()
91#define EARLY_DEBUG_INIT() udbg_init_maple_realmode()
92#define EARLY_DEBUG_INIT() udbg_init_pmac_realmode()
93#define EARLY_DEBUG_INIT() \
94 do { udbg_putc = call_rtas_display_status_delay; } while(0)
95#endif
96
97/* extern void *stab; */
98extern unsigned long klimit;
99
100extern void mm_init_ppc64(void);
101extern void stab_initialize(unsigned long stab);
102extern void htab_initialize(void);
103extern void early_init_devtree(void *flat_dt);
104extern void unflatten_device_tree(void);
105
106extern void smp_release_cpus(void);
107
108int have_of = 1;
109int boot_cpuid = 0;
110int boot_cpuid_phys = 0;
111dev_t boot_dev;
112u64 ppc64_pft_size;
113
114struct ppc64_caches ppc64_caches;
115EXPORT_SYMBOL_GPL(ppc64_caches);
116
117/*
118 * These are used in binfmt_elf.c to put aux entries on the stack
119 * for each elf executable being started.
120 */
121int dcache_bsize;
122int icache_bsize;
123int ucache_bsize;
124
125/* The main machine-dep calls structure
126 */
127struct machdep_calls ppc_md;
128EXPORT_SYMBOL(ppc_md);
129
130#ifdef CONFIG_MAGIC_SYSRQ
131unsigned long SYSRQ_KEY;
132#endif /* CONFIG_MAGIC_SYSRQ */
133
134
135static int ppc64_panic_event(struct notifier_block *, unsigned long, void *);
136static struct notifier_block ppc64_panic_block = {
137 .notifier_call = ppc64_panic_event,
138 .priority = INT_MIN /* may not return; must be done last */
139};
140
141#ifdef CONFIG_SMP
142
143static int smt_enabled_cmdline;
144
145/* Look for ibm,smt-enabled OF option */
146static void check_smt_enabled(void)
147{
148 struct device_node *dn;
149 char *smt_option;
150
151 /* Allow the command line to overrule the OF option */
152 if (smt_enabled_cmdline)
153 return;
154
155 dn = of_find_node_by_path("/options");
156
157 if (dn) {
158 smt_option = (char *)get_property(dn, "ibm,smt-enabled", NULL);
159
160 if (smt_option) {
161 if (!strcmp(smt_option, "on"))
162 smt_enabled_at_boot = 1;
163 else if (!strcmp(smt_option, "off"))
164 smt_enabled_at_boot = 0;
165 }
166 }
167}
168
169/* Look for smt-enabled= cmdline option */
170static int __init early_smt_enabled(char *p)
171{
172 smt_enabled_cmdline = 1;
173
174 if (!p)
175 return 0;
176
177 if (!strcmp(p, "on") || !strcmp(p, "1"))
178 smt_enabled_at_boot = 1;
179 else if (!strcmp(p, "off") || !strcmp(p, "0"))
180 smt_enabled_at_boot = 0;
181
182 return 0;
183}
184early_param("smt-enabled", early_smt_enabled);
185
186/**
187 * setup_cpu_maps - initialize the following cpu maps:
188 * cpu_possible_map
189 * cpu_present_map
190 * cpu_sibling_map
191 *
192 * Having the possible map set up early allows us to restrict allocations
193 * of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
194 *
195 * We do not initialize the online map here; cpus set their own bits in
196 * cpu_online_map as they come up.
197 *
198 * This function is valid only for Open Firmware systems. finish_device_tree
199 * must be called before using this.
200 *
201 * While we're here, we may as well set the "physical" cpu ids in the paca.
202 */
203static void __init setup_cpu_maps(void)
204{
205 struct device_node *dn = NULL;
206 int cpu = 0;
207 int swap_cpuid = 0;
208
209 check_smt_enabled();
210
211 while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
212 u32 *intserv;
213 int j, len = sizeof(u32), nthreads;
214
215 intserv = (u32 *)get_property(dn, "ibm,ppc-interrupt-server#s",
216 &len);
217 if (!intserv)
218 intserv = (u32 *)get_property(dn, "reg", NULL);
219
220 nthreads = len / sizeof(u32);
221
222 for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
223 cpu_set(cpu, cpu_present_map);
224 set_hard_smp_processor_id(cpu, intserv[j]);
225
226 if (intserv[j] == boot_cpuid_phys)
227 swap_cpuid = cpu;
228 cpu_set(cpu, cpu_possible_map);
229 cpu++;
230 }
231 }
232
233 /* Swap CPU id 0 with boot_cpuid_phys, so we can always assume that
234 * boot cpu is logical 0.
235 */
236 if (boot_cpuid_phys != get_hard_smp_processor_id(0)) {
237 u32 tmp;
238 tmp = get_hard_smp_processor_id(0);
239 set_hard_smp_processor_id(0, boot_cpuid_phys);
240 set_hard_smp_processor_id(swap_cpuid, tmp);
241 }
242
243 /*
244 * On pSeries LPAR, we need to know how many cpus
245 * could possibly be added to this partition.
246 */
247 if (systemcfg->platform == PLATFORM_PSERIES_LPAR &&
248 (dn = of_find_node_by_path("/rtas"))) {
249 int num_addr_cell, num_size_cell, maxcpus;
250 unsigned int *ireg;
251
252 num_addr_cell = prom_n_addr_cells(dn);
253 num_size_cell = prom_n_size_cells(dn);
254
255 ireg = (unsigned int *)
256 get_property(dn, "ibm,lrdr-capacity", NULL);
257
258 if (!ireg)
259 goto out;
260
261 maxcpus = ireg[num_addr_cell + num_size_cell];
262
263 /* Double maxcpus for processors which have SMT capability */
264 if (cpu_has_feature(CPU_FTR_SMT))
265 maxcpus *= 2;
266
267 if (maxcpus > NR_CPUS) {
268 printk(KERN_WARNING
269 "Partition configured for %d cpus, "
270 "operating system maximum is %d.\n",
271 maxcpus, NR_CPUS);
272 maxcpus = NR_CPUS;
273 } else
274 printk(KERN_INFO "Partition configured for %d cpus.\n",
275 maxcpus);
276
277 for (cpu = 0; cpu < maxcpus; cpu++)
278 cpu_set(cpu, cpu_possible_map);
279 out:
280 of_node_put(dn);
281 }
282
283 /*
284 * Do the sibling map; assume only two threads per processor.
285 */
286 for_each_cpu(cpu) {
287 cpu_set(cpu, cpu_sibling_map[cpu]);
288 if (cpu_has_feature(CPU_FTR_SMT))
289 cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]);
290 }
291
292 systemcfg->processorCount = num_present_cpus();
293}
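/*
 * Note on the sibling map above: with at most two threads per core,
 * siblings differ only in the low bit of the logical cpu number, so
 * "cpu ^ 0x1" pairs cpus 0<->1, 2<->3, and so on. On an SMT-capable
 * machine cpu_sibling_map[2], for example, ends up containing cpus
 * 2 and 3.
 */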
294#endif /* CONFIG_SMP */
295
296extern struct machdep_calls pSeries_md;
297extern struct machdep_calls pmac_md;
298extern struct machdep_calls maple_md;
299extern struct machdep_calls bpa_md;
300extern struct machdep_calls iseries_md;
301
302/* Ultimately, stuff them in an elf section like initcalls... */
303static struct machdep_calls __initdata *machines[] = {
304#ifdef CONFIG_PPC_PSERIES
305 &pSeries_md,
306#endif /* CONFIG_PPC_PSERIES */
307#ifdef CONFIG_PPC_PMAC
308 &pmac_md,
309#endif /* CONFIG_PPC_PMAC */
310#ifdef CONFIG_PPC_MAPLE
311 &maple_md,
312#endif /* CONFIG_PPC_MAPLE */
313#ifdef CONFIG_PPC_BPA
314 &bpa_md,
315#endif
316#ifdef CONFIG_PPC_ISERIES
317 &iseries_md,
318#endif
319 NULL
320};
321
322/*
323 * Early initialization entry point. This is called by head.S
324 * with MMU translation disabled. We rely on the "feature" of
325 * the CPU that ignores the top 2 bits of the address in real
326 * mode so we can access kernel globals normally provided we
327 * only toy with things in the RMO region. From here, we do
328 * some early parsing of the device-tree to set up our LMB
329 * data structures, and allocate & initialize the hash table
330 * and segment tables so we can start running with translation
331 * enabled.
332 *
333 * It is this function which will call the probe() callback of
334 * the various platform types and copy the matching one to the
335 * global ppc_md structure. Your platform can eventually do
336 * some very early initializations from the probe() routine, but
337 * this is not recommended; be very careful as, for example, the
338 * device-tree is not accessible via normal means at this point.
339 */
340
341void __init early_setup(unsigned long dt_ptr)
342{
343 struct paca_struct *lpaca = get_paca();
344 static struct machdep_calls **mach;
345
346 /*
347 * Enable early debugging if any specified (see top of
348 * this file)
349 */
350 EARLY_DEBUG_INIT();
351
352 DBG(" -> early_setup()\n");
353
354 /*
355 * Fill the default DBG level (do we want to keep
356 * that old mechanism around forever?)
357 */
358 ppcdbg_initialize();
359
360 /*
361 * Do early initializations using the flattened device
362 * tree, like retrieving the physical memory map or
363 * calculating/retrieving the hash table size
364 */
365 early_init_devtree(__va(dt_ptr));
366
367 /*
368 * Iterate all ppc_md structures until we find the proper
369 * one for the current machine type
370 */
371 DBG("Probing machine type for platform %x...\n",
372 systemcfg->platform);
373
374 for (mach = machines; *mach; mach++) {
375 if ((*mach)->probe(systemcfg->platform))
376 break;
377 }
378 /* What can we do if we didn't find one? */
379 if (*mach == NULL) {
380 DBG("No suitable machine found !\n");
381 for (;;);
382 }
383 ppc_md = **mach;
384
385 DBG("Found, Initializing memory management...\n");
386
387 /*
388 * Initialize stab / SLB management
389 */
390 if (!firmware_has_feature(FW_FEATURE_ISERIES))
391 stab_initialize(lpaca->stab_real);
392
393 /*
394 * Initialize the MMU Hash table and create the linear mapping
395 * of memory
396 */
397 htab_initialize();
398
399 DBG(" <- early_setup()\n");
400}
401
402
403/*
404 * Initialize some remaining members of the ppc64_caches and systemcfg structures
405 * (at least until we get rid of them completely). This is mostly some
406 * cache information about the CPU that will be used by cache flush
407 * routines and/or provided to userland
408 */
409static void __init initialize_cache_info(void)
410{
411 struct device_node *np;
412 unsigned long num_cpus = 0;
413
414 DBG(" -> initialize_cache_info()\n");
415
416 for (np = NULL; (np = of_find_node_by_type(np, "cpu"));) {
417 num_cpus += 1;
418
419 /* We're assuming *all* of the CPUs have the same
420 * d-cache and i-cache sizes... -Peter
421 */
422
423 if ( num_cpus == 1 ) {
424 u32 *sizep, *lsizep;
425 u32 size, lsize;
426 const char *dc, *ic;
427
428 /* Then read cache information */
429 if (systemcfg->platform == PLATFORM_POWERMAC) {
430 dc = "d-cache-block-size";
431 ic = "i-cache-block-size";
432 } else {
433 dc = "d-cache-line-size";
434 ic = "i-cache-line-size";
435 }
436
437 size = 0;
438 lsize = cur_cpu_spec->dcache_bsize;
439 sizep = (u32 *)get_property(np, "d-cache-size", NULL);
440 if (sizep != NULL)
441 size = *sizep;
442 lsizep = (u32 *) get_property(np, dc, NULL);
443 if (lsizep != NULL)
444 lsize = *lsizep;
445 if (sizep == 0 || lsizep == 0)
446 DBG("Argh, can't find dcache properties ! "
447 "sizep: %p, lsizep: %p\n", sizep, lsizep);
448
449 systemcfg->dcache_size = ppc64_caches.dsize = size;
450 systemcfg->dcache_line_size =
451 ppc64_caches.dline_size = lsize;
452 ppc64_caches.log_dline_size = __ilog2(lsize);
453 ppc64_caches.dlines_per_page = PAGE_SIZE / lsize;
454
455 size = 0;
456 lsize = cur_cpu_spec->icache_bsize;
457 sizep = (u32 *)get_property(np, "i-cache-size", NULL);
458 if (sizep != NULL)
459 size = *sizep;
460 lsizep = (u32 *)get_property(np, ic, NULL);
461 if (lsizep != NULL)
462 lsize = *lsizep;
463 if (sizep == 0 || lsizep == 0)
464 DBG("Argh, can't find icache properties ! "
465 "sizep: %p, lsizep: %p\n", sizep, lsizep);
466
467 systemcfg->icache_size = ppc64_caches.isize = size;
468 systemcfg->icache_line_size =
469 ppc64_caches.iline_size = lsize;
470 ppc64_caches.log_iline_size = __ilog2(lsize);
471 ppc64_caches.ilines_per_page = PAGE_SIZE / lsize;
472 }
473 }
474
475 /* Add an eye catcher and the systemcfg layout version number */
476 strcpy(systemcfg->eye_catcher, "SYSTEMCFG:PPC64");
477 systemcfg->version.major = SYSTEMCFG_MAJOR;
478 systemcfg->version.minor = SYSTEMCFG_MINOR;
479 systemcfg->processor = mfspr(SPRN_PVR);
480
481 DBG(" <- initialize_cache_info()\n");
482}
483
484static void __init check_for_initrd(void)
485{
486#ifdef CONFIG_BLK_DEV_INITRD
487 u64 *prop;
488
489 DBG(" -> check_for_initrd()\n");
490
491 if (of_chosen) {
492 prop = (u64 *)get_property(of_chosen,
493 "linux,initrd-start", NULL);
494 if (prop != NULL) {
495 initrd_start = (unsigned long)__va(*prop);
496 prop = (u64 *)get_property(of_chosen,
497 "linux,initrd-end", NULL);
498 if (prop != NULL) {
499 initrd_end = (unsigned long)__va(*prop);
500 initrd_below_start_ok = 1;
501 } else
502 initrd_start = 0;
503 }
504 }
505
506 /* If we were passed an initrd, set the ROOT_DEV properly if the values
507 * look sensible. If not, clear initrd reference.
508 */
509 if (initrd_start >= KERNELBASE && initrd_end >= KERNELBASE &&
510 initrd_end > initrd_start)
511 ROOT_DEV = Root_RAM0;
512 else
513 initrd_start = initrd_end = 0;
514
515 if (initrd_start)
516 printk("Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end);
517
518 DBG(" <- check_for_initrd()\n");
519#endif /* CONFIG_BLK_DEV_INITRD */
520}
521
522/*
523 * Do some initial setup of the system. The parameters are those which
524 * were passed in from the bootloader.
525 */
526void __init setup_system(void)
527{
528 DBG(" -> setup_system()\n");
529
530 /*
531 * Unflatten the device-tree passed by prom_init or kexec
532 */
533 unflatten_device_tree();
534
535 /*
536 * Fill the ppc64_caches & systemcfg structures with information
537 * retrieved from the device-tree. Needs to be called before
538 * finish_device_tree() since the latter requires some of the
539 * information filled in here to properly parse the interrupt
540 * tree.
541 * It also sets up the cache line sizes, which allows calling
542 * routines like flush_icache_range (used by the hash init
543 * later on).
544 */
545 initialize_cache_info();
546
547#ifdef CONFIG_PPC_RTAS
548 /*
549 * Initialize RTAS if available
550 */
551 rtas_initialize();
552#endif /* CONFIG_PPC_RTAS */
553
554 /*
555 * Check if we have an initrd provided via the device-tree
556 */
557 check_for_initrd();
558
559 /*
560 * Do some platform-specific early initializations, including
561 * setting up the hash table pointers. It also sets up some interrupt-mapping
562 * related options that will be used by finish_device_tree()
563 */
564 ppc_md.init_early();
565
566 /*
567 * "Finish" the device-tree, that is, do the actual parsing of
568 * some of the properties like the interrupt map
569 */
570 finish_device_tree();
571
572#ifdef CONFIG_BOOTX_TEXT
573 init_boot_display();
574#endif
575
576 /*
577 * Initialize xmon
578 */
579#ifdef CONFIG_XMON_DEFAULT
580 xmon_init(1);
581#endif
582 /*
583 * Register early console
584 */
585 register_early_udbg_console();
586
587 /* Save unparsed command line copy for /proc/cmdline */
588 strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
589
590 parse_early_param();
591
592#ifdef CONFIG_SMP
593 /*
594 * iSeries has already initialized the cpu maps at this point.
595 */
596 setup_cpu_maps();
597
598 /* Release secondary cpus out of their spinloops at 0x60 now that
599 * we can map physical -> logical CPU ids
600 */
601 smp_release_cpus();
602#endif
603
604 printk("Starting Linux PPC64 %s\n", system_utsname.version);
605
606 printk("-----------------------------------------------------\n");
607 printk("ppc64_pft_size = 0x%lx\n", ppc64_pft_size);
608 printk("ppc64_debug_switch = 0x%lx\n", ppc64_debug_switch);
609 printk("ppc64_interrupt_controller = 0x%lx\n", ppc64_interrupt_controller);
610 printk("systemcfg = 0x%p\n", systemcfg);
611 printk("systemcfg->platform = 0x%x\n", systemcfg->platform);
612 printk("systemcfg->processorCount = 0x%lx\n", systemcfg->processorCount);
613 printk("systemcfg->physicalMemorySize = 0x%lx\n", systemcfg->physicalMemorySize);
614 printk("ppc64_caches.dcache_line_size = 0x%x\n",
615 ppc64_caches.dline_size);
616 printk("ppc64_caches.icache_line_size = 0x%x\n",
617 ppc64_caches.iline_size);
618 printk("htab_address = 0x%p\n", htab_address);
619 printk("htab_hash_mask = 0x%lx\n", htab_hash_mask);
620 printk("-----------------------------------------------------\n");
621
622 mm_init_ppc64();
623
624 DBG(" <- setup_system()\n");
625}
626
627static int ppc64_panic_event(struct notifier_block *this,
628 unsigned long event, void *ptr)
629{
630 ppc_md.panic((char *)ptr); /* May not return */
631 return NOTIFY_DONE;
632}
633
634#ifdef CONFIG_PPC_ISERIES
635/*
636 * On iSeries we just parse the mem=X option from the command line.
637 * On pSeries it's a bit more complicated, see prom_init_mem()
638 */
639static int __init early_parsemem(char *p)
640{
641 if (!p)
642 return 0;
643
644 memory_limit = ALIGN(memparse(p, &p), PAGE_SIZE);
645
646 return 0;
647}
648early_param("mem", early_parsemem);
649#endif /* CONFIG_PPC_ISERIES */
650
651#ifdef CONFIG_IRQSTACKS
652static void __init irqstack_early_init(void)
653{
654 unsigned int i;
655
656 /*
657 * interrupt stacks must be under 256MB; we cannot afford to take
658 * SLB misses on them.
659 */
660 for_each_cpu(i) {
661 softirq_ctx[i] = (struct thread_info *)__va(lmb_alloc_base(THREAD_SIZE,
662 THREAD_SIZE, 0x10000000));
663 hardirq_ctx[i] = (struct thread_info *)__va(lmb_alloc_base(THREAD_SIZE,
664 THREAD_SIZE, 0x10000000));
665 }
666}
667#else
668#define irqstack_early_init()
669#endif
670
671/*
672 * Stack space used when we detect a bad kernel stack pointer, and
673 * early in SMP boots before relocation is enabled.
674 */
675static void __init emergency_stack_init(void)
676{
677 unsigned long limit;
678 unsigned int i;
679
680 /*
681 * Emergency stacks must be under 256MB; we cannot afford to take
682 * SLB misses on them. The ABI also requires them to be 128-byte
683 * aligned.
684 *
685 * Since we use these as temporary stacks during secondary CPU
686 * bringup, we need to get at them in real mode. This means they
687 * must also be within the RMO region.
688 */
689 limit = min(0x10000000UL, lmb.rmo_size);
690
691 for_each_cpu(i)
692 paca[i].emergency_sp = __va(lmb_alloc_base(PAGE_SIZE, 128,
693 limit)) + PAGE_SIZE;
694}
695
696/*
697 * Called from setup_arch to initialize the bitmap of available
698 * syscalls in the systemcfg page
699 */
700void __init setup_syscall_map(void)
701{
702 unsigned int i, count64 = 0, count32 = 0;
703 extern unsigned long *sys_call_table;
704 extern unsigned long sys_ni_syscall;
705
706
707 for (i = 0; i < __NR_syscalls; i++) {
708 if (sys_call_table[i*2] != sys_ni_syscall) {
709 count64++;
710 systemcfg->syscall_map_64[i >> 5] |=
711 0x80000000UL >> (i & 0x1f);
712 }
713 if (sys_call_table[i*2+1] != sys_ni_syscall) {
714 count32++;
715 systemcfg->syscall_map_32[i >> 5] |=
716 0x80000000UL >> (i & 0x1f);
717 }
718 }
719 printk(KERN_INFO "Syscall map setup, %d 32-bit and %d 64-bit syscalls\n",
720 count32, count64);
721}
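/*
 * Worked example of the bitmap arithmetic above: syscall number n
 * lands in 32-bit word n >> 5, at bit n & 0x1f counted from the most
 * significant bit. For n = 37: word 37 >> 5 = 1, mask
 * 0x80000000 >> (37 & 0x1f) = 0x04000000.
 */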
722
723/*
724 * Called from start_kernel, after lock_kernel has been called.
725 * Initializes bootmem, which is used to manage page allocation until
726 * mem_init is called.
727 */
728void __init setup_arch(char **cmdline_p)
729{
730 extern void do_init_bootmem(void);
731
732 ppc64_boot_msg(0x12, "Setup Arch");
733
734 *cmdline_p = cmd_line;
735
736 /*
737 * Set cache line size based on type of cpu as a default.
738 * Systems with OF can look in the properties on the cpu node(s)
739 * for a possibly more accurate value.
740 */
741 dcache_bsize = ppc64_caches.dline_size;
742 icache_bsize = ppc64_caches.iline_size;
743
744 /* reboot on panic */
745 panic_timeout = 180;
746
747 if (ppc_md.panic)
748 notifier_chain_register(&panic_notifier_list, &ppc64_panic_block);
749
750 init_mm.start_code = PAGE_OFFSET;
751 init_mm.end_code = (unsigned long) _etext;
752 init_mm.end_data = (unsigned long) _edata;
753 init_mm.brk = klimit;
754
755 irqstack_early_init();
756 emergency_stack_init();
757
758 stabs_alloc();
759
760 /* set up the bootmem stuff with available memory */
761 do_init_bootmem();
762 sparse_init();
763
764 /* initialize the syscall map in systemcfg */
765 setup_syscall_map();
766
767#ifdef CONFIG_DUMMY_CONSOLE
768 conswitchp = &dummy_con;
769#endif
770
771 ppc_md.setup_arch();
772
773 /* Use the default idle loop if the platform hasn't provided one. */
774 if (NULL == ppc_md.idle_loop) {
775 ppc_md.idle_loop = default_idle;
776 printk(KERN_INFO "Using default idle loop\n");
777 }
778
779 paging_init();
780 ppc64_boot_msg(0x15, "Setup Done");
781}
782
783
784/* ToDo: do something useful if ppc_md is not yet set up. */
785#define PPC64_LINUX_FUNCTION 0x0f000000
786#define PPC64_IPL_MESSAGE 0xc0000000
787#define PPC64_TERM_MESSAGE 0xb0000000
788
789static void ppc64_do_msg(unsigned int src, const char *msg)
790{
791 if (ppc_md.progress) {
792 char buf[128];
793
794 sprintf(buf, "%08X\n", src);
795 ppc_md.progress(buf, 0);
796 snprintf(buf, 128, "%s", msg);
797 ppc_md.progress(buf, 0);
798 }
799}
800
801/* Print a boot progress message. */
802void ppc64_boot_msg(unsigned int src, const char *msg)
803{
804 ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_IPL_MESSAGE|src, msg);
805 printk("[boot]%04x %s\n", src, msg);
806}
807
808/* Print a termination message (print only -- does not stop the kernel) */
809void ppc64_terminate_msg(unsigned int src, const char *msg)
810{
811 ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_TERM_MESSAGE|src, msg);
812 printk("[terminate]%04x %s\n", src, msg);
813}
814
815#ifndef CONFIG_PPC_ISERIES
816/*
817 * This function can be used by platforms to "find" legacy serial ports.
818 * It works for "serial" nodes under an "isa" node, and will try to
819 * respect the "ibm,aix-loc" property if any. It works with up to 8
820 * ports.
821 */
822
823#define MAX_LEGACY_SERIAL_PORTS 8
824static struct plat_serial8250_port serial_ports[MAX_LEGACY_SERIAL_PORTS+1];
825static unsigned int old_serial_count;
826
827void __init generic_find_legacy_serial_ports(u64 *physport,
828 unsigned int *default_speed)
829{
830 struct device_node *np;
831 u32 *sizeprop;
832
833 struct isa_reg_property {
834 u32 space;
835 u32 address;
836 u32 size;
837 };
838 struct pci_reg_property {
839 struct pci_address addr;
840 u32 size_hi;
841 u32 size_lo;
842 };
843
844 DBG(" -> generic_find_legacy_serial_port()\n");
845
846 *physport = 0;
847 if (default_speed)
848 *default_speed = 0;
849
850 np = of_find_node_by_path("/");
851 if (!np)
852 return;
853
854 /* First fill our array */
855 for (np = NULL; (np = of_find_node_by_type(np, "serial"));) {
856 struct device_node *isa, *pci;
857 struct isa_reg_property *reg;
858 unsigned long phys_size, addr_size, io_base;
859 u32 *rangesp;
860 u32 *interrupts, *clk, *spd;
861 char *typep;
862 int index, rlen, rentsize;
863
864 /* Ok, first check if it's under an "isa" parent */
865 isa = of_get_parent(np);
866 if (!isa || strcmp(isa->name, "isa")) {
867 DBG("%s: no isa parent found\n", np->full_name);
868 continue;
869 }
870
871 /* Now look for an "ibm,aix-loc" property that gives us ordering
872 * if any...
873 */
874 typep = (char *)get_property(np, "ibm,aix-loc", NULL);
875
876 /* Get the ISA port number */
877 reg = (struct isa_reg_property *)get_property(np, "reg", NULL);
878 if (reg == NULL)
879 goto next_port;
880 /* We assume the interrupt number isn't translated ... */
881 interrupts = (u32 *)get_property(np, "interrupts", NULL);
882 /* get clock freq. if present */
883 clk = (u32 *)get_property(np, "clock-frequency", NULL);
884 /* get default speed if present */
885 spd = (u32 *)get_property(np, "current-speed", NULL);
886 /* Default to the end of the array */
887 index = old_serial_count;
888
889 /* If we have a location index, then use it */
890 if (typep && *typep == 'S') {
891 index = simple_strtol(typep+1, NULL, 0) - 1;
892 /* if index is out of range, use end of array instead */
893 if (index >= MAX_LEGACY_SERIAL_PORTS)
894 index = old_serial_count;
895 /* if our index is still out of range, that means the
896 * array is full; we could scan for a free slot, but it
897 * makes little sense to bother, so just skip the port
898 */
899 if (index >= MAX_LEGACY_SERIAL_PORTS)
900 goto next_port;
901 if (index >= old_serial_count)
902 old_serial_count = index + 1;
903 /* Check if there is a port that already claimed our slot */
904 if (serial_ports[index].iobase != 0) {
905 /* if we still have some room, move it, else override */
906 if (old_serial_count < MAX_LEGACY_SERIAL_PORTS) {
907 DBG("Moved legacy port %d -> %d\n", index,
908 old_serial_count);
909 serial_ports[old_serial_count++] =
910 serial_ports[index];
911 } else {
912 DBG("Replacing legacy port %d\n", index);
913 }
914 }
915 }
916 if (index >= MAX_LEGACY_SERIAL_PORTS)
917 goto next_port;
918 if (index >= old_serial_count)
919 old_serial_count = index + 1;
920
921 /* Now fill the entry */
922 memset(&serial_ports[index], 0, sizeof(struct plat_serial8250_port));
923 serial_ports[index].uartclk = clk ? *clk : BASE_BAUD * 16;
924 serial_ports[index].iobase = reg->address;
925 serial_ports[index].irq = interrupts ? interrupts[0] : 0;
926 serial_ports[index].flags = ASYNC_BOOT_AUTOCONF;
927
928 DBG("Added legacy port, index: %d, port: %x, irq: %d, clk: %d\n",
929 index,
930 serial_ports[index].iobase,
931 serial_ports[index].irq,
932 serial_ports[index].uartclk);
933
934 /* Get phys address of IO reg for the first port only */
935 if (index != 0)
936 goto next_port;
937
938 pci = of_get_parent(isa);
939 if (!pci) {
940 DBG("%s: no pci parent found\n", np->full_name);
941 goto next_port;
942 }
943
944 rangesp = (u32 *)get_property(pci, "ranges", &rlen);
945 if (rangesp == NULL) {
946 of_node_put(pci);
947 goto next_port;
948 }
949 rlen /= 4;
950
951 /* we need the #size-cells of the PCI bridge node itself */
952 phys_size = 1;
953 sizeprop = (u32 *)get_property(pci, "#size-cells", NULL);
954 if (sizeprop != NULL)
955 phys_size = *sizeprop;
956 /* we need the parent #addr-cells */
957 addr_size = prom_n_addr_cells(pci);
958 rentsize = 3 + addr_size + phys_size;
959 io_base = 0;
960 for (;rlen >= rentsize; rlen -= rentsize,rangesp += rentsize) {
961 if (((rangesp[0] >> 24) & 0x3) != 1)
962 continue; /* not IO space */
963 io_base = rangesp[3];
964 if (addr_size == 2)
965 io_base = (io_base << 32) | rangesp[4];
966 }
967 if (io_base != 0) {
968 *physport = io_base + reg->address;
969 if (default_speed && spd)
970 *default_speed = *spd;
971 }
972 of_node_put(pci);
973 next_port:
974 of_node_put(isa);
975 }
976
977 DBG(" <- generic_find_legacy_serial_port()\n");
978}
979
980static struct platform_device serial_device = {
981 .name = "serial8250",
982 .id = PLAT8250_DEV_PLATFORM,
983 .dev = {
984 .platform_data = serial_ports,
985 },
986};
987
988static int __init serial_dev_init(void)
989{
990 return platform_device_register(&serial_device);
991}
992arch_initcall(serial_dev_init);
993
994#endif /* CONFIG_PPC_ISERIES */
995
996int check_legacy_ioport(unsigned long base_port)
997{
998 if (ppc_md.check_legacy_ioport == NULL)
999 return 0;
1000 return ppc_md.check_legacy_ioport(base_port);
1001}
1002EXPORT_SYMBOL(check_legacy_ioport);
1003
1004#ifdef CONFIG_XMON
1005static int __init early_xmon(char *p)
1006{
1007 /* ensure xmon is enabled */
1008 if (p) {
1009 if (strncmp(p, "on", 2) == 0)
1010 xmon_init(1);
1011 if (strncmp(p, "off", 3) == 0)
1012 xmon_init(0);
1013 if (strncmp(p, "early", 5) != 0)
1014 return 0;
1015 }
1016 xmon_init(1);
1017 debugger(NULL);
1018
1019 return 0;
1020}
1021early_param("xmon", early_xmon);
1022#endif
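/*
 * Usage example: "xmon=on" enables xmon, "xmon=off" disables it, and
 * "xmon=early" (or a bare "xmon") additionally drops into the
 * debugger right here during boot.
 */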
1023
1024void cpu_die(void)
1025{
1026 if (ppc_md.cpu_die)
1027 ppc_md.cpu_die();
1028}
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
new file mode 100644
index 000000000000..444c3e81884c
--- /dev/null
+++ b/arch/powerpc/kernel/signal_32.c
@@ -0,0 +1,1269 @@
1/*
2 * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 * Copyright (C) 2001 IBM
7 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
9 *
10 * Derived from "arch/i386/kernel/signal.c"
11 * Copyright (C) 1991, 1992 Linus Torvalds
12 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
18 */
19
20#include <linux/config.h>
21#include <linux/sched.h>
22#include <linux/mm.h>
23#include <linux/smp.h>
24#include <linux/smp_lock.h>
25#include <linux/kernel.h>
26#include <linux/signal.h>
27#include <linux/errno.h>
28#include <linux/elf.h>
29#ifdef CONFIG_PPC64
30#include <linux/syscalls.h>
31#include <linux/compat.h>
32#include <linux/ptrace.h>
33#else
34#include <linux/wait.h>
35#include <linux/ptrace.h>
36#include <linux/unistd.h>
37#include <linux/stddef.h>
38#include <linux/tty.h>
39#include <linux/binfmts.h>
40#include <linux/suspend.h>
41#endif
42
43#include <asm/uaccess.h>
44#include <asm/cacheflush.h>
45#ifdef CONFIG_PPC64
46#include <asm/ppc32.h>
47#include <asm/ppcdebug.h>
48#include <asm/unistd.h>
49#include <asm/vdso.h>
50#else
51#include <asm/ucontext.h>
52#include <asm/pgtable.h>
53#endif
54
55#undef DEBUG_SIG
56
57#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
58
59#ifdef CONFIG_PPC64
60#define do_signal do_signal32
61#define sys_sigsuspend compat_sys_sigsuspend
62#define sys_rt_sigsuspend compat_sys_rt_sigsuspend
63#define sys_rt_sigreturn compat_sys_rt_sigreturn
64#define sys_sigaction compat_sys_sigaction
65#define sys_swapcontext compat_sys_swapcontext
66#define sys_sigreturn compat_sys_sigreturn
67
68#define old_sigaction old_sigaction32
69#define sigcontext sigcontext32
70#define mcontext mcontext32
71#define ucontext ucontext32
72
73/*
74 * Returning 0 means we return to userspace via
75 * ret_from_except and thus restore all user
76 * registers from *regs. This is what we need
77 * to do when a signal has been delivered.
78 */
79#define sigreturn_exit(regs) return 0
80
81#define GP_REGS_SIZE min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
82#undef __SIGNAL_FRAMESIZE
83#define __SIGNAL_FRAMESIZE __SIGNAL_FRAMESIZE32
84#undef ELF_NVRREG
85#define ELF_NVRREG ELF_NVRREG32
86
87/*
88 * Functions for flipping sigsets (thanks to brain dead generic
89 * implementation that makes things simple for little endian only)
90 */
91static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
92{
93 compat_sigset_t cset;
94
95 switch (_NSIG_WORDS) {
96 case 4: cset.sig[6] = set->sig[3] & 0xffffffffull;
97 cset.sig[7] = set->sig[3] >> 32;
98 case 3: cset.sig[4] = set->sig[2] & 0xffffffffull;
99 cset.sig[5] = set->sig[2] >> 32;
100 case 2: cset.sig[2] = set->sig[1] & 0xffffffffull;
101 cset.sig[3] = set->sig[1] >> 32;
102 case 1: cset.sig[0] = set->sig[0] & 0xffffffffull;
103 cset.sig[1] = set->sig[0] >> 32;
104 }
105 return copy_to_user(uset, &cset, sizeof(*uset));
106}
107
108static inline int get_sigset_t(sigset_t *set,
109 const compat_sigset_t __user *uset)
110{
111 compat_sigset_t s32;
112
113 if (copy_from_user(&s32, uset, sizeof(*uset)))
114 return -EFAULT;
115
116 /*
117 * Swap the 2 words of the 64-bit sigset_t (they are stored
118 * in the "wrong" endian in 32-bit user storage).
119 */
120 switch (_NSIG_WORDS) {
121 case 4: set->sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
122 case 3: set->sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
123 case 2: set->sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
124 case 1: set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
125 }
126 return 0;
127}
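/*
 * Worked example of the word-flipping above (value illustrative): a
 * kernel sigset word set->sig[0] = 0x0000000400000002 is stored for
 * 32-bit userland as cset.sig[0] = 0x00000002 (low half) and
 * cset.sig[1] = 0x00000004 (high half); get_sigset_t() reassembles
 * it the same way on the read side.
 */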
128
129static inline int get_old_sigaction(struct k_sigaction *new_ka,
130 struct old_sigaction __user *act)
131{
132 compat_old_sigset_t mask;
133 compat_uptr_t handler, restorer;
134
135 if (get_user(handler, &act->sa_handler) ||
136 __get_user(restorer, &act->sa_restorer) ||
137 __get_user(new_ka->sa.sa_flags, &act->sa_flags) ||
138 __get_user(mask, &act->sa_mask))
139 return -EFAULT;
140 new_ka->sa.sa_handler = compat_ptr(handler);
141 new_ka->sa.sa_restorer = compat_ptr(restorer);
142 siginitset(&new_ka->sa.sa_mask, mask);
143 return 0;
144}
145
146static inline compat_uptr_t to_user_ptr(void *kp)
147{
148 return (compat_uptr_t)(u64)kp;
149}
150
151#define from_user_ptr(p) compat_ptr(p)
152
153static inline int save_general_regs(struct pt_regs *regs,
154 struct mcontext __user *frame)
155{
156 elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
157 int i;
158
159 for (i = 0; i <= PT_RESULT; i++)
160 if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
161 return -EFAULT;
162 return 0;
163}
164
165static inline int restore_general_regs(struct pt_regs *regs,
166 struct mcontext __user *sr)
167{
168 elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
169 int i;
170
171 for (i = 0; i <= PT_RESULT; i++) {
172 if ((i == PT_MSR) || (i == PT_SOFTE))
173 continue;
174 if (__get_user(gregs[i], &sr->mc_gregs[i]))
175 return -EFAULT;
176 }
177 return 0;
178}
179
180#else /* CONFIG_PPC64 */
181
182extern void sigreturn_exit(struct pt_regs *);
183
184#define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
185
186static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
187{
188 return copy_to_user(uset, set, sizeof(*uset));
189}
190
191static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
192{
193 return copy_from_user(set, uset, sizeof(*uset));
194}
195
196static inline int get_old_sigaction(struct k_sigaction *new_ka,
197 struct old_sigaction __user *act)
198{
199 old_sigset_t mask;
200
201 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
202 __get_user(new_ka->sa.sa_handler, &act->sa_handler) ||
203 __get_user(new_ka->sa.sa_restorer, &act->sa_restorer))
204 return -EFAULT;
205 __get_user(new_ka->sa.sa_flags, &act->sa_flags);
206 __get_user(mask, &act->sa_mask);
207 siginitset(&new_ka->sa.sa_mask, mask);
208 return 0;
209}
210
211#define to_user_ptr(p) (p)
212#define from_user_ptr(p) (p)
213
214static inline int save_general_regs(struct pt_regs *regs,
215 struct mcontext __user *frame)
216{
217 return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
218}
219
220static inline int restore_general_regs(struct pt_regs *regs,
221 struct mcontext __user *sr)
222{
223 /* copy up to but not including MSR */
224 if (__copy_from_user(regs, &sr->mc_gregs,
225 PT_MSR * sizeof(elf_greg_t)))
226 return -EFAULT;
227 /* copy from orig_r3 (the word after the MSR) up to the end */
228 if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
229 GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
230 return -EFAULT;
231 return 0;
232}
233
234#endif /* CONFIG_PPC64 */
235
236int do_signal(sigset_t *oldset, struct pt_regs *regs);
237
238/*
239 * Atomically swap in the new signal mask, and wait for a signal.
240 */
241long sys_sigsuspend(old_sigset_t mask, int p2, int p3, int p4, int p6, int p7,
242 struct pt_regs *regs)
243{
244 sigset_t saveset;
245
246 mask &= _BLOCKABLE;
247 spin_lock_irq(&current->sighand->siglock);
248 saveset = current->blocked;
249 siginitset(&current->blocked, mask);
250 recalc_sigpending();
251 spin_unlock_irq(&current->sighand->siglock);
252
253 regs->result = -EINTR;
254 regs->gpr[3] = EINTR;
255 regs->ccr |= 0x10000000;
256 while (1) {
257 current->state = TASK_INTERRUPTIBLE;
258 schedule();
259 if (do_signal(&saveset, regs))
260 sigreturn_exit(regs);
261 }
262}
263
264long sys_rt_sigsuspend(
265#ifdef CONFIG_PPC64
266 compat_sigset_t __user *unewset,
267#else
268 sigset_t __user *unewset,
269#endif
270 size_t sigsetsize, int p3, int p4,
271 int p6, int p7, struct pt_regs *regs)
272{
273 sigset_t saveset, newset;
274
275 /* XXX: Don't preclude handling different sized sigset_t's. */
276 if (sigsetsize != sizeof(sigset_t))
277 return -EINVAL;
278
279 if (get_sigset_t(&newset, unewset))
280 return -EFAULT;
281 sigdelsetmask(&newset, ~_BLOCKABLE);
282
283 spin_lock_irq(&current->sighand->siglock);
284 saveset = current->blocked;
285 current->blocked = newset;
286 recalc_sigpending();
287 spin_unlock_irq(&current->sighand->siglock);
288
289 regs->result = -EINTR;
290 regs->gpr[3] = EINTR;
291 regs->ccr |= 0x10000000;
292 while (1) {
293 current->state = TASK_INTERRUPTIBLE;
294 schedule();
295 if (do_signal(&saveset, regs))
296 sigreturn_exit(regs);
297 }
298}
299
300#ifdef CONFIG_PPC32
301long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, int r5,
302 int r6, int r7, int r8, struct pt_regs *regs)
303{
304 return do_sigaltstack(uss, uoss, regs->gpr[1]);
305}
306#endif
307
308long sys_sigaction(int sig, struct old_sigaction __user *act,
309 struct old_sigaction __user *oact)
310{
311 struct k_sigaction new_ka, old_ka;
312 int ret;
313
314#ifdef CONFIG_PPC64
315 if (sig < 0)
316 sig = -sig;
317#endif
318
319 if (act) {
320 if (get_old_sigaction(&new_ka, act))
321 return -EFAULT;
322 }
323
324 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
325 if (!ret && oact) {
326 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
327 __put_user(to_user_ptr(old_ka.sa.sa_handler),
328 &oact->sa_handler) ||
329 __put_user(to_user_ptr(old_ka.sa.sa_restorer),
330 &oact->sa_restorer) ||
331 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
332 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
333 return -EFAULT;
334 }
335
336 return ret;
337}
338
339/*
340 * When we have signals to deliver, we set up on the
341 * user stack, going down from the original stack pointer:
342 * a sigregs struct
343 * a sigcontext struct
344 * a gap of __SIGNAL_FRAMESIZE bytes
345 *
346 * Each of these things must be a multiple of 16 bytes in size.
347 *
348 */
349struct sigregs {
350 struct mcontext mctx; /* all the register values */
351 /*
352 * Programs using the rs6000/xcoff abi can save up to 19 gp
353 * regs and 18 fp regs below sp before decrementing it.
354 */
355 int abigap[56];
356};
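/*
 * Illustrative picture of the layout described above (higher
 * addresses at the top, as laid out going down from the original
 * user stack pointer):
 *
 *	[ original user SP             ]
 *	[ struct sigregs               ]  registers + trampoline
 *	[ struct sigcontext            ]
 *	[ gap of __SIGNAL_FRAMESIZE    ]  <- new user SP
 */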
357
358/* We use the mc_pad field for the signal return trampoline. */
359#define tramp mc_pad
360
361/*
362 * When we have rt signals to deliver, we set up on the
363 * user stack, going down from the original stack pointer:
364 * one rt_sigframe struct (siginfo + ucontext + ABI gap)
365 * a gap of __SIGNAL_FRAMESIZE+16 bytes
366 * (the +16 is to get the siginfo and ucontext in the same
367 * positions as in older kernels).
368 *
369 * Each of these things must be a multiple of 16 bytes in size.
370 *
371 */
372struct rt_sigframe {
373#ifdef CONFIG_PPC64
374 compat_siginfo_t info;
375#else
376 struct siginfo info;
377#endif
378 struct ucontext uc;
379 /*
380 * Programs using the rs6000/xcoff abi can save up to 19 gp
381 * regs and 18 fp regs below sp before decrementing it.
382 */
383 int abigap[56];
384};
385
386/*
387 * Save the current user registers on the user stack.
388 * We only save the altivec/spe registers if the process has used
389 * altivec/spe instructions at some point.
390 */
391static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
392 int sigret)
393{
394#ifdef CONFIG_PPC32
395 CHECK_FULL_REGS(regs);
396#endif
397 /* Make sure floating point registers are stored in regs */
398 flush_fp_to_thread(current);
399
400 /* save general and floating-point registers */
401 if (save_general_regs(regs, frame) ||
402 __copy_to_user(&frame->mc_fregs, current->thread.fpr,
403 ELF_NFPREG * sizeof(double)))
404 return 1;
405
406 current->thread.fpscr.val = 0; /* turn off all fp exceptions */
407
408#ifdef CONFIG_ALTIVEC
409 /* save altivec registers */
410 if (current->thread.used_vr) {
411 flush_altivec_to_thread(current);
412 if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
413 ELF_NVRREG * sizeof(vector128)))
414 return 1;
415 /* set MSR_VEC in the saved MSR value to indicate that
416 frame->mc_vregs contains valid data */
417 if (__put_user(regs->msr | MSR_VEC, &frame->mc_gregs[PT_MSR]))
418 return 1;
419 }
420 /* else assert((regs->msr & MSR_VEC) == 0) */
421
422 /* We always copy to/from vrsave, it's 0 if we don't have or don't
423 * use altivec. Since VSCR only contains 32 bits saved in the least
424 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
425 * most significant bits of that same vector. --BenH
426 */
427 if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
428 return 1;
429#endif /* CONFIG_ALTIVEC */
430
431#ifdef CONFIG_SPE
432 /* save spe registers */
433 if (current->thread.used_spe) {
434 flush_spe_to_thread(current);
435 if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
436 ELF_NEVRREG * sizeof(u32)))
437 return 1;
438 /* set MSR_SPE in the saved MSR value to indicate that
439 frame->mc_vregs contains valid data */
440 if (__put_user(regs->msr | MSR_SPE, &frame->mc_gregs[PT_MSR]))
441 return 1;
442 }
443 /* else assert((regs->msr & MSR_SPE) == 0) */
444
445 /* We always copy to/from spefscr */
446 if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
447 return 1;
448#endif /* CONFIG_SPE */
449
450 if (sigret) {
451 /* Set up the sigreturn trampoline: li r0,sigret; sc */
452 if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
453 || __put_user(0x44000002UL, &frame->tramp[1]))
454 return 1;
455 flush_icache_range((unsigned long) &frame->tramp[0],
456 (unsigned long) &frame->tramp[2]);
457 }
458
459 return 0;
460}
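/*
 * Editorial note on the trampoline above: "li r0,sigret; sc" is encoded
 * as 0x38000000 + sigret (addi with rD = rA = 0, i.e. li r0,sigret)
 * followed by 0x44000002 (sc).  A sketch of the encoding, macro names
 * illustrative only:
 */
#if 0
#define PPC_INST_LI_R0(imm)	(0x38000000UL + ((imm) & 0xffffUL))
#define PPC_INST_SC		0x44000002UL
/* e.g. PPC_INST_LI_R0(__NR_sigreturn) is the first trampoline word */
#endif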
461
462/*
463 * Restore the current user register values from the user stack,
464 * (except for MSR).
465 */
466static long restore_user_regs(struct pt_regs *regs,
467 struct mcontext __user *sr, int sig)
468{
469 long err;
470 unsigned int save_r2 = 0;
471#if defined(CONFIG_ALTIVEC) || defined(CONFIG_SPE)
472 unsigned long msr;
473#endif
474
475 /*
476	 * Restore the general registers, but not MSR or SOFTE. Also
477	 * take care to keep r2 (TLS) intact if this is not a signal.
478 */
479 if (!sig)
480 save_r2 = (unsigned int)regs->gpr[2];
481 err = restore_general_regs(regs, sr);
482 if (!sig)
483 regs->gpr[2] = (unsigned long) save_r2;
484 if (err)
485 return 1;
486
487 /* force the process to reload the FP registers from
488 current->thread when it next does FP instructions */
489 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
490 if (__copy_from_user(current->thread.fpr, &sr->mc_fregs,
491 sizeof(sr->mc_fregs)))
492 return 1;
493
494#ifdef CONFIG_ALTIVEC
495 /* force the process to reload the altivec registers from
496 current->thread when it next does altivec instructions */
497 regs->msr &= ~MSR_VEC;
498 if (!__get_user(msr, &sr->mc_gregs[PT_MSR]) && (msr & MSR_VEC) != 0) {
499 /* restore altivec registers from the stack */
500 if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
501 sizeof(sr->mc_vregs)))
502 return 1;
503 } else if (current->thread.used_vr)
504 memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));
505
506 /* Always get VRSAVE back */
507 if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
508 return 1;
509#endif /* CONFIG_ALTIVEC */
510
511#ifdef CONFIG_SPE
512 /* force the process to reload the spe registers from
513 current->thread when it next does spe instructions */
514 regs->msr &= ~MSR_SPE;
515 if (!__get_user(msr, &sr->mc_gregs[PT_MSR]) && (msr & MSR_SPE) != 0) {
516 /* restore spe registers from the stack */
517 if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
518 ELF_NEVRREG * sizeof(u32)))
519 return 1;
520 } else if (current->thread.used_spe)
521 memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
522
523 /* Always get SPEFSCR back */
524 if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
525 return 1;
526#endif /* CONFIG_SPE */
527
528#ifndef CONFIG_SMP
529 preempt_disable();
530 if (last_task_used_math == current)
531 last_task_used_math = NULL;
532 if (last_task_used_altivec == current)
533 last_task_used_altivec = NULL;
534#ifdef CONFIG_SPE
535 if (last_task_used_spe == current)
536 last_task_used_spe = NULL;
537#endif
538 preempt_enable();
539#endif
540 return 0;
541}
542
543#ifdef CONFIG_PPC64
544long compat_sys_rt_sigaction(int sig, const struct sigaction32 __user *act,
545 struct sigaction32 __user *oact, size_t sigsetsize)
546{
547 struct k_sigaction new_ka, old_ka;
548 int ret;
549
550 /* XXX: Don't preclude handling different sized sigset_t's. */
551 if (sigsetsize != sizeof(compat_sigset_t))
552 return -EINVAL;
553
554 if (act) {
555 compat_uptr_t handler;
556
557 ret = get_user(handler, &act->sa_handler);
558 new_ka.sa.sa_handler = compat_ptr(handler);
559 ret |= get_sigset_t(&new_ka.sa.sa_mask, &act->sa_mask);
560 ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
561 if (ret)
562 return -EFAULT;
563 }
564
565 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
566 if (!ret && oact) {
567 ret = put_user((long)old_ka.sa.sa_handler, &oact->sa_handler);
568 ret |= put_sigset_t(&oact->sa_mask, &old_ka.sa.sa_mask);
569 ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
570 }
571 return ret;
572}
573
574/*
575 * Note: it is necessary to treat how as an unsigned int, with the
576	 * corresponding cast to a signed int to ensure that the proper
577	 * conversion (sign extension) between the 32-bit register
578	 * representation and the 64-bit register representation of a
579	 * signed int is performed.
580 */
581long compat_sys_rt_sigprocmask(u32 how, compat_sigset_t __user *set,
582 compat_sigset_t __user *oset, size_t sigsetsize)
583{
584 sigset_t s;
585 sigset_t __user *up;
586 int ret;
587 mm_segment_t old_fs = get_fs();
588
589 if (set) {
590 if (get_sigset_t(&s, set))
591 return -EFAULT;
592 }
593
594 set_fs(KERNEL_DS);
595 /* This is valid because of the set_fs() */
596 up = (sigset_t __user *) &s;
597 ret = sys_rt_sigprocmask((int)how, set ? up : NULL, oset ? up : NULL,
598 sigsetsize);
599 set_fs(old_fs);
600 if (ret)
601 return ret;
602 if (oset) {
603 if (put_sigset_t(oset, &s))
604 return -EFAULT;
605 }
606 return 0;
607}
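/*
 * Editorial note: the function above shows the compat pattern used
 * throughout this file.  Convert the 32-bit user structure into a
 * native one on the kernel stack, widen the address limit with
 * set_fs(KERNEL_DS) so the native syscall's access_ok() accepts a
 * kernel pointer, call the native handler, restore the limit, and
 * convert the result back.  A minimal sketch, names hypothetical:
 */
#if 0
static long compat_shim(compat_thing_t __user *uptr)
{
	native_thing_t k;
	mm_segment_t old_fs = get_fs();
	long ret;

	if (get_compat_thing(&k, uptr))		/* 32 -> 64 conversion */
		return -EFAULT;
	set_fs(KERNEL_DS);
	ret = sys_native_thing((native_thing_t __user *)&k);
	set_fs(old_fs);
	return ret;
}
#endif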
608
609long compat_sys_rt_sigpending(compat_sigset_t __user *set, compat_size_t sigsetsize)
610{
611 sigset_t s;
612 int ret;
613 mm_segment_t old_fs = get_fs();
614
615 set_fs(KERNEL_DS);
616 /* The __user pointer cast is valid because of the set_fs() */
617 ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize);
618 set_fs(old_fs);
619 if (!ret) {
620 if (put_sigset_t(set, &s))
621 return -EFAULT;
622 }
623 return ret;
624}
625
626
627int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s)
628{
629 int err;
630
631 if (!access_ok (VERIFY_WRITE, d, sizeof(*d)))
632 return -EFAULT;
633
634	 /* If you change the siginfo_t structure, please be sure
635	 * this code is fixed accordingly.
636	 * It should never copy any padding contained in the structure,
637	 * to avoid security leaks, but it must copy the generic
638	 * 3 ints plus the relevant union member.
639	 * This routine must also convert siginfo from 64-bit to 32-bit
640	 * at the same time.
641 */
642 err = __put_user(s->si_signo, &d->si_signo);
643 err |= __put_user(s->si_errno, &d->si_errno);
644 err |= __put_user((short)s->si_code, &d->si_code);
645 if (s->si_code < 0)
646 err |= __copy_to_user(&d->_sifields._pad, &s->_sifields._pad,
647 SI_PAD_SIZE32);
648 else switch(s->si_code >> 16) {
649 case __SI_CHLD >> 16:
650 err |= __put_user(s->si_pid, &d->si_pid);
651 err |= __put_user(s->si_uid, &d->si_uid);
652 err |= __put_user(s->si_utime, &d->si_utime);
653 err |= __put_user(s->si_stime, &d->si_stime);
654 err |= __put_user(s->si_status, &d->si_status);
655 break;
656 case __SI_FAULT >> 16:
657 err |= __put_user((unsigned int)(unsigned long)s->si_addr,
658 &d->si_addr);
659 break;
660 case __SI_POLL >> 16:
661 err |= __put_user(s->si_band, &d->si_band);
662 err |= __put_user(s->si_fd, &d->si_fd);
663 break;
664 case __SI_TIMER >> 16:
665 err |= __put_user(s->si_tid, &d->si_tid);
666 err |= __put_user(s->si_overrun, &d->si_overrun);
667 err |= __put_user(s->si_int, &d->si_int);
668 break;
669 case __SI_RT >> 16: /* This is not generated by the kernel as of now. */
670 case __SI_MESGQ >> 16:
671 err |= __put_user(s->si_int, &d->si_int);
672 /* fallthrough */
673 case __SI_KILL >> 16:
674 default:
675 err |= __put_user(s->si_pid, &d->si_pid);
676 err |= __put_user(s->si_uid, &d->si_uid);
677 break;
678 }
679 return err;
680}
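/*
 * Editorial note: the switch above relies on the kernel tagging
 * si_code internally with the union flavour in its upper 16 bits
 * (__SI_CHLD, __SI_FAULT, ... from asm-generic/siginfo.h); the
 * (short) cast when storing si_code strips the tag before it
 * reaches userspace.
 */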
681
682#define copy_siginfo_to_user copy_siginfo_to_user32
683
684/*
685 * Note: it is necessary to treat pid and sig as unsigned ints, with the
686 * corresponding cast to a signed int to insure that the proper conversion
687 * (sign extension) between the register representation of a signed int
688 * (msr in 32-bit mode) and the register representation of a signed int
689 * (msr in 64-bit mode) is performed.
690 */
691long compat_sys_rt_sigqueueinfo(u32 pid, u32 sig, compat_siginfo_t __user *uinfo)
692{
693 siginfo_t info;
694 int ret;
695 mm_segment_t old_fs = get_fs();
696
697 if (copy_from_user (&info, uinfo, 3*sizeof(int)) ||
698 copy_from_user (info._sifields._pad, uinfo->_sifields._pad, SI_PAD_SIZE32))
699 return -EFAULT;
700 set_fs (KERNEL_DS);
701	 /* The __user pointer cast is valid because of the set_fs() */
702 ret = sys_rt_sigqueueinfo((int)pid, (int)sig, (siginfo_t __user *) &info);
703 set_fs (old_fs);
704 return ret;
705}
706/*
707 * Start Alternate signal stack support
708 *
709 * System Calls
710 * sigaltstack		compat_sys_sigaltstack
711 */
712
713int compat_sys_sigaltstack(u32 __new, u32 __old, int r5,
714 int r6, int r7, int r8, struct pt_regs *regs)
715{
716 stack_32_t __user * newstack = (stack_32_t __user *)(long) __new;
717 stack_32_t __user * oldstack = (stack_32_t __user *)(long) __old;
718 stack_t uss, uoss;
719 int ret;
720 mm_segment_t old_fs;
721 unsigned long sp;
722 compat_uptr_t ss_sp;
723
724 /*
725	 * Set sp to the user stack on entry to the system call;
726	 * the system call router sets R9 to the saved registers.
727 */
728 sp = regs->gpr[1];
729
730 /* Put new stack info in local 64 bit stack struct */
731 if (newstack) {
732 if (get_user(ss_sp, &newstack->ss_sp) ||
733 __get_user(uss.ss_flags, &newstack->ss_flags) ||
734 __get_user(uss.ss_size, &newstack->ss_size))
735 return -EFAULT;
736 uss.ss_sp = compat_ptr(ss_sp);
737 }
738
739 old_fs = get_fs();
740 set_fs(KERNEL_DS);
741 /* The __user pointer casts are valid because of the set_fs() */
742 ret = do_sigaltstack(
743 newstack ? (stack_t __user *) &uss : NULL,
744 oldstack ? (stack_t __user *) &uoss : NULL,
745 sp);
746 set_fs(old_fs);
747 /* Copy the stack information to the user output buffer */
748 if (!ret && oldstack &&
749 (put_user((long)uoss.ss_sp, &oldstack->ss_sp) ||
750 __put_user(uoss.ss_flags, &oldstack->ss_flags) ||
751 __put_user(uoss.ss_size, &oldstack->ss_size)))
752 return -EFAULT;
753 return ret;
754}
755#endif /* CONFIG_PPC64 */
756
757
758/*
759 * Restore the user process's signal mask
760 */
761#ifdef CONFIG_PPC64
762extern void restore_sigmask(sigset_t *set);
763#else /* CONFIG_PPC64 */
764static void restore_sigmask(sigset_t *set)
765{
766 sigdelsetmask(set, ~_BLOCKABLE);
767 spin_lock_irq(&current->sighand->siglock);
768 current->blocked = *set;
769 recalc_sigpending();
770 spin_unlock_irq(&current->sighand->siglock);
771}
772#endif
773
774/*
775 * Set up a signal frame for a "real-time" signal handler
776 * (one which gets siginfo).
777 */
778static int handle_rt_signal(unsigned long sig, struct k_sigaction *ka,
779 siginfo_t *info, sigset_t *oldset,
780 struct pt_regs *regs, unsigned long newsp)
781{
782 struct rt_sigframe __user *rt_sf;
783 struct mcontext __user *frame;
784 unsigned long origsp = newsp;
785
786 /* Set up Signal Frame */
787 /* Put a Real Time Context onto stack */
788 newsp -= sizeof(*rt_sf);
789 rt_sf = (struct rt_sigframe __user *)newsp;
790
791 /* create a stack frame for the caller of the handler */
792 newsp -= __SIGNAL_FRAMESIZE + 16;
793
794 if (!access_ok(VERIFY_WRITE, (void __user *)newsp, origsp - newsp))
795 goto badframe;
796
797 /* Put the siginfo & fill in most of the ucontext */
798 if (copy_siginfo_to_user(&rt_sf->info, info)
799 || __put_user(0, &rt_sf->uc.uc_flags)
800 || __put_user(0, &rt_sf->uc.uc_link)
801 || __put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp)
802 || __put_user(sas_ss_flags(regs->gpr[1]),
803 &rt_sf->uc.uc_stack.ss_flags)
804 || __put_user(current->sas_ss_size, &rt_sf->uc.uc_stack.ss_size)
805 || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
806 &rt_sf->uc.uc_regs)
807 || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
808 goto badframe;
809
810 /* Save user registers on the stack */
811 frame = &rt_sf->uc.uc_mcontext;
812#ifdef CONFIG_PPC64
813 if (vdso32_rt_sigtramp && current->thread.vdso_base) {
814 if (save_user_regs(regs, frame, 0))
815 goto badframe;
816 regs->link = current->thread.vdso_base + vdso32_rt_sigtramp;
817 } else
818#endif
819 {
820 if (save_user_regs(regs, frame, __NR_rt_sigreturn))
821 goto badframe;
822 regs->link = (unsigned long) frame->tramp;
823 }
824 if (put_user(regs->gpr[1], (u32 __user *)newsp))
825 goto badframe;
826 regs->gpr[1] = newsp;
827 regs->gpr[3] = sig;
828 regs->gpr[4] = (unsigned long) &rt_sf->info;
829 regs->gpr[5] = (unsigned long) &rt_sf->uc;
830 regs->gpr[6] = (unsigned long) rt_sf;
831 regs->nip = (unsigned long) ka->sa.sa_handler;
832 regs->trap = 0;
833#ifdef CONFIG_PPC64
834 regs->result = 0;
835
836 if (test_thread_flag(TIF_SINGLESTEP))
837 ptrace_notify(SIGTRAP);
838#endif
839 return 1;
840
841badframe:
842#ifdef DEBUG_SIG
843 printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
844 regs, frame, newsp);
845#endif
846 force_sigsegv(sig, current);
847 return 0;
848}
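/*
 * Editorial note: with the registers set up above, the handler is
 * entered as if called with the SA_SIGINFO prototype
 *
 *	void handler(int sig, siginfo_t *info, struct ucontext *uc);
 *
 * r3/r4/r5 carry the three arguments, r1 points at the newly built
 * frame, and the link register aims at the sigreturn trampoline so a
 * plain blr from the handler re-enters the kernel.
 */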
849
850static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
851{
852 sigset_t set;
853 struct mcontext __user *mcp;
854
855 if (get_sigset_t(&set, &ucp->uc_sigmask))
856 return -EFAULT;
857#ifdef CONFIG_PPC64
858 {
859 u32 cmcp;
860
861 if (__get_user(cmcp, &ucp->uc_regs))
862 return -EFAULT;
863 mcp = (struct mcontext __user *)(u64)cmcp;
864 }
865#else
866 if (__get_user(mcp, &ucp->uc_regs))
867 return -EFAULT;
868#endif
869 restore_sigmask(&set);
870 if (restore_user_regs(regs, mcp, sig))
871 return -EFAULT;
872
873 return 0;
874}
875
876long sys_swapcontext(struct ucontext __user *old_ctx,
877 struct ucontext __user *new_ctx,
878 int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
879{
880 unsigned char tmp;
881
882 /* Context size is for future use. Right now, we only make sure
883	 * we are passed something we understand.
884 */
885 if (ctx_size < sizeof(struct ucontext))
886 return -EINVAL;
887
888 if (old_ctx != NULL) {
889 if (!access_ok(VERIFY_WRITE, old_ctx, sizeof(*old_ctx))
890 || save_user_regs(regs, &old_ctx->uc_mcontext, 0)
891 || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
892 || __put_user(to_user_ptr(&old_ctx->uc_mcontext),
893 &old_ctx->uc_regs))
894 return -EFAULT;
895 }
896 if (new_ctx == NULL)
897 return 0;
898 if (!access_ok(VERIFY_READ, new_ctx, sizeof(*new_ctx))
899 || __get_user(tmp, (u8 __user *) new_ctx)
900 || __get_user(tmp, (u8 __user *) (new_ctx + 1) - 1))
901 return -EFAULT;
902
903 /*
904 * If we get a fault copying the context into the kernel's
905 * image of the user's registers, we can't just return -EFAULT
906 * because the user's registers will be corrupted. For instance
907 * the NIP value may have been updated but not some of the
908 * other registers. Given that we have done the access_ok
909 * and successfully read the first and last bytes of the region
910 * above, this should only happen in an out-of-memory situation
911 * or if another thread unmaps the region containing the context.
912 * We kill the task with a SIGSEGV in this situation.
913 */
914 if (do_setcontext(new_ctx, regs, 0))
915 do_exit(SIGSEGV);
916 sigreturn_exit(regs);
917 /* doesn't actually return back to here */
918 return 0;
919}
920
921long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
922 struct pt_regs *regs)
923{
924 struct rt_sigframe __user *rt_sf;
925
926 /* Always make any pending restarted system calls return -EINTR */
927 current_thread_info()->restart_block.fn = do_no_restart_syscall;
928
929 rt_sf = (struct rt_sigframe __user *)
930 (regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
931 if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
932 goto bad;
933 if (do_setcontext(&rt_sf->uc, regs, 1))
934 goto bad;
935
936 /*
937 * It's not clear whether or why it is desirable to save the
938 * sigaltstack setting on signal delivery and restore it on
939 * signal return. But other architectures do this and we have
940 * always done it up until now so it is probably better not to
941 * change it. -- paulus
942 */
943#ifdef CONFIG_PPC64
944 /*
944	 * We use the compat_sys_ version that does the 32/64-bit conversion
945	 * and takes the userland pointer directly. What about error checking?
946	 * Nobody does any...
948 */
949 compat_sys_sigaltstack((u32)(u64)&rt_sf->uc.uc_stack, 0, 0, 0, 0, 0, regs);
950 return (int)regs->result;
951#else
952 do_sigaltstack(&rt_sf->uc.uc_stack, NULL, regs->gpr[1]);
953 sigreturn_exit(regs); /* doesn't return here */
954 return 0;
955#endif
956
957 bad:
958 force_sig(SIGSEGV, current);
959 return 0;
960}
961
962#ifdef CONFIG_PPC32
963int sys_debug_setcontext(struct ucontext __user *ctx,
964 int ndbg, struct sig_dbg_op __user *dbg,
965 int r6, int r7, int r8,
966 struct pt_regs *regs)
967{
968 struct sig_dbg_op op;
969 int i;
970 unsigned long new_msr = regs->msr;
971#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
972 unsigned long new_dbcr0 = current->thread.dbcr0;
973#endif
974
975	 for (i = 0; i < ndbg; i++) {
976 if (__copy_from_user(&op, dbg, sizeof(op)))
977 return -EFAULT;
978 switch (op.dbg_type) {
979 case SIG_DBG_SINGLE_STEPPING:
980#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
981 if (op.dbg_value) {
982 new_msr |= MSR_DE;
983 new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
984 } else {
985 new_msr &= ~MSR_DE;
986 new_dbcr0 &= ~(DBCR0_IDM | DBCR0_IC);
987 }
988#else
989 if (op.dbg_value)
990 new_msr |= MSR_SE;
991 else
992 new_msr &= ~MSR_SE;
993#endif
994 break;
995 case SIG_DBG_BRANCH_TRACING:
996#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
997 return -EINVAL;
998#else
999 if (op.dbg_value)
1000 new_msr |= MSR_BE;
1001 else
1002 new_msr &= ~MSR_BE;
1003#endif
1004 break;
1005
1006 default:
1007 return -EINVAL;
1008 }
1009 }
1010
1011	 /* We wait until here to actually install the values in the
1012	 registers so that a failure in the loop above does not
1013	 affect the contents of these registers. After this point,
1014	 failure is a problem anyway, and it's very unlikely unless
1015	 the user is really doing something wrong. */
1016 regs->msr = new_msr;
1017#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
1018 current->thread.dbcr0 = new_dbcr0;
1019#endif
1020
1021 /*
1022 * If we get a fault copying the context into the kernel's
1023 * image of the user's registers, we can't just return -EFAULT
1024 * because the user's registers will be corrupted. For instance
1025 * the NIP value may have been updated but not some of the
1026 * other registers. Given that we have done the access_ok
1027 * and successfully read the first and last bytes of the region
1028 * above, this should only happen in an out-of-memory situation
1029 * or if another thread unmaps the region containing the context.
1030 * We kill the task with a SIGSEGV in this situation.
1031 */
1032 if (do_setcontext(ctx, regs, 1)) {
1033 force_sig(SIGSEGV, current);
1034 goto out;
1035 }
1036
1037 /*
1038 * It's not clear whether or why it is desirable to save the
1039 * sigaltstack setting on signal delivery and restore it on
1040 * signal return. But other architectures do this and we have
1041 * always done it up until now so it is probably better not to
1042 * change it. -- paulus
1043 */
1044 do_sigaltstack(&ctx->uc_stack, NULL, regs->gpr[1]);
1045
1046 sigreturn_exit(regs);
1047 /* doesn't actually return back to here */
1048
1049 out:
1050 return 0;
1051}
1052#endif
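/*
 * Editorial sketch of a hypothetical userspace caller of the above:
 * enable single-stepping while installing a saved context.  The
 * syscall invocation is illustrative only.
 */
#if 0
static int step_into_context(struct ucontext *ctx)
{
	struct sig_dbg_op op = {
		.dbg_type  = SIG_DBG_SINGLE_STEPPING,
		.dbg_value = 1,
	};

	return syscall(__NR_debug_setcontext, ctx, 1, &op);
}
#endif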
1053
1054/*
1055 * OK, we're invoking a handler
1056 */
1057static int handle_signal(unsigned long sig, struct k_sigaction *ka,
1058 siginfo_t *info, sigset_t *oldset, struct pt_regs *regs,
1059 unsigned long newsp)
1060{
1061 struct sigcontext __user *sc;
1062 struct sigregs __user *frame;
1063 unsigned long origsp = newsp;
1064
1065 /* Set up Signal Frame */
1066 newsp -= sizeof(struct sigregs);
1067 frame = (struct sigregs __user *) newsp;
1068
1069 /* Put a sigcontext on the stack */
1070 newsp -= sizeof(*sc);
1071 sc = (struct sigcontext __user *) newsp;
1072
1073 /* create a stack frame for the caller of the handler */
1074 newsp -= __SIGNAL_FRAMESIZE;
1075
1076 if (!access_ok(VERIFY_WRITE, (void __user *) newsp, origsp - newsp))
1077 goto badframe;
1078
1079#if _NSIG != 64
1080#error "Please adjust handle_signal()"
1081#endif
1082 if (__put_user(to_user_ptr(ka->sa.sa_handler), &sc->handler)
1083 || __put_user(oldset->sig[0], &sc->oldmask)
1084#ifdef CONFIG_PPC64
1085 || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
1086#else
1087 || __put_user(oldset->sig[1], &sc->_unused[3])
1088#endif
1089 || __put_user(to_user_ptr(frame), &sc->regs)
1090 || __put_user(sig, &sc->signal))
1091 goto badframe;
1092
1093#ifdef CONFIG_PPC64
1094 if (vdso32_sigtramp && current->thread.vdso_base) {
1095 if (save_user_regs(regs, &frame->mctx, 0))
1096 goto badframe;
1097 regs->link = current->thread.vdso_base + vdso32_sigtramp;
1098 } else
1099#endif
1100 {
1101 if (save_user_regs(regs, &frame->mctx, __NR_sigreturn))
1102 goto badframe;
1103 regs->link = (unsigned long) frame->mctx.tramp;
1104 }
1105
1106 if (put_user(regs->gpr[1], (u32 __user *)newsp))
1107 goto badframe;
1108 regs->gpr[1] = newsp;
1109 regs->gpr[3] = sig;
1110 regs->gpr[4] = (unsigned long) sc;
1111 regs->nip = (unsigned long) ka->sa.sa_handler;
1112 regs->trap = 0;
1113#ifdef CONFIG_PPC64
1114 regs->result = 0;
1115
1116 if (test_thread_flag(TIF_SINGLESTEP))
1117 ptrace_notify(SIGTRAP);
1118#endif
1119
1120 return 1;
1121
1122badframe:
1123#ifdef DEBUG_SIG
1124 printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n",
1125 regs, frame, newsp);
1126#endif
1127 force_sigsegv(sig, current);
1128 return 0;
1129}
1130
1131/*
1132 * Do a signal return; undo the signal stack.
1133 */
1134long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
1135 struct pt_regs *regs)
1136{
1137 struct sigcontext __user *sc;
1138 struct sigcontext sigctx;
1139 struct mcontext __user *sr;
1140 sigset_t set;
1141
1142 /* Always make any pending restarted system calls return -EINTR */
1143 current_thread_info()->restart_block.fn = do_no_restart_syscall;
1144
1145 sc = (struct sigcontext __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
1146 if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
1147 goto badframe;
1148
1149#ifdef CONFIG_PPC64
1150 /*
1151 * Note that PPC32 puts the upper 32 bits of the sigmask in the
1152 * unused part of the signal stackframe
1153 */
1154 set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
1155#else
1156 set.sig[0] = sigctx.oldmask;
1157 set.sig[1] = sigctx._unused[3];
1158#endif
1159 restore_sigmask(&set);
1160
1161 sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
1162 if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
1163 || restore_user_regs(regs, sr, 1))
1164 goto badframe;
1165
1166#ifdef CONFIG_PPC64
1167 return (int)regs->result;
1168#else
1169 sigreturn_exit(regs); /* doesn't return */
1170 return 0;
1171#endif
1172
1173badframe:
1174 force_sig(SIGSEGV, current);
1175 return 0;
1176}
1177
1178/*
1179 * Note that 'init' is a special process: it doesn't get signals it doesn't
1180 * want to handle. Thus you cannot kill init even with a SIGKILL even by
1181 * mistake.
1182 */
1183int do_signal(sigset_t *oldset, struct pt_regs *regs)
1184{
1185 siginfo_t info;
1186 struct k_sigaction ka;
1187 unsigned int frame, newsp;
1188 int signr, ret;
1189
1190#ifdef CONFIG_PPC32
1191 if (try_to_freeze()) {
1192 signr = 0;
1193 if (!signal_pending(current))
1194 goto no_signal;
1195 }
1196#endif
1197
1198 if (!oldset)
1199 oldset = &current->blocked;
1200
1201 newsp = frame = 0;
1202
1203 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
1204#ifdef CONFIG_PPC32
1205no_signal:
1206#endif
1207 if (TRAP(regs) == 0x0C00 /* System Call! */
1208 && regs->ccr & 0x10000000 /* error signalled */
1209 && ((ret = regs->gpr[3]) == ERESTARTSYS
1210 || ret == ERESTARTNOHAND || ret == ERESTARTNOINTR
1211 || ret == ERESTART_RESTARTBLOCK)) {
1212
1213 if (signr > 0
1214 && (ret == ERESTARTNOHAND || ret == ERESTART_RESTARTBLOCK
1215 || (ret == ERESTARTSYS
1216 && !(ka.sa.sa_flags & SA_RESTART)))) {
1217 /* make the system call return an EINTR error */
1218 regs->result = -EINTR;
1219 regs->gpr[3] = EINTR;
1220 /* note that the cr0.SO bit is already set */
1221 } else {
1222 regs->nip -= 4; /* Back up & retry system call */
1223 regs->result = 0;
1224 regs->trap = 0;
1225 if (ret == ERESTART_RESTARTBLOCK)
1226 regs->gpr[0] = __NR_restart_syscall;
1227 else
1228 regs->gpr[3] = regs->orig_gpr3;
1229 }
1230 }
1231
1232 if (signr == 0)
1233 return 0; /* no signals delivered */
1234
1235 if ((ka.sa.sa_flags & SA_ONSTACK) && current->sas_ss_size
1236 && !on_sig_stack(regs->gpr[1]))
1237 newsp = current->sas_ss_sp + current->sas_ss_size;
1238 else
1239 newsp = regs->gpr[1];
1240 newsp &= ~0xfUL;
1241
1242#ifdef CONFIG_PPC64
1243 /*
1244 * Reenable the DABR before delivering the signal to
1245 * user space. The DABR will have been cleared if it
1246 * triggered inside the kernel.
1247 */
1248 if (current->thread.dabr)
1249 set_dabr(current->thread.dabr);
1250#endif
1251
1252 /* Whee! Actually deliver the signal. */
1253 if (ka.sa.sa_flags & SA_SIGINFO)
1254 ret = handle_rt_signal(signr, &ka, &info, oldset, regs, newsp);
1255 else
1256 ret = handle_signal(signr, &ka, &info, oldset, regs, newsp);
1257
1258 if (ret) {
1259 spin_lock_irq(&current->sighand->siglock);
1260 sigorsets(&current->blocked, &current->blocked,
1261 &ka.sa.sa_mask);
1262 if (!(ka.sa.sa_flags & SA_NODEFER))
1263 sigaddset(&current->blocked, signr);
1264 recalc_sigpending();
1265 spin_unlock_irq(&current->sighand->siglock);
1266 }
1267
1268 return ret;
1269}
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
new file mode 100644
index 000000000000..a8210ed5c686
--- /dev/null
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -0,0 +1,1008 @@
1/*
2 * sys_ppc32.c: Conversion between 32bit and 64bit native syscalls.
3 *
4 * Copyright (C) 2001 IBM
5 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
7 *
8 * These routines maintain argument size conversion between 32bit and 64bit
9 * environment.
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 */
16
17#include <linux/config.h>
18#include <linux/kernel.h>
19#include <linux/sched.h>
20#include <linux/fs.h>
21#include <linux/mm.h>
22#include <linux/file.h>
23#include <linux/signal.h>
24#include <linux/resource.h>
25#include <linux/times.h>
26#include <linux/utsname.h>
27#include <linux/timex.h>
28#include <linux/smp.h>
29#include <linux/smp_lock.h>
30#include <linux/sem.h>
31#include <linux/msg.h>
32#include <linux/shm.h>
33#include <linux/poll.h>
34#include <linux/personality.h>
35#include <linux/stat.h>
36#include <linux/mman.h>
37#include <linux/in.h>
38#include <linux/syscalls.h>
39#include <linux/unistd.h>
40#include <linux/sysctl.h>
41#include <linux/binfmts.h>
42#include <linux/security.h>
43#include <linux/compat.h>
44#include <linux/ptrace.h>
45#include <linux/elf.h>
46
47#include <asm/ptrace.h>
48#include <asm/types.h>
49#include <asm/ipc.h>
50#include <asm/uaccess.h>
51#include <asm/unistd.h>
52#include <asm/semaphore.h>
53#include <asm/time.h>
54#include <asm/mmu_context.h>
55#include <asm/systemcfg.h>
56#include <asm/ppc-pci.h>
57
58/* readdir & getdents */
59#define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de)))
60#define ROUND_UP(x) (((x)+sizeof(u32)-1) & ~(sizeof(u32)-1))
61
62struct old_linux_dirent32 {
63 u32 d_ino;
64 u32 d_offset;
65 unsigned short d_namlen;
66 char d_name[1];
67};
68
69struct readdir_callback32 {
70 struct old_linux_dirent32 __user * dirent;
71 int count;
72};
73
74static int fillonedir(void * __buf, const char * name, int namlen,
75 off_t offset, ino_t ino, unsigned int d_type)
76{
77 struct readdir_callback32 * buf = (struct readdir_callback32 *) __buf;
78 struct old_linux_dirent32 __user * dirent;
79
80 if (buf->count)
81 return -EINVAL;
82 buf->count++;
83 dirent = buf->dirent;
84 put_user(ino, &dirent->d_ino);
85 put_user(offset, &dirent->d_offset);
86 put_user(namlen, &dirent->d_namlen);
87 copy_to_user(dirent->d_name, name, namlen);
88 put_user(0, dirent->d_name + namlen);
89 return 0;
90}
91
92asmlinkage int old32_readdir(unsigned int fd, struct old_linux_dirent32 __user *dirent, unsigned int count)
93{
94 int error = -EBADF;
95 struct file * file;
96 struct readdir_callback32 buf;
97
98 file = fget(fd);
99 if (!file)
100 goto out;
101
102 buf.count = 0;
103 buf.dirent = dirent;
104
105 error = vfs_readdir(file, (filldir_t)fillonedir, &buf);
106 if (error < 0)
107 goto out_putf;
108 error = buf.count;
109
110out_putf:
111 fput(file);
112out:
113 return error;
114}
115
116asmlinkage long ppc32_select(u32 n, compat_ulong_t __user *inp,
117 compat_ulong_t __user *outp, compat_ulong_t __user *exp,
118 compat_uptr_t tvp_x)
119{
120 /* sign extend n */
121 return compat_sys_select((int)n, inp, outp, exp, compat_ptr(tvp_x));
122}
123
124int cp_compat_stat(struct kstat *stat, struct compat_stat __user *statbuf)
125{
126 long err;
127
128 if (stat->size > MAX_NON_LFS || !new_valid_dev(stat->dev) ||
129 !new_valid_dev(stat->rdev))
130 return -EOVERFLOW;
131
132 err = access_ok(VERIFY_WRITE, statbuf, sizeof(*statbuf)) ? 0 : -EFAULT;
133 err |= __put_user(new_encode_dev(stat->dev), &statbuf->st_dev);
134 err |= __put_user(stat->ino, &statbuf->st_ino);
135 err |= __put_user(stat->mode, &statbuf->st_mode);
136 err |= __put_user(stat->nlink, &statbuf->st_nlink);
137 err |= __put_user(stat->uid, &statbuf->st_uid);
138 err |= __put_user(stat->gid, &statbuf->st_gid);
139 err |= __put_user(new_encode_dev(stat->rdev), &statbuf->st_rdev);
140 err |= __put_user(stat->size, &statbuf->st_size);
141 err |= __put_user(stat->atime.tv_sec, &statbuf->st_atime);
142 err |= __put_user(stat->atime.tv_nsec, &statbuf->st_atime_nsec);
143 err |= __put_user(stat->mtime.tv_sec, &statbuf->st_mtime);
144 err |= __put_user(stat->mtime.tv_nsec, &statbuf->st_mtime_nsec);
145 err |= __put_user(stat->ctime.tv_sec, &statbuf->st_ctime);
146 err |= __put_user(stat->ctime.tv_nsec, &statbuf->st_ctime_nsec);
147 err |= __put_user(stat->blksize, &statbuf->st_blksize);
148 err |= __put_user(stat->blocks, &statbuf->st_blocks);
149 err |= __put_user(0, &statbuf->__unused4[0]);
150 err |= __put_user(0, &statbuf->__unused4[1]);
151
152 return err;
153}
154
155/* Note: it is necessary to treat option as an unsigned int,
156 * with the corresponding cast to a signed int to ensure that the
157 * proper conversion (sign extension) between the 32-bit register
158 * representation and the 64-bit register representation is performed.
159 */
160asmlinkage long compat_sys_sysfs(u32 option, u32 arg1, u32 arg2)
161{
162 return sys_sysfs((int)option, arg1, arg2);
163}
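/*
 * Editorial worked example of why the (int) cast matters: a 32-bit
 * caller passing -1 presents 0xffffffff in the low half of a 64-bit
 * register, and the cast sign-extends it back:
 *
 *	u32 option = 0xffffffff;
 *	(long)option      == 4294967295	(wrong)
 *	(long)(int)option == -1		(what the caller meant)
 */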
164
165/* Handle adjtimex compatibility. */
166struct timex32 {
167 u32 modes;
168 s32 offset, freq, maxerror, esterror;
169 s32 status, constant, precision, tolerance;
170 struct compat_timeval time;
171 s32 tick;
172 s32 ppsfreq, jitter, shift, stabil;
173 s32 jitcnt, calcnt, errcnt, stbcnt;
174 s32 :32; s32 :32; s32 :32; s32 :32;
175 s32 :32; s32 :32; s32 :32; s32 :32;
176 s32 :32; s32 :32; s32 :32; s32 :32;
177};
178
179extern int do_adjtimex(struct timex *);
180extern void ppc_adjtimex(void);
181
182asmlinkage long compat_sys_adjtimex(struct timex32 __user *utp)
183{
184 struct timex txc;
185 int ret;
186
187 memset(&txc, 0, sizeof(struct timex));
188
189	 if (get_user(txc.modes, &utp->modes) ||
190 __get_user(txc.offset, &utp->offset) ||
191 __get_user(txc.freq, &utp->freq) ||
192 __get_user(txc.maxerror, &utp->maxerror) ||
193 __get_user(txc.esterror, &utp->esterror) ||
194 __get_user(txc.status, &utp->status) ||
195 __get_user(txc.constant, &utp->constant) ||
196 __get_user(txc.precision, &utp->precision) ||
197 __get_user(txc.tolerance, &utp->tolerance) ||
198 __get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
199 __get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
200 __get_user(txc.tick, &utp->tick) ||
201 __get_user(txc.ppsfreq, &utp->ppsfreq) ||
202 __get_user(txc.jitter, &utp->jitter) ||
203 __get_user(txc.shift, &utp->shift) ||
204 __get_user(txc.stabil, &utp->stabil) ||
205 __get_user(txc.jitcnt, &utp->jitcnt) ||
206 __get_user(txc.calcnt, &utp->calcnt) ||
207 __get_user(txc.errcnt, &utp->errcnt) ||
208 __get_user(txc.stbcnt, &utp->stbcnt))
209 return -EFAULT;
210
211 ret = do_adjtimex(&txc);
212
213 /* adjust the conversion of TB to time of day to track adjtimex */
214 ppc_adjtimex();
215
216	 if (put_user(txc.modes, &utp->modes) ||
217 __put_user(txc.offset, &utp->offset) ||
218 __put_user(txc.freq, &utp->freq) ||
219 __put_user(txc.maxerror, &utp->maxerror) ||
220 __put_user(txc.esterror, &utp->esterror) ||
221 __put_user(txc.status, &utp->status) ||
222 __put_user(txc.constant, &utp->constant) ||
223 __put_user(txc.precision, &utp->precision) ||
224 __put_user(txc.tolerance, &utp->tolerance) ||
225 __put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
226 __put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
227 __put_user(txc.tick, &utp->tick) ||
228 __put_user(txc.ppsfreq, &utp->ppsfreq) ||
229 __put_user(txc.jitter, &utp->jitter) ||
230 __put_user(txc.shift, &utp->shift) ||
231 __put_user(txc.stabil, &utp->stabil) ||
232 __put_user(txc.jitcnt, &utp->jitcnt) ||
233 __put_user(txc.calcnt, &utp->calcnt) ||
234 __put_user(txc.errcnt, &utp->errcnt) ||
235 __put_user(txc.stbcnt, &utp->stbcnt))
236 ret = -EFAULT;
237
238 return ret;
239}
240
241asmlinkage long compat_sys_pause(void)
242{
243 current->state = TASK_INTERRUPTIBLE;
244 schedule();
245
246 return -ERESTARTNOHAND;
247}
248
249static inline long get_ts32(struct timespec *o, struct compat_timeval __user *i)
250{
251 long usec;
252
253 if (!access_ok(VERIFY_READ, i, sizeof(*i)))
254 return -EFAULT;
255 if (__get_user(o->tv_sec, &i->tv_sec))
256 return -EFAULT;
257 if (__get_user(usec, &i->tv_usec))
258 return -EFAULT;
259 o->tv_nsec = usec * 1000;
260 return 0;
261}
262
263static inline long put_tv32(struct compat_timeval __user *o, struct timeval *i)
264{
265 return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
266 (__put_user(i->tv_sec, &o->tv_sec) |
267 __put_user(i->tv_usec, &o->tv_usec)));
268}
269
270struct sysinfo32 {
271 s32 uptime;
272 u32 loads[3];
273 u32 totalram;
274 u32 freeram;
275 u32 sharedram;
276 u32 bufferram;
277 u32 totalswap;
278 u32 freeswap;
279 unsigned short procs;
280 unsigned short pad;
281 u32 totalhigh;
282 u32 freehigh;
283 u32 mem_unit;
284 char _f[20-2*sizeof(int)-sizeof(int)];
285};
286
287asmlinkage long compat_sys_sysinfo(struct sysinfo32 __user *info)
288{
289 struct sysinfo s;
290 int ret, err;
291	 int bitcount = 0;
292 mm_segment_t old_fs = get_fs ();
293
294 /* The __user cast is valid due to set_fs() */
295 set_fs (KERNEL_DS);
296 ret = sys_sysinfo((struct sysinfo __user *)&s);
297 set_fs (old_fs);
298
299 /* Check to see if any memory value is too large for 32-bit and
300 * scale down if needed.
301 */
302 if ((s.totalram >> 32) || (s.totalswap >> 32)) {
303 while (s.mem_unit < PAGE_SIZE) {
304 s.mem_unit <<= 1;
305 bitcount++;
306 }
307	 s.totalram >>= bitcount;
308 s.freeram >>= bitcount;
309 s.sharedram >>= bitcount;
310 s.bufferram >>= bitcount;
311 s.totalswap >>= bitcount;
312 s.freeswap >>= bitcount;
313 s.totalhigh >>= bitcount;
314 s.freehigh >>= bitcount;
315 }
316
317 err = put_user (s.uptime, &info->uptime);
318 err |= __put_user (s.loads[0], &info->loads[0]);
319 err |= __put_user (s.loads[1], &info->loads[1]);
320 err |= __put_user (s.loads[2], &info->loads[2]);
321 err |= __put_user (s.totalram, &info->totalram);
322 err |= __put_user (s.freeram, &info->freeram);
323 err |= __put_user (s.sharedram, &info->sharedram);
324 err |= __put_user (s.bufferram, &info->bufferram);
325 err |= __put_user (s.totalswap, &info->totalswap);
326 err |= __put_user (s.freeswap, &info->freeswap);
327 err |= __put_user (s.procs, &info->procs);
328 err |= __put_user (s.totalhigh, &info->totalhigh);
329 err |= __put_user (s.freehigh, &info->freehigh);
330 err |= __put_user (s.mem_unit, &info->mem_unit);
331 if (err)
332 return -EFAULT;
333
334 return ret;
335}
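/*
 * Editorial worked example of the scaling above, assuming 4 KB pages:
 * with 8 GB of RAM and mem_unit == 1, totalram == 2^33 overflows
 * 32 bits.  Doubling mem_unit while halving the counts keeps
 * totalram * mem_unit constant; after 12 steps mem_unit == 4096 and
 * totalram == 2^21, which fits in the 32-bit field.
 */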
336
337
338
339
340/* Translations due to time_t size differences, which affect all
341 sorts of things, like timeval and itimerval. */
342extern struct timezone sys_tz;
343
344asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
345{
346 if (tv) {
347 struct timeval ktv;
348 do_gettimeofday(&ktv);
349 if (put_tv32(tv, &ktv))
350 return -EFAULT;
351 }
352 if (tz) {
353 if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
354 return -EFAULT;
355 }
356
357 return 0;
358}
359
360
361
362asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
363{
364 struct timespec kts;
365 struct timezone ktz;
366
367 if (tv) {
368 if (get_ts32(&kts, tv))
369 return -EFAULT;
370 }
371 if (tz) {
372 if (copy_from_user(&ktz, tz, sizeof(ktz)))
373 return -EFAULT;
374 }
375
376 return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
377}
378
379#ifdef CONFIG_SYSVIPC
380long compat_sys_ipc(u32 call, u32 first, u32 second, u32 third, compat_uptr_t ptr,
381 u32 fifth)
382{
383 int version;
384
385 version = call >> 16; /* hack for backward compatibility */
386 call &= 0xffff;
387
388 switch (call) {
389
390 case SEMTIMEDOP:
391 if (fifth)
392 /* sign extend semid */
393 return compat_sys_semtimedop((int)first,
394 compat_ptr(ptr), second,
395 compat_ptr(fifth));
396 /* else fall through for normal semop() */
397 case SEMOP:
398 /* struct sembuf is the same on 32 and 64bit :)) */
399 /* sign extend semid */
400 return sys_semtimedop((int)first, compat_ptr(ptr), second,
401 NULL);
402 case SEMGET:
403 /* sign extend key, nsems */
404 return sys_semget((int)first, (int)second, third);
405 case SEMCTL:
406 /* sign extend semid, semnum */
407 return compat_sys_semctl((int)first, (int)second, third,
408 compat_ptr(ptr));
409
410 case MSGSND:
411 /* sign extend msqid */
412 return compat_sys_msgsnd((int)first, (int)second, third,
413 compat_ptr(ptr));
414 case MSGRCV:
415 /* sign extend msqid, msgtyp */
416 return compat_sys_msgrcv((int)first, second, (int)fifth,
417 third, version, compat_ptr(ptr));
418 case MSGGET:
419 /* sign extend key */
420 return sys_msgget((int)first, second);
421 case MSGCTL:
422 /* sign extend msqid */
423 return compat_sys_msgctl((int)first, second, compat_ptr(ptr));
424
425 case SHMAT:
426 /* sign extend shmid */
427 return compat_sys_shmat((int)first, second, third, version,
428 compat_ptr(ptr));
429 case SHMDT:
430 return sys_shmdt(compat_ptr(ptr));
431 case SHMGET:
432 /* sign extend key_t */
433 return sys_shmget((int)first, second, third);
434 case SHMCTL:
435 /* sign extend shmid */
436 return compat_sys_shmctl((int)first, second, compat_ptr(ptr));
437
438 default:
439 return -ENOSYS;
440 }
441
442 return -ENOSYS;
443}
444#endif
445
446/* Note: it is necessary to treat out_fd and in_fd as unsigned ints,
447 * with the corresponding cast to a signed int to ensure that the
448 * proper conversion (sign extension) between the 32-bit register
449 * representation and the 64-bit register representation is performed.
450 */
451asmlinkage long compat_sys_sendfile(u32 out_fd, u32 in_fd, compat_off_t __user * offset, u32 count)
452{
453 mm_segment_t old_fs = get_fs();
454 int ret;
455 off_t of;
456 off_t __user *up;
457
458 if (offset && get_user(of, offset))
459 return -EFAULT;
460
461 /* The __user pointer cast is valid because of the set_fs() */
462 set_fs(KERNEL_DS);
463 up = offset ? (off_t __user *) &of : NULL;
464 ret = sys_sendfile((int)out_fd, (int)in_fd, up, count);
465 set_fs(old_fs);
466
467 if (offset && put_user(of, offset))
468 return -EFAULT;
469
470 return ret;
471}
472
473asmlinkage int compat_sys_sendfile64(int out_fd, int in_fd, compat_loff_t __user *offset, s32 count)
474{
475 mm_segment_t old_fs = get_fs();
476 int ret;
477 loff_t lof;
478 loff_t __user *up;
479
480 if (offset && get_user(lof, offset))
481 return -EFAULT;
482
483 /* The __user pointer cast is valid because of the set_fs() */
484 set_fs(KERNEL_DS);
485 up = offset ? (loff_t __user *) &lof : NULL;
486 ret = sys_sendfile64(out_fd, in_fd, up, count);
487 set_fs(old_fs);
488
489 if (offset && put_user(lof, offset))
490 return -EFAULT;
491
492 return ret;
493}
494
495long compat_sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
496 unsigned long a3, unsigned long a4, unsigned long a5,
497 struct pt_regs *regs)
498{
499 int error;
500 char * filename;
501
502 filename = getname((char __user *) a0);
503 error = PTR_ERR(filename);
504 if (IS_ERR(filename))
505 goto out;
506 flush_fp_to_thread(current);
507 flush_altivec_to_thread(current);
508
509 error = compat_do_execve(filename, compat_ptr(a1), compat_ptr(a2), regs);
510
511 if (error == 0) {
512 task_lock(current);
513 current->ptrace &= ~PT_DTRACE;
514 task_unlock(current);
515 }
516 putname(filename);
517
518out:
519 return error;
520}
521
522/* Note: it is necessary to treat option as an unsigned int,
523 * with the corresponding cast to a signed int to ensure that the
524 * proper conversion (sign extension) between the 32-bit register
525 * representation and the 64-bit register representation is performed.
526 */
527asmlinkage long compat_sys_prctl(u32 option, u32 arg2, u32 arg3, u32 arg4, u32 arg5)
528{
529 return sys_prctl((int)option,
530 (unsigned long) arg2,
531 (unsigned long) arg3,
532 (unsigned long) arg4,
533 (unsigned long) arg5);
534}
535
536/* Note: it is necessary to treat pid as an unsigned int,
537 * with the corresponding cast to a signed int to ensure that the
538 * proper conversion (sign extension) between the 32-bit register
539 * representation and the 64-bit register representation is performed.
540 */
541asmlinkage long compat_sys_sched_rr_get_interval(u32 pid, struct compat_timespec __user *interval)
542{
543 struct timespec t;
544 int ret;
545 mm_segment_t old_fs = get_fs ();
546
547 /* The __user pointer cast is valid because of the set_fs() */
548 set_fs (KERNEL_DS);
549 ret = sys_sched_rr_get_interval((int)pid, (struct timespec __user *) &t);
550 set_fs (old_fs);
551 if (put_compat_timespec(&t, interval))
552 return -EFAULT;
553 return ret;
554}
555
556asmlinkage int compat_sys_pciconfig_read(u32 bus, u32 dfn, u32 off, u32 len, u32 ubuf)
557{
558 return sys_pciconfig_read((unsigned long) bus,
559 (unsigned long) dfn,
560 (unsigned long) off,
561 (unsigned long) len,
562 compat_ptr(ubuf));
563}
564
565asmlinkage int compat_sys_pciconfig_write(u32 bus, u32 dfn, u32 off, u32 len, u32 ubuf)
566{
567 return sys_pciconfig_write((unsigned long) bus,
568 (unsigned long) dfn,
569 (unsigned long) off,
570 (unsigned long) len,
571 compat_ptr(ubuf));
572}
573
574asmlinkage int compat_sys_pciconfig_iobase(u32 which, u32 in_bus, u32 in_devfn)
575{
576 return sys_pciconfig_iobase(which, in_bus, in_devfn);
577}
578
579
580/* Note: it is necessary to treat mode as an unsigned int,
581 * with the corresponding cast to a signed int to ensure that the
582 * proper conversion (sign extension) between the 32-bit register
583 * representation and the 64-bit register representation is performed.
584 */
585asmlinkage long compat_sys_access(const char __user * filename, u32 mode)
586{
587 return sys_access(filename, (int)mode);
588}
589
590
591/* Note: it is necessary to treat mode as an unsigned int,
592 * with the corresponding cast to a signed int to ensure that the
593 * proper conversion (sign extension) between the 32-bit register
594 * representation and the 64-bit register representation is performed.
595 */
596asmlinkage long compat_sys_creat(const char __user * pathname, u32 mode)
597{
598 return sys_creat(pathname, (int)mode);
599}
600
601
602/* Note: it is necessary to treat pid and options as unsigned ints,
603 * with the corresponding cast to a signed int to ensure that the
604 * proper conversion (sign extension) between the 32-bit register
605 * representation and the 64-bit register representation is performed.
606 */
607asmlinkage long compat_sys_waitpid(u32 pid, unsigned int __user * stat_addr, u32 options)
608{
609 return sys_waitpid((int)pid, stat_addr, (int)options);
610}
611
612
613/* Note: it is necessary to treat gidsetsize as an unsigned int,
614 * with the corresponding cast to a signed int to ensure that the
615 * proper conversion (sign extension) between the 32-bit register
616 * representation and the 64-bit register representation is performed.
617 */
618asmlinkage long compat_sys_getgroups(u32 gidsetsize, gid_t __user *grouplist)
619{
620 return sys_getgroups((int)gidsetsize, grouplist);
621}
622
623
624/* Note: it is necessary to treat pid as an unsigned int,
625 * with the corresponding cast to a signed int to ensure that the
626 * proper conversion (sign extension) between the 32-bit register
627 * representation and the 64-bit register representation is performed.
628 */
629asmlinkage long compat_sys_getpgid(u32 pid)
630{
631 return sys_getpgid((int)pid);
632}
633
634
635
636/* Note: it is necessary to treat pid as an unsigned int,
637 * with the corresponding cast to a signed int to ensure that the
638 * proper conversion (sign extension) between the 32-bit register
639 * representation and the 64-bit register representation is performed.
640 */
641asmlinkage long compat_sys_getsid(u32 pid)
642{
643 return sys_getsid((int)pid);
644}
645
646
647/* Note: it is necessary to treat pid and sig as unsigned ints,
648 * with the corresponding cast to a signed int to ensure that the
649 * proper conversion (sign extension) between the 32-bit register
650 * representation and the 64-bit register representation is performed.
651 */
652asmlinkage long compat_sys_kill(u32 pid, u32 sig)
653{
654 return sys_kill((int)pid, (int)sig);
655}
656
657
658/* Note: it is necessary to treat mode as an unsigned int,
659 * with the corresponding cast to a signed int to ensure that the
660 * proper conversion (sign extension) between the 32-bit register
661 * representation and the 64-bit register representation is performed.
662 */
663asmlinkage long compat_sys_mkdir(const char __user * pathname, u32 mode)
664{
665 return sys_mkdir(pathname, (int)mode);
666}
667
668long compat_sys_nice(u32 increment)
669{
670 /* sign extend increment */
671 return sys_nice((int)increment);
672}
673
674off_t ppc32_lseek(unsigned int fd, u32 offset, unsigned int origin)
675{
676	 /* sign extend offset */
677 return sys_lseek(fd, (int)offset, origin);
678}
679
680/* Note: it is necessary to treat bufsiz as an unsigned int,
681 * with the corresponding cast to a signed int to ensure that the
682 * proper conversion (sign extension) between the 32-bit register
683 * representation and the 64-bit register representation is performed.
684 */
685asmlinkage long compat_sys_readlink(const char __user * path, char __user * buf, u32 bufsiz)
686{
687 return sys_readlink(path, buf, (int)bufsiz);
688}
689
690/* Note: it is necessary to treat option as an unsigned int,
691 * with the corresponding cast to a signed int to ensure that the
692 * proper conversion (sign extension) between the 32-bit register
693 * representation and the 64-bit register representation is performed.
694 */
695asmlinkage long compat_sys_sched_get_priority_max(u32 policy)
696{
697 return sys_sched_get_priority_max((int)policy);
698}
699
700
701/* Note: it is necessary to treat policy as an unsigned int,
702 * with the corresponding cast to a signed int to ensure that the
703 * proper conversion (sign extension) between the 32-bit register
704 * representation and the 64-bit register representation is performed.
705 */
706asmlinkage long compat_sys_sched_get_priority_min(u32 policy)
707{
708 return sys_sched_get_priority_min((int)policy);
709}
710
711
712/* Note: it is necessary to treat pid as an unsigned int,
713 * with the corresponding cast to a signed int to ensure that the
714 * proper conversion (sign extension) between the 32-bit register
715 * representation and the 64-bit register representation is performed.
716 */
717asmlinkage long compat_sys_sched_getparam(u32 pid, struct sched_param __user *param)
718{
719 return sys_sched_getparam((int)pid, param);
720}
721
722
723/* Note: it is necessary to treat pid as an unsigned int,
724 * with the corresponding cast to a signed int to ensure that the
725 * proper conversion (sign extension) between the 32-bit register
726 * representation and the 64-bit register representation is performed.
727 */
728asmlinkage long compat_sys_sched_getscheduler(u32 pid)
729{
730 return sys_sched_getscheduler((int)pid);
731}
732
733
734/* Note: it is necessary to treat pid as an unsigned int,
735 * with the corresponding cast to a signed int to ensure that the
736 * proper conversion (sign extension) between the 32-bit register
737 * representation and the 64-bit register representation is performed.
738 */
739asmlinkage long compat_sys_sched_setparam(u32 pid, struct sched_param __user *param)
740{
741 return sys_sched_setparam((int)pid, param);
742}
743
744
745/* Note: it is necessary to treat pid and policy as unsigned ints,
746 * with the corresponding cast to a signed int to ensure that the
747 * proper conversion (sign extension) between the 32-bit register
748 * representation and the 64-bit register representation is performed.
749 */
750asmlinkage long compat_sys_sched_setscheduler(u32 pid, u32 policy, struct sched_param __user *param)
751{
752 return sys_sched_setscheduler((int)pid, (int)policy, param);
753}
754
755
756/* Note: it is necessary to treat len as an unsigned int,
757 * with the corresponding cast to a signed int to ensure that the
758 * proper conversion (sign extension) between the 32-bit register
759 * representation and the 64-bit register representation is performed.
760 */
761asmlinkage long compat_sys_setdomainname(char __user *name, u32 len)
762{
763 return sys_setdomainname(name, (int)len);
764}
765
766
767/* Note: it is necessary to treat gidsetsize as an unsigned int,
768 * with the corresponding cast to a signed int to ensure that the
769 * proper conversion (sign extension) between the 32-bit register
770 * representation and the 64-bit register representation is performed.
771 */
772asmlinkage long compat_sys_setgroups(u32 gidsetsize, gid_t __user *grouplist)
773{
774 return sys_setgroups((int)gidsetsize, grouplist);
775}
776
777
778asmlinkage long compat_sys_sethostname(char __user *name, u32 len)
779{
780 /* sign extend len */
781 return sys_sethostname(name, (int)len);
782}
783
784
785/* Note: it is necessary to treat pid and pgid as unsigned ints,
786 * with the corresponding cast to a signed int to ensure that the
787 * proper conversion (sign extension) between the 32-bit register
788 * representation and the 64-bit register representation is performed.
789 */
790asmlinkage long compat_sys_setpgid(u32 pid, u32 pgid)
791{
792 return sys_setpgid((int)pid, (int)pgid);
793}
794
795long compat_sys_getpriority(u32 which, u32 who)
796{
797 /* sign extend which and who */
798 return sys_getpriority((int)which, (int)who);
799}
800
801long compat_sys_setpriority(u32 which, u32 who, u32 niceval)
802{
803 /* sign extend which, who and niceval */
804 return sys_setpriority((int)which, (int)who, (int)niceval);
805}
806
807long compat_sys_ioprio_get(u32 which, u32 who)
808{
809 /* sign extend which and who */
810 return sys_ioprio_get((int)which, (int)who);
811}
812
813long compat_sys_ioprio_set(u32 which, u32 who, u32 ioprio)
814{
815 /* sign extend which, who and ioprio */
816 return sys_ioprio_set((int)which, (int)who, (int)ioprio);
817}
818
819/* Note: it is necessary to treat newmask as an unsigned int,
820 * with the corresponding cast to a signed int to ensure that the
821 * proper conversion (sign extension) between the 32-bit register
822 * representation and the 64-bit register representation is performed.
823 */
824asmlinkage long compat_sys_ssetmask(u32 newmask)
825{
826 return sys_ssetmask((int) newmask);
827}
828
829asmlinkage long compat_sys_syslog(u32 type, char __user * buf, u32 len)
830{
831 /* sign extend len */
832 return sys_syslog(type, buf, (int)len);
833}
834
835
836/* Note: it is necessary to treat mask as an unsigned int,
837 * with the corresponding cast to a signed int to ensure that the
838 * proper conversion (sign extension) between the 32-bit register
839 * representation and the 64-bit register representation is performed.
840 */
841asmlinkage long compat_sys_umask(u32 mask)
842{
843 return sys_umask((int)mask);
844}
845
846#ifdef CONFIG_SYSCTL
847struct __sysctl_args32 {
848 u32 name;
849 int nlen;
850 u32 oldval;
851 u32 oldlenp;
852 u32 newval;
853 u32 newlen;
854 u32 __unused[4];
855};
856
857asmlinkage long compat_sys_sysctl(struct __sysctl_args32 __user *args)
858{
859 struct __sysctl_args32 tmp;
860 int error;
861 size_t oldlen;
862 size_t __user *oldlenp = NULL;
863 unsigned long addr = (((unsigned long)&args->__unused[0]) + 7) & ~7;
864
865 if (copy_from_user(&tmp, args, sizeof(tmp)))
866 return -EFAULT;
867
868 if (tmp.oldval && tmp.oldlenp) {
869 /* Duh, this is ugly and might not work if sysctl_args
870	 is in read-only memory, but do_sysctl indirectly does
871	 a lot of uaccess in both directions and we'd have to
872	 basically copy the whole of sysctl.c here, and
873 glibc's __sysctl uses rw memory for the structure
874 anyway. */
875 oldlenp = (size_t __user *)addr;
876 if (get_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
877 put_user(oldlen, oldlenp))
878 return -EFAULT;
879 }
880
881 lock_kernel();
882 error = do_sysctl(compat_ptr(tmp.name), tmp.nlen,
883 compat_ptr(tmp.oldval), oldlenp,
884 compat_ptr(tmp.newval), tmp.newlen);
885 unlock_kernel();
886 if (oldlenp) {
887 if (!error) {
888 if (get_user(oldlen, oldlenp) ||
889 put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
890 error = -EFAULT;
891 }
892 copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
893 }
894 return error;
895}
896#endif
897
898unsigned long compat_sys_mmap2(unsigned long addr, size_t len,
899 unsigned long prot, unsigned long flags,
900 unsigned long fd, unsigned long pgoff)
901{
902 /* This should remain 12 even if PAGE_SIZE changes */
903 return sys_mmap(addr, len, prot, flags, fd, pgoff << 12);
904}
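/*
 * Editorial note: mmap2 takes its file offset in 4096-byte units so a
 * 32-bit caller can address offsets beyond 4 GB; pgoff << 12 converts
 * back to bytes.  E.g. pgoff == 0x100000 maps file offset 4 GB.
 */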
905
906long compat_sys_tgkill(u32 tgid, u32 pid, int sig)
907{
908 /* sign extend tgid, pid */
909 return sys_tgkill((int)tgid, (int)pid, sig);
910}
911
912/*
913 * long long munging:
914 * The 32-bit ABI passes long longs in an odd/even register pair.
915 */
916
917compat_ssize_t compat_sys_pread64(unsigned int fd, char __user *ubuf, compat_size_t count,
918 u32 reg6, u32 poshi, u32 poslo)
919{
920 return sys_pread64(fd, ubuf, count, ((loff_t)poshi << 32) | poslo);
921}
922
923compat_ssize_t compat_sys_pwrite64(unsigned int fd, char __user *ubuf, compat_size_t count,
924 u32 reg6, u32 poshi, u32 poslo)
925{
926 return sys_pwrite64(fd, ubuf, count, ((loff_t)poshi << 32) | poslo);
927}
928
929compat_ssize_t compat_sys_readahead(int fd, u32 r4, u32 offhi, u32 offlo, u32 count)
930{
931 return sys_readahead(fd, ((loff_t)offhi << 32) | offlo, count);
932}
933
934asmlinkage int compat_sys_truncate64(const char __user * path, u32 reg4,
935 unsigned long high, unsigned long low)
936{
937 return sys_truncate(path, (high << 32) | low);
938}
939
940asmlinkage int compat_sys_ftruncate64(unsigned int fd, u32 reg4, unsigned long high,
941 unsigned long low)
942{
943 return sys_ftruncate(fd, (high << 32) | low);
944}
945
946long ppc32_lookup_dcookie(u32 cookie_high, u32 cookie_low, char __user *buf,
947 size_t len)
948{
949 return sys_lookup_dcookie((u64)cookie_high << 32 | cookie_low,
950 buf, len);
951}
952
953long ppc32_fadvise64(int fd, u32 unused, u32 offset_high, u32 offset_low,
954 size_t len, int advice)
955{
956 return sys_fadvise64(fd, (u64)offset_high << 32 | offset_low, len,
957 advice);
958}
959
960long ppc32_timer_create(clockid_t clock,
961 struct compat_sigevent __user *ev32,
962 timer_t __user *timer_id)
963{
964 sigevent_t event;
965 timer_t t;
966 long err;
967 mm_segment_t savefs;
968
969 if (ev32 == NULL)
970 return sys_timer_create(clock, NULL, timer_id);
971
972 if (get_compat_sigevent(&event, ev32))
973 return -EFAULT;
974
975 if (!access_ok(VERIFY_WRITE, timer_id, sizeof(timer_t)))
976 return -EFAULT;
977
978 savefs = get_fs();
979 set_fs(KERNEL_DS);
980 /* The __user pointer casts are valid due to the set_fs() */
981 err = sys_timer_create(clock,
982 (sigevent_t __user *) &event,
983 (timer_t __user *) &t);
984 set_fs(savefs);
985
986 if (err == 0)
987 err = __put_user(t, timer_id);
988
989 return err;
990}
991
992asmlinkage long compat_sys_add_key(const char __user *_type,
993 const char __user *_description,
994 const void __user *_payload,
995 u32 plen,
996 u32 ringid)
997{
998 return sys_add_key(_type, _description, _payload, plen, ringid);
999}
1000
1001asmlinkage long compat_sys_request_key(const char __user *_type,
1002 const char __user *_description,
1003 const char __user *_callout_info,
1004 u32 destringid)
1005{
1006 return sys_request_key(_type, _description, _callout_info, destringid);
1007}
1008
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
new file mode 100644
index 000000000000..f72ced11212d
--- /dev/null
+++ b/arch/powerpc/kernel/syscalls.c
@@ -0,0 +1,358 @@
1/*
2 * Implementation of various system calls for Linux/PowerPC
3 *
4 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
5 *
6 * Derived from "arch/i386/kernel/sys_i386.c"
7 * Adapted from the i386 version by Gary Thomas
8 * Modified by Cort Dougan (cort@cs.nmt.edu)
9 * and Paul Mackerras (paulus@cs.anu.edu.au).
10 *
11 * This file contains various random system calls that
12 * have a non-standard calling sequence on the Linux/PPC
13 * platform.
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 *
20 */
21
22#include <linux/errno.h>
23#include <linux/sched.h>
24#include <linux/syscalls.h>
25#include <linux/mm.h>
26#include <linux/smp.h>
27#include <linux/smp_lock.h>
28#include <linux/sem.h>
29#include <linux/msg.h>
30#include <linux/shm.h>
31#include <linux/stat.h>
32#include <linux/mman.h>
33#include <linux/sys.h>
34#include <linux/ipc.h>
35#include <linux/utsname.h>
36#include <linux/file.h>
37#include <linux/init.h>
38#include <linux/personality.h>
39
40#include <asm/uaccess.h>
41#include <asm/ipc.h>
42#include <asm/semaphore.h>
43#include <asm/time.h>
44#include <asm/unistd.h>
45
46extern unsigned long wall_jiffies;
47
48
49/*
50 * sys_ipc() is the de-multiplexer for the SysV IPC calls.
51 *
52 * This is really horribly ugly.
53 */
54int sys_ipc(uint call, int first, unsigned long second, long third,
55 void __user *ptr, long fifth)
56{
57 int version, ret;
58
59 version = call >> 16; /* hack for backward compatibility */
60 call &= 0xffff;
61
62 ret = -ENOSYS;
63 switch (call) {
64 case SEMOP:
65 ret = sys_semtimedop(first, (struct sembuf __user *)ptr,
66 (unsigned)second, NULL);
67 break;
68 case SEMTIMEDOP:
69 ret = sys_semtimedop(first, (struct sembuf __user *)ptr,
70 (unsigned)second,
71 (const struct timespec __user *) fifth);
72 break;
73 case SEMGET:
74 ret = sys_semget (first, (int)second, third);
75 break;
76 case SEMCTL: {
77 union semun fourth;
78
79 ret = -EINVAL;
80 if (!ptr)
81 break;
82 if ((ret = get_user(fourth.__pad, (void __user * __user *)ptr)))
83 break;
84 ret = sys_semctl(first, (int)second, third, fourth);
85 break;
86 }
87 case MSGSND:
88 ret = sys_msgsnd(first, (struct msgbuf __user *)ptr,
89 (size_t)second, third);
90 break;
91 case MSGRCV:
92 switch (version) {
93 case 0: {
94 struct ipc_kludge tmp;
95
96 ret = -EINVAL;
97 if (!ptr)
98 break;
99 if ((ret = copy_from_user(&tmp,
100 (struct ipc_kludge __user *) ptr,
101 sizeof (tmp)) ? -EFAULT : 0))
102 break;
103 ret = sys_msgrcv(first, tmp.msgp, (size_t) second,
104 tmp.msgtyp, third);
105 break;
106 }
107 default:
108 ret = sys_msgrcv (first, (struct msgbuf __user *) ptr,
109 (size_t)second, fifth, third);
110 break;
111 }
112 break;
113 case MSGGET:
114 ret = sys_msgget((key_t)first, (int)second);
115 break;
116 case MSGCTL:
117 ret = sys_msgctl(first, (int)second,
118 (struct msqid_ds __user *)ptr);
119 break;
120 case SHMAT: {
121 ulong raddr;
122 ret = do_shmat(first, (char __user *)ptr, (int)second, &raddr);
123 if (ret)
124 break;
125 ret = put_user(raddr, (ulong __user *) third);
126 break;
127 }
128 case SHMDT:
129 ret = sys_shmdt((char __user *)ptr);
130 break;
131 case SHMGET:
132 ret = sys_shmget(first, (size_t)second, third);
133 break;
134 case SHMCTL:
135 ret = sys_shmctl(first, (int)second,
136 (struct shmid_ds __user *)ptr);
137 break;
138 }
139
140 return ret;
141}
142
143/*
144 * sys_pipe() uses the normal C calling convention for creating
145 * a pipe. It's not the way unix traditionally does this, though.
146 */
147int sys_pipe(int __user *fildes)
148{
149 int fd[2];
150 int error;
151
152 error = do_pipe(fd);
153 if (!error) {
154 if (copy_to_user(fildes, fd, 2*sizeof(int)))
155 error = -EFAULT;
156 }
157 return error;
158}
159
160static inline unsigned long do_mmap2(unsigned long addr, size_t len,
161 unsigned long prot, unsigned long flags,
162 unsigned long fd, unsigned long off, int shift)
163{
164 struct file * file = NULL;
165 unsigned long ret = -EINVAL;
166
167 if (shift) {
168 if (off & ((1 << shift) - 1))
169 goto out;
170 off >>= shift;
171 }
172
173 ret = -EBADF;
174 if (!(flags & MAP_ANONYMOUS)) {
175 if (!(file = fget(fd)))
176 goto out;
177 }
178
179 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
180
181 down_write(&current->mm->mmap_sem);
182 ret = do_mmap_pgoff(file, addr, len, prot, flags, off);
183 up_write(&current->mm->mmap_sem);
184 if (file)
185 fput(file);
186out:
187 return ret;
188}
189
190unsigned long sys_mmap2(unsigned long addr, size_t len,
191 unsigned long prot, unsigned long flags,
192 unsigned long fd, unsigned long pgoff)
193{
194 return do_mmap2(addr, len, prot, flags, fd, pgoff, PAGE_SHIFT-12);
195}
196
197unsigned long sys_mmap(unsigned long addr, size_t len,
198 unsigned long prot, unsigned long flags,
199 unsigned long fd, off_t offset)
200{
201 return do_mmap2(addr, len, prot, flags, fd, offset, PAGE_SHIFT);
202}
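/*
 * Editorial sketch (illustrative numbers, not from the original):
 * sys_mmap2() takes its offset in fixed 4KB units, sys_mmap() in
 * bytes, and do_mmap2() converts either into PAGE_SIZE units.
 * With a hypothetical 64KB PAGE_SIZE (PAGE_SHIFT == 16):
 *
 *	sys_mmap2(..., pgoff = 0x30): shift = 16 - 12 = 4;
 *		0x30 is a multiple of 1 << 4, so off becomes 3 pages.
 *	sys_mmap(..., offset = 0x30000): shift = 16;
 *		0x30000 is 64KB-aligned, so off also becomes 3 pages.
 *
 * Any offset not aligned to PAGE_SIZE fails with -EINVAL.
 */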
203
204#ifdef CONFIG_PPC32
205/*
206 * Due to some executables calling the wrong select we sometimes
207 * get wrong args. This determines how the args are being passed
208 * (directly, or as a single pointer to a block holding them all)
209 * and then calls sys_select() with the appropriate args. -- Cort
210 */
211int
212ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval __user *tvp)
213{
214 if ( (unsigned long)n >= 4096 )
215 {
216 unsigned long __user *buffer = (unsigned long __user *)n;
217 if (!access_ok(VERIFY_READ, buffer, 5*sizeof(unsigned long))
218 || __get_user(n, buffer)
219 || __get_user(inp, ((fd_set __user * __user *)(buffer+1)))
220 || __get_user(outp, ((fd_set __user * __user *)(buffer+2)))
221 || __get_user(exp, ((fd_set __user * __user *)(buffer+3)))
222 || __get_user(tvp, ((struct timeval __user * __user *)(buffer+4))))
223 return -EFAULT;
224 }
225 return sys_select(n, inp, outp, exp, tvp);
226}
227#endif
228
229#ifdef CONFIG_PPC64
230long ppc64_personality(unsigned long personality)
231{
232 long ret;
233
234 if (personality(current->personality) == PER_LINUX32
235 && personality == PER_LINUX)
236 personality = PER_LINUX32;
237 ret = sys_personality(personality);
238 if (ret == PER_LINUX32)
239 ret = PER_LINUX;
240 return ret;
241}
242#endif
243
244#ifdef CONFIG_PPC64
245#define OVERRIDE_MACHINE (personality(current->personality) == PER_LINUX32)
246#else
247#define OVERRIDE_MACHINE 0
248#endif
249
250static inline int override_machine(char *mach)
251{
252 if (OVERRIDE_MACHINE) {
253 /* change ppc64 to ppc */
254 if (__put_user(0, mach+3) || __put_user(0, mach+4))
255 return -EFAULT;
256 }
257 return 0;
258}
259
260long ppc_newuname(struct new_utsname __user * name)
261{
262 int err = 0;
263
264 down_read(&uts_sem);
265 if (copy_to_user(name, &system_utsname, sizeof(*name)))
266 err = -EFAULT;
267 up_read(&uts_sem);
268 if (!err)
269 err = override_machine(name->machine);
270 return err;
271}
272
273int sys_uname(struct old_utsname __user *name)
274{
275 int err = 0;
276
277 down_read(&uts_sem);
278 if (copy_to_user(name, &system_utsname, sizeof(*name)))
279 err = -EFAULT;
280 up_read(&uts_sem);
281 if (!err)
282 err = override_machine(name->machine);
283 return err;
284}
285
286int sys_olduname(struct oldold_utsname __user *name)
287{
288 int error;
289
290 if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
291 return -EFAULT;
292
293 down_read(&uts_sem);
294 error = __copy_to_user(&name->sysname, &system_utsname.sysname,
295 __OLD_UTS_LEN);
296 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
297 error |= __copy_to_user(&name->nodename, &system_utsname.nodename,
298 __OLD_UTS_LEN);
299 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
300 error |= __copy_to_user(&name->release, &system_utsname.release,
301 __OLD_UTS_LEN);
302 error |= __put_user(0, name->release + __OLD_UTS_LEN);
303 error |= __copy_to_user(&name->version, &system_utsname.version,
304 __OLD_UTS_LEN);
305 error |= __put_user(0, name->version + __OLD_UTS_LEN);
306 error |= __copy_to_user(&name->machine, &system_utsname.machine,
307 __OLD_UTS_LEN);
308 error |= override_machine(name->machine);
309 up_read(&uts_sem);
310
311 return error? -EFAULT: 0;
312}
313
314#ifdef CONFIG_PPC64
315time_t sys64_time(time_t __user * tloc)
316{
317 time_t secs;
318 time_t usecs;
319
320 long tb_delta = tb_ticks_since(tb_last_stamp);
321 tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy;
322
323 secs = xtime.tv_sec;
324 usecs = (xtime.tv_nsec/1000) + tb_delta / tb_ticks_per_usec;
325 while (usecs >= USEC_PER_SEC) {
326 ++secs;
327 usecs -= USEC_PER_SEC;
328 }
329
330 if (tloc) {
331 if (put_user(secs,tloc))
332 secs = -EFAULT;
333 }
334
335 return secs;
336}
337#endif
338
339long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
340 u32 len_high, u32 len_low)
341{
342 return sys_fadvise64(fd, (u64)offset_high << 32 | offset_low,
343 (u64)len_high << 32 | len_low, advice);
344}
345
346void do_show_syscall(unsigned long r3, unsigned long r4, unsigned long r5,
347 unsigned long r6, unsigned long r7, unsigned long r8,
348 struct pt_regs *regs)
349{
350 printk("syscall %ld(%lx, %lx, %lx, %lx, %lx, %lx) regs=%p current=%p"
351 " cpu=%d\n", regs->gpr[0], r3, r4, r5, r6, r7, r8, regs,
352 current, smp_processor_id());
353}
354
355void do_show_syscall_exit(unsigned long r3)
356{
357 printk(" -> %lx, current=%p cpu=%d\n", r3, current, smp_processor_id());
358}
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
new file mode 100644
index 000000000000..65eaea91b499
--- /dev/null
+++ b/arch/powerpc/kernel/systbl.S
@@ -0,0 +1,321 @@
1/*
2 * This file contains the table of syscall-handling functions.
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
6 * and Paul Mackerras.
7 *
8 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
9 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 */
16
17#include <linux/config.h>
18#include <asm/ppc_asm.h>
19
20#ifdef CONFIG_PPC64
21#define SYSCALL(func) .llong .sys_##func,.sys_##func
22#define COMPAT_SYS(func) .llong .sys_##func,.compat_sys_##func
23#define PPC_SYS(func) .llong .ppc_##func,.ppc_##func
24#define OLDSYS(func) .llong .sys_ni_syscall,.sys_ni_syscall
25#define SYS32ONLY(func) .llong .sys_ni_syscall,.compat_sys_##func
26#define SYSX(f, f3264, f32) .llong .f,.f3264
27#else
28#define SYSCALL(func) .long sys_##func
29#define COMPAT_SYS(func) .long sys_##func
30#define PPC_SYS(func) .long ppc_##func
31#define OLDSYS(func) .long sys_##func
32#define SYS32ONLY(func) .long sys_##func
33#define SYSX(f, f3264, f32) .long f32
34#endif
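/*
 * Editorial gloss (inferred from the usage below): SYSX(f, f3264, f32)
 * names three entry points -- f for a 64-bit task on a 64-bit kernel,
 * f3264 for a 32-bit task on a 64-bit kernel (the compat slot), and
 * f32 for a 32-bit kernel. The other macros are common special cases
 * of that triple.
 */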
35
36#ifdef CONFIG_PPC64
37#define sys_sigpending sys_ni_syscall
38#define sys_old_getrlimit sys_ni_syscall
39#else
40#define ppc_rtas sys_ni_syscall
41#endif
42
43_GLOBAL(sys_call_table)
44SYSCALL(restart_syscall)
45SYSCALL(exit)
46PPC_SYS(fork)
47SYSCALL(read)
48SYSCALL(write)
49COMPAT_SYS(open)
50SYSCALL(close)
51COMPAT_SYS(waitpid)
52COMPAT_SYS(creat)
53SYSCALL(link)
54SYSCALL(unlink)
55COMPAT_SYS(execve)
56SYSCALL(chdir)
57SYSX(sys64_time,compat_sys_time,sys_time)
58SYSCALL(mknod)
59SYSCALL(chmod)
60SYSCALL(lchown)
61SYSCALL(ni_syscall)
62OLDSYS(stat)
63SYSX(sys_lseek,ppc32_lseek,sys_lseek)
64SYSCALL(getpid)
65COMPAT_SYS(mount)
66SYSX(sys_ni_syscall,sys_oldumount,sys_oldumount)
67SYSCALL(setuid)
68SYSCALL(getuid)
69COMPAT_SYS(stime)
70COMPAT_SYS(ptrace)
71SYSCALL(alarm)
72OLDSYS(fstat)
73COMPAT_SYS(pause)
74COMPAT_SYS(utime)
75SYSCALL(ni_syscall)
76SYSCALL(ni_syscall)
77COMPAT_SYS(access)
78COMPAT_SYS(nice)
79SYSCALL(ni_syscall)
80SYSCALL(sync)
81COMPAT_SYS(kill)
82SYSCALL(rename)
83COMPAT_SYS(mkdir)
84SYSCALL(rmdir)
85SYSCALL(dup)
86SYSCALL(pipe)
87COMPAT_SYS(times)
88SYSCALL(ni_syscall)
89SYSCALL(brk)
90SYSCALL(setgid)
91SYSCALL(getgid)
92SYSCALL(signal)
93SYSCALL(geteuid)
94SYSCALL(getegid)
95SYSCALL(acct)
96SYSCALL(umount)
97SYSCALL(ni_syscall)
98COMPAT_SYS(ioctl)
99COMPAT_SYS(fcntl)
100SYSCALL(ni_syscall)
101COMPAT_SYS(setpgid)
102SYSCALL(ni_syscall)
103SYSX(sys_ni_syscall,sys_olduname, sys_olduname)
104COMPAT_SYS(umask)
105SYSCALL(chroot)
106SYSCALL(ustat)
107SYSCALL(dup2)
108SYSCALL(getppid)
109SYSCALL(getpgrp)
110SYSCALL(setsid)
111SYS32ONLY(sigaction)
112SYSCALL(sgetmask)
113COMPAT_SYS(ssetmask)
114SYSCALL(setreuid)
115SYSCALL(setregid)
116SYSX(sys_ni_syscall,ppc32_sigsuspend,ppc_sigsuspend)
117COMPAT_SYS(sigpending)
118COMPAT_SYS(sethostname)
119COMPAT_SYS(setrlimit)
120COMPAT_SYS(old_getrlimit)
121COMPAT_SYS(getrusage)
122COMPAT_SYS(gettimeofday)
123COMPAT_SYS(settimeofday)
124COMPAT_SYS(getgroups)
125COMPAT_SYS(setgroups)
126SYSX(sys_ni_syscall,sys_ni_syscall,ppc_select)
127SYSCALL(symlink)
128OLDSYS(lstat)
129COMPAT_SYS(readlink)
130SYSCALL(uselib)
131SYSCALL(swapon)
132SYSCALL(reboot)
133SYSX(sys_ni_syscall,old32_readdir,old_readdir)
134SYSCALL(mmap)
135SYSCALL(munmap)
136SYSCALL(truncate)
137SYSCALL(ftruncate)
138SYSCALL(fchmod)
139SYSCALL(fchown)
140COMPAT_SYS(getpriority)
141COMPAT_SYS(setpriority)
142SYSCALL(ni_syscall)
143COMPAT_SYS(statfs)
144COMPAT_SYS(fstatfs)
145SYSCALL(ni_syscall)
146COMPAT_SYS(socketcall)
147COMPAT_SYS(syslog)
148COMPAT_SYS(setitimer)
149COMPAT_SYS(getitimer)
150COMPAT_SYS(newstat)
151COMPAT_SYS(newlstat)
152COMPAT_SYS(newfstat)
153SYSX(sys_ni_syscall,sys_uname,sys_uname)
154SYSCALL(ni_syscall)
155SYSCALL(vhangup)
156SYSCALL(ni_syscall)
157SYSCALL(ni_syscall)
158COMPAT_SYS(wait4)
159SYSCALL(swapoff)
160COMPAT_SYS(sysinfo)
161COMPAT_SYS(ipc)
162SYSCALL(fsync)
163SYSX(sys_ni_syscall,ppc32_sigreturn,sys_sigreturn)
164PPC_SYS(clone)
165COMPAT_SYS(setdomainname)
166PPC_SYS(newuname)
167SYSCALL(ni_syscall)
168COMPAT_SYS(adjtimex)
169SYSCALL(mprotect)
170SYSX(sys_ni_syscall,compat_sys_sigprocmask,sys_sigprocmask)
171SYSCALL(ni_syscall)
172SYSCALL(init_module)
173SYSCALL(delete_module)
174SYSCALL(ni_syscall)
175SYSCALL(quotactl)
176COMPAT_SYS(getpgid)
177SYSCALL(fchdir)
178SYSCALL(bdflush)
179COMPAT_SYS(sysfs)
180SYSX(ppc64_personality,ppc64_personality,sys_personality)
181SYSCALL(ni_syscall)
182SYSCALL(setfsuid)
183SYSCALL(setfsgid)
184SYSCALL(llseek)
185COMPAT_SYS(getdents)
186SYSX(sys_select,ppc32_select,ppc_select)
187SYSCALL(flock)
188SYSCALL(msync)
189COMPAT_SYS(readv)
190COMPAT_SYS(writev)
191COMPAT_SYS(getsid)
192SYSCALL(fdatasync)
193COMPAT_SYS(sysctl)
194SYSCALL(mlock)
195SYSCALL(munlock)
196SYSCALL(mlockall)
197SYSCALL(munlockall)
198COMPAT_SYS(sched_setparam)
199COMPAT_SYS(sched_getparam)
200COMPAT_SYS(sched_setscheduler)
201COMPAT_SYS(sched_getscheduler)
202SYSCALL(sched_yield)
203COMPAT_SYS(sched_get_priority_max)
204COMPAT_SYS(sched_get_priority_min)
205COMPAT_SYS(sched_rr_get_interval)
206COMPAT_SYS(nanosleep)
207SYSCALL(mremap)
208SYSCALL(setresuid)
209SYSCALL(getresuid)
210SYSCALL(ni_syscall)
211SYSCALL(poll)
212COMPAT_SYS(nfsservctl)
213SYSCALL(setresgid)
214SYSCALL(getresgid)
215COMPAT_SYS(prctl)
216SYSX(ppc64_rt_sigreturn,ppc32_rt_sigreturn,sys_rt_sigreturn)
217COMPAT_SYS(rt_sigaction)
218COMPAT_SYS(rt_sigprocmask)
219COMPAT_SYS(rt_sigpending)
220COMPAT_SYS(rt_sigtimedwait)
221COMPAT_SYS(rt_sigqueueinfo)
222SYSX(ppc64_rt_sigsuspend,ppc32_rt_sigsuspend,ppc_rt_sigsuspend)
223COMPAT_SYS(pread64)
224COMPAT_SYS(pwrite64)
225SYSCALL(chown)
226SYSCALL(getcwd)
227SYSCALL(capget)
228SYSCALL(capset)
229COMPAT_SYS(sigaltstack)
230SYSX(sys_sendfile64,compat_sys_sendfile,sys_sendfile)
231SYSCALL(ni_syscall)
232SYSCALL(ni_syscall)
233PPC_SYS(vfork)
234COMPAT_SYS(getrlimit)
235COMPAT_SYS(readahead)
236SYS32ONLY(mmap2)
237SYS32ONLY(truncate64)
238SYS32ONLY(ftruncate64)
239SYSX(sys_ni_syscall,sys_stat64,sys_stat64)
240SYSX(sys_ni_syscall,sys_lstat64,sys_lstat64)
241SYSX(sys_ni_syscall,sys_fstat64,sys_fstat64)
242COMPAT_SYS(pciconfig_read)
243COMPAT_SYS(pciconfig_write)
244COMPAT_SYS(pciconfig_iobase)
245SYSCALL(ni_syscall)
246SYSCALL(getdents64)
247SYSCALL(pivot_root)
248SYSX(sys_ni_syscall,compat_sys_fcntl64,sys_fcntl64)
249SYSCALL(madvise)
250SYSCALL(mincore)
251SYSCALL(gettid)
252SYSCALL(tkill)
253SYSCALL(setxattr)
254SYSCALL(lsetxattr)
255SYSCALL(fsetxattr)
256SYSCALL(getxattr)
257SYSCALL(lgetxattr)
258SYSCALL(fgetxattr)
259SYSCALL(listxattr)
260SYSCALL(llistxattr)
261SYSCALL(flistxattr)
262SYSCALL(removexattr)
263SYSCALL(lremovexattr)
264SYSCALL(fremovexattr)
265COMPAT_SYS(futex)
266COMPAT_SYS(sched_setaffinity)
267COMPAT_SYS(sched_getaffinity)
268SYSCALL(ni_syscall)
269SYSCALL(ni_syscall)
270SYS32ONLY(sendfile64)
271COMPAT_SYS(io_setup)
272SYSCALL(io_destroy)
273COMPAT_SYS(io_getevents)
274COMPAT_SYS(io_submit)
275SYSCALL(io_cancel)
276SYSCALL(set_tid_address)
277SYSX(sys_fadvise64,ppc32_fadvise64,sys_fadvise64)
278SYSCALL(exit_group)
279SYSX(sys_lookup_dcookie,ppc32_lookup_dcookie,sys_lookup_dcookie)
280SYSCALL(epoll_create)
281SYSCALL(epoll_ctl)
282SYSCALL(epoll_wait)
283SYSCALL(remap_file_pages)
284SYSX(sys_timer_create,ppc32_timer_create,sys_timer_create)
285COMPAT_SYS(timer_settime)
286COMPAT_SYS(timer_gettime)
287SYSCALL(timer_getoverrun)
288SYSCALL(timer_delete)
289COMPAT_SYS(clock_settime)
290COMPAT_SYS(clock_gettime)
291COMPAT_SYS(clock_getres)
292COMPAT_SYS(clock_nanosleep)
293SYSX(ppc64_swapcontext,ppc32_swapcontext,ppc_swapcontext)
294COMPAT_SYS(tgkill)
295COMPAT_SYS(utimes)
296COMPAT_SYS(statfs64)
297COMPAT_SYS(fstatfs64)
298SYSX(sys_ni_syscall, ppc_fadvise64_64, ppc_fadvise64_64)
299PPC_SYS(rtas)
300OLDSYS(debug_setcontext)
301SYSCALL(ni_syscall)
302SYSCALL(ni_syscall)
303COMPAT_SYS(mbind)
304COMPAT_SYS(get_mempolicy)
305COMPAT_SYS(set_mempolicy)
306COMPAT_SYS(mq_open)
307SYSCALL(mq_unlink)
308COMPAT_SYS(mq_timedsend)
309COMPAT_SYS(mq_timedreceive)
310COMPAT_SYS(mq_notify)
311COMPAT_SYS(mq_getsetattr)
312COMPAT_SYS(kexec_load)
313COMPAT_SYS(add_key)
314COMPAT_SYS(request_key)
315COMPAT_SYS(keyctl)
316COMPAT_SYS(waitid)
317COMPAT_SYS(ioprio_set)
318COMPAT_SYS(ioprio_get)
319SYSCALL(inotify_init)
320SYSCALL(inotify_add_watch)
321SYSCALL(inotify_rm_watch)
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
new file mode 100644
index 000000000000..23436b6c1881
--- /dev/null
+++ b/arch/powerpc/kernel/time.c
@@ -0,0 +1,1005 @@
1/*
2 * Common time routines among all ppc machines.
3 *
4 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
5 * Paul Mackerras' version and mine for PReP and Pmac.
6 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
7 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
8 *
9 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
10 * to make clock more stable (2.4.0-test5). The only thing
11 * that this code assumes is that the timebases have been synchronized
12 * by firmware on SMP and are never stopped (so never use sleep
13 * on SMP; nap and doze are OK).
14 *
15 * Speeded up do_gettimeofday by getting rid of references to
16 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
17 *
18 * TODO (not necessarily in this file):
19 * - improve precision and reproducibility of timebase frequency
20 * measurement at boot time. (for iSeries, we calibrate the timebase
21 * against the Titan chip's clock.)
22 * - for astronomical applications: add a new function to get
23 * unambiguous timestamps even around leap seconds. This needs
24 * a new timestamp format and a good name.
25 *
26 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
27 * "A Kernel Model for Precision Timekeeping" by Dave Mills
28 *
29 * This program is free software; you can redistribute it and/or
30 * modify it under the terms of the GNU General Public License
31 * as published by the Free Software Foundation; either version
32 * 2 of the License, or (at your option) any later version.
33 */
34
35#include <linux/config.h>
36#include <linux/errno.h>
37#include <linux/module.h>
38#include <linux/sched.h>
39#include <linux/kernel.h>
40#include <linux/param.h>
41#include <linux/string.h>
42#include <linux/mm.h>
43#include <linux/interrupt.h>
44#include <linux/timex.h>
45#include <linux/kernel_stat.h>
46#include <linux/time.h>
47#include <linux/init.h>
48#include <linux/profile.h>
49#include <linux/cpu.h>
50#include <linux/security.h>
51#include <linux/percpu.h>
52#include <linux/rtc.h>
53
54#include <asm/io.h>
55#include <asm/processor.h>
56#include <asm/nvram.h>
57#include <asm/cache.h>
58#include <asm/machdep.h>
59#include <asm/uaccess.h>
60#include <asm/time.h>
61#include <asm/prom.h>
62#include <asm/irq.h>
63#include <asm/div64.h>
64#ifdef CONFIG_PPC64
65#include <asm/systemcfg.h>
66#include <asm/firmware.h>
67#endif
68#ifdef CONFIG_PPC_ISERIES
69#include <asm/iSeries/ItLpQueue.h>
70#include <asm/iSeries/HvCallXm.h>
71#endif
72
73/* keep track of when we need to update the rtc */
74time_t last_rtc_update;
75extern int piranha_simulator;
76#ifdef CONFIG_PPC_ISERIES
77unsigned long iSeries_recal_titan = 0;
78unsigned long iSeries_recal_tb = 0;
79static unsigned long first_settimeofday = 1;
80#endif
81
82/* The decrementer counts down by 128 every 128ns on a 601. */
83#define DECREMENTER_COUNT_601 (1000000000 / HZ)
84
85#define XSEC_PER_SEC (1024*1024)
86
87#ifdef CONFIG_PPC64
88#define SCALE_XSEC(xsec, max) (((xsec) * max) / XSEC_PER_SEC)
89#else
90/* compute ((xsec << 12) * max) >> 32 */
91#define SCALE_XSEC(xsec, max) mulhwu((xsec) << 12, max)
92#endif
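/*
 * Editorial example: an "xsec" is 1/2^20 of a second (~0.954us),
 * so XSEC_PER_SEC == 1 << 20. SCALE_XSEC(xsec, 1000000) converts
 * a sub-second xsec count to microseconds without a divide; e.g.
 * for half a second, SCALE_XSEC(524288, 1000000) == 500000 with
 * both variants, since ((x << 12) * max) >> 32 == (x * max) >> 20.
 */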
93
94unsigned long tb_ticks_per_jiffy;
95unsigned long tb_ticks_per_usec = 100; /* sane default */
96EXPORT_SYMBOL(tb_ticks_per_usec);
97unsigned long tb_ticks_per_sec;
98u64 tb_to_xs;
99unsigned tb_to_us;
100unsigned long processor_freq;
101DEFINE_SPINLOCK(rtc_lock);
102EXPORT_SYMBOL_GPL(rtc_lock);
103
104u64 tb_to_ns_scale;
105unsigned tb_to_ns_shift;
106
107struct gettimeofday_struct do_gtod;
108
109extern unsigned long wall_jiffies;
110
111extern struct timezone sys_tz;
112static long timezone_offset;
113
114void ppc_adjtimex(void);
115
116static unsigned adjusting_time = 0;
117
118unsigned long ppc_proc_freq;
119unsigned long ppc_tb_freq;
120
121#ifdef CONFIG_PPC32 /* XXX for now */
122#define boot_cpuid 0
123#endif
124
125u64 tb_last_jiffy __cacheline_aligned_in_smp;
126unsigned long tb_last_stamp;
127
128/*
129 * Note that on ppc32 this only stores the bottom 32 bits of
130 * the timebase value, but that's enough to tell when a jiffy
131 * has passed.
132 */
133DEFINE_PER_CPU(unsigned long, last_jiffy);
134
135static __inline__ void timer_check_rtc(void)
136{
137 /*
138 * update the rtc when needed; this should be performed on the
139 * right fraction of a second. Half or full second?
140 * Full second works on mk48t59 clocks, others need testing.
141 * Note that this update is basically only used through
142 * the adjtimex system calls. Setting the HW clock in
143 * any other way is a /dev/rtc and userland business.
144 * This is still wrong by -0.5/+1.5 jiffies because of the
145 * timer interrupt resolution and possible delay, but here we
146 * hit a quantization limit which can only be solved by higher
147 * resolution timers and decoupling time management from timer
148 * interrupts. This is also wrong on the clocks
149 * which require being written at the half second boundary.
150 * We should have an rtc call that only sets the minutes and
151 * seconds, as on Intel, to avoid problems with non-UTC clocks.
152 */
153 if (ppc_md.set_rtc_time && ntp_synced() &&
154 xtime.tv_sec - last_rtc_update >= 659 &&
155 abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ &&
156 jiffies - wall_jiffies == 1) {
157 struct rtc_time tm;
158 to_tm(xtime.tv_sec + 1 + timezone_offset, &tm);
159 tm.tm_year -= 1900;
160 tm.tm_mon -= 1;
161 if (ppc_md.set_rtc_time(&tm) == 0)
162 last_rtc_update = xtime.tv_sec + 1;
163 else
164 /* Try again one minute later */
165 last_rtc_update += 60;
166 }
167}
168
169/*
170 * This version of gettimeofday has microsecond resolution.
171 */
172static inline void __do_gettimeofday(struct timeval *tv, u64 tb_val)
173{
174 unsigned long sec, usec;
175 u64 tb_ticks, xsec;
176 struct gettimeofday_vars *temp_varp;
177 u64 temp_tb_to_xs, temp_stamp_xsec;
178
179 /*
180 * These calculations are faster (gets rid of divides)
181 * if done in units of 1/2^20 rather than microseconds.
182 * The conversion to microseconds at the end is done
183 * without a divide (and in fact, without a multiply)
184 */
185 temp_varp = do_gtod.varp;
186 tb_ticks = tb_val - temp_varp->tb_orig_stamp;
187 temp_tb_to_xs = temp_varp->tb_to_xs;
188 temp_stamp_xsec = temp_varp->stamp_xsec;
189 xsec = temp_stamp_xsec + mulhdu(tb_ticks, temp_tb_to_xs);
190 sec = xsec / XSEC_PER_SEC;
191 usec = (unsigned long)xsec & (XSEC_PER_SEC - 1);
192 usec = SCALE_XSEC(usec, 1000000);
193
194 tv->tv_sec = sec;
195 tv->tv_usec = usec;
196}
197
198void do_gettimeofday(struct timeval *tv)
199{
200 if (__USE_RTC()) {
201 /* do this the old way */
202 unsigned long flags, seq;
203 unsigned int sec, nsec, usec, lost;
204
205 do {
206 seq = read_seqbegin_irqsave(&xtime_lock, flags);
207 sec = xtime.tv_sec;
208 nsec = xtime.tv_nsec + tb_ticks_since(tb_last_stamp);
209 lost = jiffies - wall_jiffies;
210 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
211 usec = nsec / 1000 + lost * (1000000 / HZ);
212 while (usec >= 1000000) {
213 usec -= 1000000;
214 ++sec;
215 }
216 tv->tv_sec = sec;
217 tv->tv_usec = usec;
218 return;
219 }
220 __do_gettimeofday(tv, get_tb());
221}
222
223EXPORT_SYMBOL(do_gettimeofday);
224
225/* Synchronize xtime with do_gettimeofday */
226
227static inline void timer_sync_xtime(unsigned long cur_tb)
228{
229#ifdef CONFIG_PPC64
230 /* why do we do this? */
231 struct timeval my_tv;
232
233 __do_gettimeofday(&my_tv, cur_tb);
234
235 if (xtime.tv_sec <= my_tv.tv_sec) {
236 xtime.tv_sec = my_tv.tv_sec;
237 xtime.tv_nsec = my_tv.tv_usec * 1000;
238 }
239#endif
240}
241
242/*
243 * There are two copies of tb_to_xs and stamp_xsec so that no
244 * lock is needed to access and use these values in
245 * do_gettimeofday. We alternate the copies and as long as a
246 * reasonable time elapses between changes, there will never
247 * be inconsistent values. ntpd has a minimum of one minute
248 * between updates.
249 */
250static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
251 u64 new_tb_to_xs)
252{
253 unsigned temp_idx;
254 struct gettimeofday_vars *temp_varp;
255
256 temp_idx = (do_gtod.var_idx == 0);
257 temp_varp = &do_gtod.vars[temp_idx];
258
259 temp_varp->tb_to_xs = new_tb_to_xs;
260 temp_varp->tb_orig_stamp = new_tb_stamp;
261 temp_varp->stamp_xsec = new_stamp_xsec;
262 smp_mb();
263 do_gtod.varp = temp_varp;
264 do_gtod.var_idx = temp_idx;
265
266#ifdef CONFIG_PPC64
267 /*
268 * tb_update_count is used to allow the userspace gettimeofday code
269 * to assure itself that it sees a consistent view of the tb_to_xs and
270 * stamp_xsec variables. It reads the tb_update_count, then reads
271 * tb_to_xs and stamp_xsec and then reads tb_update_count again. If
272 * the two values of tb_update_count match and are even then the
273 * tb_to_xs and stamp_xsec values are consistent. If not, then it
274 * loops back and reads them again until this criterion is met.
275 */
276 ++(systemcfg->tb_update_count);
277 smp_wmb();
278 systemcfg->tb_orig_stamp = new_tb_stamp;
279 systemcfg->stamp_xsec = new_stamp_xsec;
280 systemcfg->tb_to_xs = new_tb_to_xs;
281 smp_wmb();
282 ++(systemcfg->tb_update_count);
283#endif
284}
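/*
 * Editorial sketch of the userspace side of the protocol described
 * above (an assumed reader, not part of the original patch):
 *
 *	do {
 *		count = systemcfg->tb_update_count;
 *		rmb();
 *		xs   = systemcfg->tb_to_xs;
 *		xsec = systemcfg->stamp_xsec;
 *		rmb();
 *	} while (count != systemcfg->tb_update_count || (count & 1));
 *
 * An odd count means an update is in flight; a changed count means
 * the reader raced with one. Either way the reader retries.
 */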
285
286/*
287 * When the timebase - tb_orig_stamp gets too big, we do a manipulation
288 * between tb_orig_stamp and stamp_xsec. The goal here is to keep the
289 * difference tb - tb_orig_stamp small enough to always fit inside a
290 * 32-bit number. This is a requirement of our fast 32-bit userland
291 * implementation in the vdso. If we "miss" a call to this function
292 * (interrupt latency, CPU locked in a spinlock, ...) and we end up
293 * with too big a difference, then the vdso will fall back to calling
294 * the syscall.
295 */
296static __inline__ void timer_recalc_offset(u64 cur_tb)
297{
298 unsigned long offset;
299 u64 new_stamp_xsec;
300
301 if (__USE_RTC())
302 return;
303 offset = cur_tb - do_gtod.varp->tb_orig_stamp;
304 if ((offset & 0x80000000u) == 0)
305 return;
306 new_stamp_xsec = do_gtod.varp->stamp_xsec
307 + mulhdu(offset, do_gtod.varp->tb_to_xs);
308 update_gtod(cur_tb, new_stamp_xsec, do_gtod.varp->tb_to_xs);
309}
310
311#ifdef CONFIG_SMP
312unsigned long profile_pc(struct pt_regs *regs)
313{
314 unsigned long pc = instruction_pointer(regs);
315
316 if (in_lock_functions(pc))
317 return regs->link;
318
319 return pc;
320}
321EXPORT_SYMBOL(profile_pc);
322#endif
323
324#ifdef CONFIG_PPC_ISERIES
325
326/*
327 * This function recalibrates the timebase based on the 49-bit time-of-day
328 * value in the Titan chip. The Titan is much more accurate than the value
329 * returned by the service processor for the timebase frequency.
330 */
331
332static void iSeries_tb_recal(void)
333{
334 struct div_result divres;
335 unsigned long titan, tb;
336 tb = get_tb();
337 titan = HvCallXm_loadTod();
338 if ( iSeries_recal_titan ) {
339 unsigned long tb_ticks = tb - iSeries_recal_tb;
340 unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;
341 unsigned long new_tb_ticks_per_sec = (tb_ticks * USEC_PER_SEC)/titan_usec;
342 unsigned long new_tb_ticks_per_jiffy = (new_tb_ticks_per_sec+(HZ/2))/HZ;
343 long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;
344 char sign = '+';
345 /* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */
346 new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;
347
348 if ( tick_diff < 0 ) {
349 tick_diff = -tick_diff;
350 sign = '-';
351 }
352 if ( tick_diff ) {
353 if ( tick_diff < tb_ticks_per_jiffy/25 ) {
354 printk( "Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",
355 new_tb_ticks_per_jiffy, sign, tick_diff );
356 tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
357 tb_ticks_per_sec = new_tb_ticks_per_sec;
358 div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
359 do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
360 tb_to_xs = divres.result_low;
361 do_gtod.varp->tb_to_xs = tb_to_xs;
362 systemcfg->tb_ticks_per_sec = tb_ticks_per_sec;
363 systemcfg->tb_to_xs = tb_to_xs;
364 }
365 else {
366 printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
367 " new tb_ticks_per_jiffy = %lu\n"
368 " old tb_ticks_per_jiffy = %lu\n",
369 new_tb_ticks_per_jiffy, tb_ticks_per_jiffy );
370 }
371 }
372 }
373 iSeries_recal_titan = titan;
374 iSeries_recal_tb = tb;
375}
376#endif
377
378/*
379 * For iSeries shared processors, we have to let the hypervisor
380 * set the hardware decrementer. We set a virtual decrementer
381 * in the lppaca and call the hypervisor if the virtual
382 * decrementer is less than the current value in the hardware
383 * decrementer. (almost always the new decrementer value will
384 * be greater than the current hardware decrementer, so the hypervisor
385 * call will not be needed)
386 */
387
388/*
389 * timer_interrupt - gets called when the decrementer overflows,
390 * with interrupts disabled.
391 */
392void timer_interrupt(struct pt_regs * regs)
393{
394 int next_dec;
395 int cpu = smp_processor_id();
396 unsigned long ticks;
397
398#ifdef CONFIG_PPC32
399 if (atomic_read(&ppc_n_lost_interrupts) != 0)
400 do_IRQ(regs);
401#endif
402
403 irq_enter();
404
405 profile_tick(CPU_PROFILING, regs);
406
407#ifdef CONFIG_PPC_ISERIES
408 get_paca()->lppaca.int_dword.fields.decr_int = 0;
409#endif
410
411 while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu)))
412 >= tb_ticks_per_jiffy) {
413 /* Update last_jiffy */
414 per_cpu(last_jiffy, cpu) += tb_ticks_per_jiffy;
415 /* Handle RTCL overflow on 601 */
416 if (__USE_RTC() && per_cpu(last_jiffy, cpu) >= 1000000000)
417 per_cpu(last_jiffy, cpu) -= 1000000000;
418
419 /*
420 * We cannot disable the decrementer, so in the period
421 * between this cpu's being marked offline in cpu_online_map
422 * and calling stop-self, it is taking timer interrupts.
423 * Avoid calling into the scheduler rebalancing code if this
424 * is the case.
425 */
426 if (!cpu_is_offline(cpu))
427 update_process_times(user_mode(regs));
428
429 /*
430 * No need to check whether cpu is offline here; boot_cpuid
431 * should have been fixed up by now.
432 */
433 if (cpu != boot_cpuid)
434 continue;
435
436 write_seqlock(&xtime_lock);
437 tb_last_jiffy += tb_ticks_per_jiffy;
438 tb_last_stamp = per_cpu(last_jiffy, cpu);
439 timer_recalc_offset(tb_last_jiffy);
440 do_timer(regs);
441 timer_sync_xtime(tb_last_jiffy);
442 timer_check_rtc();
443 write_sequnlock(&xtime_lock);
444 if (adjusting_time && (time_adjust == 0))
445 ppc_adjtimex();
446 }
447
448 next_dec = tb_ticks_per_jiffy - ticks;
449 set_dec(next_dec);
450
451#ifdef CONFIG_PPC_ISERIES
452 if (hvlpevent_is_pending())
453 process_hvlpevents(regs);
454#endif
455
456#ifdef CONFIG_PPC64
457 /* collect purr register values often, for accurate calculations */
458 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
459 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
460 cu->current_tb = mfspr(SPRN_PURR);
461 }
462#endif
463
464 irq_exit();
465}
466
467void wakeup_decrementer(void)
468{
469 int i;
470
471 set_dec(tb_ticks_per_jiffy);
472 /*
473 * We don't expect this to be called on a machine with a 601,
474 * so using get_tb is fine.
475 */
476 tb_last_stamp = tb_last_jiffy = get_tb();
477 for_each_cpu(i)
478 per_cpu(last_jiffy, i) = tb_last_stamp;
479}
480
481#ifdef CONFIG_SMP
482void __init smp_space_timers(unsigned int max_cpus)
483{
484 int i;
485 unsigned long offset = tb_ticks_per_jiffy / max_cpus;
486 unsigned long previous_tb = per_cpu(last_jiffy, boot_cpuid);
487
488 for_each_cpu(i) {
489 if (i != boot_cpuid) {
490 previous_tb += offset;
491 per_cpu(last_jiffy, i) = previous_tb;
492 }
493 }
494}
495#endif
496
497/*
498 * Scheduler clock - returns current time in nanosec units.
499 *
500 * Note: mulhdu(a, b) (multiply high double unsigned) returns
501 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
502 * are 64-bit unsigned numbers.
503 */
504unsigned long long sched_clock(void)
505{
506 if (__USE_RTC())
507 return get_rtc();
508 return mulhdu(get_tb(), tb_to_ns_scale) << tb_to_ns_shift;
509}
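/*
 * Editorial worked example (hypothetical frequency): with a 125MHz
 * timebase, time_init() below computes 10^9 * 2^64 / tb_ticks_per_sec,
 * which is 8.0 as a 64.64 fixed-point number, and normalizes it to
 * tb_to_ns_scale = 2^63 (i.e. 0.5) with tb_to_ns_shift = 4. Then
 * mulhdu(tb, 2^63) << 4 == (tb / 2) << 4 == tb * 8, i.e. 8ns per
 * tick, using only a multiply and a shift.
 */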
510
511int do_settimeofday(struct timespec *tv)
512{
513 time_t wtm_sec, new_sec = tv->tv_sec;
514 long wtm_nsec, new_nsec = tv->tv_nsec;
515 unsigned long flags;
516 long int tb_delta;
517 u64 new_xsec, tb_delta_xs;
518
519 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
520 return -EINVAL;
521
522 write_seqlock_irqsave(&xtime_lock, flags);
523
524 /*
525 * Updating the RTC is not the job of this code. If the time is
526 * stepped under NTP, the RTC will be updated after STA_UNSYNC
527 * is cleared. Tools like clock/hwclock either copy the RTC
528 * to the system time, in which case there is no point in writing
529 * to the RTC again, or write to the RTC but then they don't call
530 * settimeofday to perform this operation.
531 */
532#ifdef CONFIG_PPC_ISERIES
533 if (first_settimeofday) {
534 iSeries_tb_recal();
535 first_settimeofday = 0;
536 }
537#endif
538 tb_delta = tb_ticks_since(tb_last_stamp);
539 tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy;
540 tb_delta_xs = mulhdu(tb_delta, do_gtod.varp->tb_to_xs);
541
542 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec);
543 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec);
544
545 set_normalized_timespec(&xtime, new_sec, new_nsec);
546 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
547
548 /* In case of a large backwards jump in time with NTP, we want the
549 * clock to be updated as soon as the PLL is again in lock.
550 */
551 last_rtc_update = new_sec - 658;
552
553 ntp_clear();
554
555 new_xsec = 0;
556 if (new_nsec != 0) {
557 new_xsec = (u64)new_nsec * XSEC_PER_SEC;
558 do_div(new_xsec, NSEC_PER_SEC);
559 }
560 new_xsec += (u64)new_sec * XSEC_PER_SEC - tb_delta_xs;
561 update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs);
562
563#ifdef CONFIG_PPC64
564 systemcfg->tz_minuteswest = sys_tz.tz_minuteswest;
565 systemcfg->tz_dsttime = sys_tz.tz_dsttime;
566#endif
567
568 write_sequnlock_irqrestore(&xtime_lock, flags);
569 clock_was_set();
570 return 0;
571}
572
573EXPORT_SYMBOL(do_settimeofday);
574
575void __init generic_calibrate_decr(void)
576{
577 struct device_node *cpu;
578 unsigned int *fp;
579 int node_found;
580
581 /*
582 * The cpu node should have a timebase-frequency property
583 * to tell us the rate at which the decrementer counts.
584 */
585 cpu = of_find_node_by_type(NULL, "cpu");
586
587 ppc_tb_freq = DEFAULT_TB_FREQ; /* hardcoded default */
588 node_found = 0;
589 if (cpu != 0) {
590 fp = (unsigned int *)get_property(cpu, "timebase-frequency",
591 NULL);
592 if (fp != 0) {
593 node_found = 1;
594 ppc_tb_freq = *fp;
595 }
596 }
597 if (!node_found)
598 printk(KERN_ERR "WARNING: Estimating decrementer frequency "
599 "(not found)\n");
600
601 ppc_proc_freq = DEFAULT_PROC_FREQ;
602 node_found = 0;
603 if (cpu != 0) {
604 fp = (unsigned int *)get_property(cpu, "clock-frequency",
605 NULL);
606 if (fp != 0) {
607 node_found = 1;
608 ppc_proc_freq = *fp;
609 }
610 }
611#ifdef CONFIG_BOOKE
612 /* Set the time base to zero */
613 mtspr(SPRN_TBWL, 0);
614 mtspr(SPRN_TBWU, 0);
615
616 /* Clear any pending timer interrupts */
617 mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
618
619 /* Enable decrementer interrupt */
620 mtspr(SPRN_TCR, TCR_DIE);
621#endif
622 if (!node_found)
623 printk(KERN_ERR "WARNING: Estimating processor frequency "
624 "(not found)\n");
625
626 of_node_put(cpu);
627}
628
629unsigned long get_boot_time(void)
630{
631 struct rtc_time tm;
632
633 if (ppc_md.get_boot_time)
634 return ppc_md.get_boot_time();
635 if (!ppc_md.get_rtc_time)
636 return 0;
637 ppc_md.get_rtc_time(&tm);
638 return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
639 tm.tm_hour, tm.tm_min, tm.tm_sec);
640}
641
642/* This function is only called on the boot processor */
643void __init time_init(void)
644{
645 unsigned long flags;
646 unsigned long tm = 0;
647 struct div_result res;
648 u64 scale;
649 unsigned shift;
650
651 if (ppc_md.time_init != NULL)
652 timezone_offset = ppc_md.time_init();
653
654 if (__USE_RTC()) {
655 /* 601 processor: dec counts down by 128 every 128ns */
656 ppc_tb_freq = 1000000000;
657 tb_last_stamp = get_rtcl();
658 tb_last_jiffy = tb_last_stamp;
659 } else {
660 /* Normal PowerPC with timebase register */
661 ppc_md.calibrate_decr();
662 printk(KERN_INFO "time_init: decrementer frequency = %lu.%.6lu MHz\n",
663 ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
664 printk(KERN_INFO "time_init: processor frequency = %lu.%.6lu MHz\n",
665 ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
666 tb_last_stamp = tb_last_jiffy = get_tb();
667 }
668
669 tb_ticks_per_jiffy = ppc_tb_freq / HZ;
670 tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
671 tb_ticks_per_usec = ppc_tb_freq / 1000000;
672 tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
673 div128_by_32(1024*1024, 0, tb_ticks_per_sec, &res);
674 tb_to_xs = res.result_low;
675
676#ifdef CONFIG_PPC64
677 get_paca()->default_decr = tb_ticks_per_jiffy;
678#endif
679
680 /*
681 * Compute scale factor for sched_clock.
682 * The calibrate_decr() function has set tb_ticks_per_sec,
683 * which is the timebase frequency.
684 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
685 * the 128-bit result as a 64.64 fixed-point number.
686 * We then shift that number right until it is less than 1.0,
687 * giving us the scale factor and shift count to use in
688 * sched_clock().
689 */
690 div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
691 scale = res.result_low;
692 for (shift = 0; res.result_high != 0; ++shift) {
693 scale = (scale >> 1) | (res.result_high << 63);
694 res.result_high >>= 1;
695 }
696 tb_to_ns_scale = scale;
697 tb_to_ns_shift = shift;
698
699#ifdef CONFIG_PPC_ISERIES
700 if (!piranha_simulator)
701#endif
702 tm = get_boot_time();
703
704 write_seqlock_irqsave(&xtime_lock, flags);
705 xtime.tv_sec = tm;
706 xtime.tv_nsec = 0;
707 do_gtod.varp = &do_gtod.vars[0];
708 do_gtod.var_idx = 0;
709 do_gtod.varp->tb_orig_stamp = tb_last_jiffy;
710 __get_cpu_var(last_jiffy) = tb_last_stamp;
711 do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
712 do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
713 do_gtod.varp->tb_to_xs = tb_to_xs;
714 do_gtod.tb_to_us = tb_to_us;
715#ifdef CONFIG_PPC64
716 systemcfg->tb_orig_stamp = tb_last_jiffy;
717 systemcfg->tb_update_count = 0;
718 systemcfg->tb_ticks_per_sec = tb_ticks_per_sec;
719 systemcfg->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC;
720 systemcfg->tb_to_xs = tb_to_xs;
721#endif
722
723 time_freq = 0;
724
725 /* If platform provided a timezone (pmac), we correct the time */
726 if (timezone_offset) {
727 sys_tz.tz_minuteswest = -timezone_offset / 60;
728 sys_tz.tz_dsttime = 0;
729 xtime.tv_sec -= timezone_offset;
730 }
731
732 last_rtc_update = xtime.tv_sec;
733 set_normalized_timespec(&wall_to_monotonic,
734 -xtime.tv_sec, -xtime.tv_nsec);
735 write_sequnlock_irqrestore(&xtime_lock, flags);
736
737 /* Not exact, but the timer interrupt takes care of this */
738 set_dec(tb_ticks_per_jiffy);
739}
740
741/*
742 * After adjtimex is called, adjust the conversion of tb ticks
743 * to microseconds to keep do_gettimeofday synchronized
744 * with ntpd.
745 *
746 * Use the time_adjust, time_freq and time_offset computed by adjtimex to
747 * adjust the frequency.
748 */
749
750/* #define DEBUG_PPC_ADJTIMEX 1 */
751
752void ppc_adjtimex(void)
753{
754#ifdef CONFIG_PPC64
755 unsigned long den, new_tb_ticks_per_sec, tb_ticks, old_xsec,
756 new_tb_to_xs, new_xsec, new_stamp_xsec;
757 unsigned long tb_ticks_per_sec_delta;
758 long delta_freq, ltemp;
759 struct div_result divres;
760 unsigned long flags;
761 long singleshot_ppm = 0;
762
763 /*
764 * Compute parts per million frequency adjustment to
765 * accomplish the time adjustment implied by time_offset to be
766 * applied over the elapsed time indicated by time_constant.
767 * Use SHIFT_USEC to get it into the same units as
768 * time_freq.
769 */
770 if ( time_offset < 0 ) {
771 ltemp = -time_offset;
772 ltemp <<= SHIFT_USEC - SHIFT_UPDATE;
773 ltemp >>= SHIFT_KG + time_constant;
774 ltemp = -ltemp;
775 } else {
776 ltemp = time_offset;
777 ltemp <<= SHIFT_USEC - SHIFT_UPDATE;
778 ltemp >>= SHIFT_KG + time_constant;
779 }
780
781 /* If there is a single shot time adjustment in progress */
782 if ( time_adjust ) {
783#ifdef DEBUG_PPC_ADJTIMEX
784 printk("ppc_adjtimex: ");
785 if ( adjusting_time == 0 )
786 printk("starting ");
787 printk("single shot time_adjust = %ld\n", time_adjust);
788#endif
789
790 adjusting_time = 1;
791
792 /*
793 * Compute parts per million frequency adjustment
794 * to match time_adjust
795 */
796 singleshot_ppm = tickadj * HZ;
797 /*
798 * The adjustment should be tickadj*HZ to match the code in
799 * linux/kernel/timer.c, but experiments show that this is too
800 * large. 3/4 of tickadj*HZ seems about right
801 */
802 singleshot_ppm -= singleshot_ppm / 4;
803 /* Use SHIFT_USEC to get it into the same units as time_freq */
804 singleshot_ppm <<= SHIFT_USEC;
805 if ( time_adjust < 0 )
806 singleshot_ppm = -singleshot_ppm;
807 }
808 else {
809#ifdef DEBUG_PPC_ADJTIMEX
810 if ( adjusting_time )
811 printk("ppc_adjtimex: ending single shot time_adjust\n");
812#endif
813 adjusting_time = 0;
814 }
815
816 /* Add up all of the frequency adjustments */
817 delta_freq = time_freq + ltemp + singleshot_ppm;
818
819 /*
820 * Compute a new value for tb_ticks_per_sec based on
821 * the frequency adjustment
822 */
823 den = 1000000 * (1 << (SHIFT_USEC - 8));
824 if ( delta_freq < 0 ) {
825 tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( (-delta_freq) >> (SHIFT_USEC - 8))) / den;
826 new_tb_ticks_per_sec = tb_ticks_per_sec + tb_ticks_per_sec_delta;
827 }
828 else {
829 tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( delta_freq >> (SHIFT_USEC - 8))) / den;
830 new_tb_ticks_per_sec = tb_ticks_per_sec - tb_ticks_per_sec_delta;
831 }
832
833#ifdef DEBUG_PPC_ADJTIMEX
834 printk("ppc_adjtimex: ltemp = %ld, time_freq = %ld, singleshot_ppm = %ld\n", ltemp, time_freq, singleshot_ppm);
835 printk("ppc_adjtimex: tb_ticks_per_sec - base = %ld new = %ld\n", tb_ticks_per_sec, new_tb_ticks_per_sec);
836#endif
837
838 /*
839 * Compute a new value of tb_to_xs (used to convert tb to
840 * microseconds) and a new value of stamp_xsec which is the
841 * time (in 1/2^20 second units) corresponding to
842 * tb_orig_stamp. This new value of stamp_xsec compensates
843 * for the change in frequency (implied by the new tb_to_xs)
844 * which guarantees that the current time remains the same.
845 */
846 write_seqlock_irqsave( &xtime_lock, flags );
847 tb_ticks = get_tb() - do_gtod.varp->tb_orig_stamp;
848 div128_by_32(1024*1024, 0, new_tb_ticks_per_sec, &divres);
849 new_tb_to_xs = divres.result_low;
850 new_xsec = mulhdu(tb_ticks, new_tb_to_xs);
851
852 old_xsec = mulhdu(tb_ticks, do_gtod.varp->tb_to_xs);
853 new_stamp_xsec = do_gtod.varp->stamp_xsec + old_xsec - new_xsec;
854
855 update_gtod(do_gtod.varp->tb_orig_stamp, new_stamp_xsec, new_tb_to_xs);
856
857 write_sequnlock_irqrestore( &xtime_lock, flags );
858#endif /* CONFIG_PPC64 */
859}
860
861
862#define FEBRUARY 2
863#define STARTOFTIME 1970
864#define SECDAY 86400L
865#define SECYR (SECDAY * 365)
866#define leapyear(year) ((year) % 4 == 0 && \
867 ((year) % 100 != 0 || (year) % 400 == 0))
868#define days_in_year(a) (leapyear(a) ? 366 : 365)
869#define days_in_month(a) (month_days[(a) - 1])
870
871static int month_days[12] = {
872 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
873};
874
875/*
876 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
877 */
878void GregorianDay(struct rtc_time * tm)
879{
880 int leapsToDate;
881 int lastYear;
882 int day;
883 int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };
884
885 lastYear = tm->tm_year - 1;
886
887 /*
888 * Number of leap corrections to apply up to end of last year
889 */
890 leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;
891
892 /*
893 * This year is a leap year if it is divisible by 4 except when it is
894 * divisible by 100 unless it is divisible by 400
895 *
896 * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was
897 */
898 day = tm->tm_mon > 2 && leapyear(tm->tm_year);
899
900 day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
901 tm->tm_mday;
902
903 tm->tm_wday = day % 7;
904}
905
906void to_tm(int tim, struct rtc_time * tm)
907{
908 register int i;
909 register long hms, day;
910
911 day = tim / SECDAY;
912 hms = tim % SECDAY;
913
914 /* Hours, minutes, seconds are easy */
915 tm->tm_hour = hms / 3600;
916 tm->tm_min = (hms % 3600) / 60;
917 tm->tm_sec = (hms % 3600) % 60;
918
919 /* Number of years in days */
920 for (i = STARTOFTIME; day >= days_in_year(i); i++)
921 day -= days_in_year(i);
922 tm->tm_year = i;
923
924 /* Number of months in days left */
925 if (leapyear(tm->tm_year))
926 days_in_month(FEBRUARY) = 29;
927 for (i = 1; day >= days_in_month(i); i++)
928 day -= days_in_month(i);
929 days_in_month(FEBRUARY) = 28;
930 tm->tm_mon = i;
931
932 /* Days are what is left over (+1) from all that. */
933 tm->tm_mday = day + 1;
934
935 /*
936 * Determine the day of week
937 */
938 GregorianDay(tm);
939}
940
941/* Auxiliary function to compute scaling factors */
942/* Actually the choice of a timebase running at 1/4 of the bus
943 * frequency, giving a resolution of a few tens of nanoseconds, is quite nice.
944 * It makes this computation very precise (27-28 bits typically), which
945 * is optimistic considering the stability of most processor clock
946 * oscillators and the precision with which the timebase frequency
947 * is measured, but it does no harm.
948 */
949unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale)
950{
951 unsigned mlt=0, tmp, err;
952 /* No concern for performance, it's done once: use a stupid
953 * but safe and compact method to find the multiplier.
954 */
955
956 for (tmp = 1U<<31; tmp != 0; tmp >>= 1) {
957 if (mulhwu(inscale, mlt|tmp) < outscale)
958 mlt |= tmp;
959 }
960
961 /* We might still be off by 1 for the best approximation.
962 * A side effect of this is that if outscale is too large
963 * the returned value will be zero.
964 * Many corner cases have been checked and seem to work,
965 * though some may have been missed in testing.
966 */
967
968 err = inscale * (mlt+1);
969 if (err <= inscale/2)
970 mlt++;
971 return mlt;
972}
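/*
 * Editorial usage note: time_init() above does
 *	tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
 * so that mulhwu(ticks, tb_to_us) ~= ticks * 10^6 / ppc_tb_freq,
 * converting timebase ticks to microseconds with one multiply.
 * E.g. at a hypothetical 100MHz, tb_to_us ~= 10^6 * 2^32 / 10^8
 * ~= 42949673, and mulhwu(100, 42949673) == 1: 100 ticks == 1us.
 */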
973
974/*
975 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128-bit
976 * result.
977 */
978void div128_by_32(u64 dividend_high, u64 dividend_low,
979 unsigned divisor, struct div_result *dr)
980{
981 unsigned long a, b, c, d;
982 unsigned long w, x, y, z;
983 u64 ra, rb, rc;
984
985 a = dividend_high >> 32;
986 b = dividend_high & 0xffffffff;
987 c = dividend_low >> 32;
988 d = dividend_low & 0xffffffff;
989
990 w = a / divisor;
991 ra = ((u64)(a - (w * divisor)) << 32) + b;
992
993 rb = ((u64) do_div(ra, divisor) << 32) + c;
994 x = ra;
995
996 rc = ((u64) do_div(rb, divisor) << 32) + d;
997 y = rb;
998
999 do_div(rc, divisor);
1000 z = rc;
1001
1002 dr->result_high = ((u64)w << 32) + x;
1003 dr->result_low = ((u64)y << 32) + z;
1004
1005}
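/*
 * Editorial usage note: time_init() above computes
 *	div128_by_32(1024*1024, 0, tb_ticks_per_sec, &res);
 *	tb_to_xs = res.result_low;
 * i.e. the low 64 bits of 2^84 / tb_ticks_per_sec. That makes
 * mulhdu(ticks, tb_to_xs) == (ticks * tb_to_xs) >> 64
 * == ticks * 2^20 / tb_ticks_per_sec: elapsed time in xsec
 * (1/2^20 second) units, again with no divide on the fast path.
 */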
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
new file mode 100644
index 000000000000..5d638ecddbd0
--- /dev/null
+++ b/arch/powerpc/kernel/traps.c
@@ -0,0 +1,1101 @@
1/*
2 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Modified by Cort Dougan (cort@cs.nmt.edu)
10 * and Paul Mackerras (paulus@samba.org)
11 */
12
13/*
14 * This file handles the architecture-dependent parts of hardware exceptions
15 */
16
17#include <linux/config.h>
18#include <linux/errno.h>
19#include <linux/sched.h>
20#include <linux/kernel.h>
21#include <linux/mm.h>
22#include <linux/stddef.h>
23#include <linux/unistd.h>
24#include <linux/ptrace.h>
25#include <linux/slab.h>
26#include <linux/user.h>
27#include <linux/a.out.h>
28#include <linux/interrupt.h>
29#include <linux/init.h>
30#include <linux/module.h>
31#include <linux/prctl.h>
32#include <linux/delay.h>
33#include <linux/kprobes.h>
34
35#include <asm/kdebug.h>
36#include <asm/pgtable.h>
37#include <asm/uaccess.h>
38#include <asm/system.h>
39#include <asm/io.h>
40#include <asm/machdep.h>
41#include <asm/rtas.h>
42#include <asm/xmon.h>
43#include <asm/pmc.h>
44#ifdef CONFIG_PPC32
45#include <asm/reg.h>
46#endif
47#ifdef CONFIG_PMAC_BACKLIGHT
48#include <asm/backlight.h>
49#endif
50#ifdef CONFIG_PPC64
51#include <asm/firmware.h>
52#include <asm/processor.h>
53#include <asm/systemcfg.h>
54#endif
55
56#ifdef CONFIG_PPC64 /* XXX */
57#define _IO_BASE pci_io_base
58#endif
59
60#ifdef CONFIG_DEBUGGER
61int (*__debugger)(struct pt_regs *regs);
62int (*__debugger_ipi)(struct pt_regs *regs);
63int (*__debugger_bpt)(struct pt_regs *regs);
64int (*__debugger_sstep)(struct pt_regs *regs);
65int (*__debugger_iabr_match)(struct pt_regs *regs);
66int (*__debugger_dabr_match)(struct pt_regs *regs);
67int (*__debugger_fault_handler)(struct pt_regs *regs);
68
69EXPORT_SYMBOL(__debugger);
70EXPORT_SYMBOL(__debugger_ipi);
71EXPORT_SYMBOL(__debugger_bpt);
72EXPORT_SYMBOL(__debugger_sstep);
73EXPORT_SYMBOL(__debugger_iabr_match);
74EXPORT_SYMBOL(__debugger_dabr_match);
75EXPORT_SYMBOL(__debugger_fault_handler);
76#endif
77
78struct notifier_block *powerpc_die_chain;
79static DEFINE_SPINLOCK(die_notifier_lock);
80
81int register_die_notifier(struct notifier_block *nb)
82{
83 int err = 0;
84 unsigned long flags;
85
86 spin_lock_irqsave(&die_notifier_lock, flags);
87 err = notifier_chain_register(&powerpc_die_chain, nb);
88 spin_unlock_irqrestore(&die_notifier_lock, flags);
89 return err;
90}
91
92/*
93 * Trap & Exception support
94 */
95
96static DEFINE_SPINLOCK(die_lock);
97
98int die(const char *str, struct pt_regs *regs, long err)
99{
100 static int die_counter;
101 int nl = 0;
102
103 if (debugger(regs))
104 return 1;
105
106 console_verbose();
107 spin_lock_irq(&die_lock);
108 bust_spinlocks(1);
109#ifdef CONFIG_PMAC_BACKLIGHT
110 if (_machine == _MACH_Pmac) {
111 set_backlight_enable(1);
112 set_backlight_level(BACKLIGHT_MAX);
113 }
114#endif
115 printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
116#ifdef CONFIG_PREEMPT
117 printk("PREEMPT ");
118 nl = 1;
119#endif
120#ifdef CONFIG_SMP
121 printk("SMP NR_CPUS=%d ", NR_CPUS);
122 nl = 1;
123#endif
124#ifdef CONFIG_DEBUG_PAGEALLOC
125 printk("DEBUG_PAGEALLOC ");
126 nl = 1;
127#endif
128#ifdef CONFIG_NUMA
129 printk("NUMA ");
130 nl = 1;
131#endif
132#ifdef CONFIG_PPC64
133 switch (systemcfg->platform) {
134 case PLATFORM_PSERIES:
135 printk("PSERIES ");
136 nl = 1;
137 break;
138 case PLATFORM_PSERIES_LPAR:
139 printk("PSERIES LPAR ");
140 nl = 1;
141 break;
142 case PLATFORM_ISERIES_LPAR:
143 printk("ISERIES LPAR ");
144 nl = 1;
145 break;
146 case PLATFORM_POWERMAC:
147 printk("POWERMAC ");
148 nl = 1;
149 break;
150 case PLATFORM_BPA:
151 printk("BPA ");
152 nl = 1;
153 break;
154 }
155#endif
156 if (nl)
157 printk("\n");
158 print_modules();
159 show_regs(regs);
160 bust_spinlocks(0);
161 spin_unlock_irq(&die_lock);
162
163 if (in_interrupt())
164 panic("Fatal exception in interrupt");
165
166 if (panic_on_oops) {
167#ifdef CONFIG_PPC64
168 printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
169 ssleep(5);
170#endif
171 panic("Fatal exception");
172 }
173 do_exit(err);
174
175 return 0;
176}
177
178void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
179{
180 siginfo_t info;
181
182 if (!user_mode(regs)) {
183 if (die("Exception in kernel mode", regs, signr))
184 return;
185 }
186
187 memset(&info, 0, sizeof(info));
188 info.si_signo = signr;
189 info.si_code = code;
190 info.si_addr = (void __user *) addr;
191 force_sig_info(signr, &info, current);
192
193 /*
194 * Init gets no signals that it doesn't have a handler for.
195 * That's all very well, but if it has caused a synchronous
196 * exception and we ignore the resulting signal, it will just
197 * generate the same exception over and over again and we get
198 * nowhere. Better to kill it and let the kernel panic.
199 */
200 if (current->pid == 1) {
201 __sighandler_t handler;
202
203 spin_lock_irq(&current->sighand->siglock);
204 handler = current->sighand->action[signr-1].sa.sa_handler;
205 spin_unlock_irq(&current->sighand->siglock);
206 if (handler == SIG_DFL) {
207 /* init has generated a synchronous exception
208 and it doesn't have a handler for the signal */
209 printk(KERN_CRIT "init has generated signal %d "
210 "but has no handler for it\n", signr);
211 do_exit(signr);
212 }
213 }
214}
215
216#ifdef CONFIG_PPC64
217void system_reset_exception(struct pt_regs *regs)
218{
219	/* See if any machine-dependent code wants to handle this */
220 if (ppc_md.system_reset_exception)
221 ppc_md.system_reset_exception(regs);
222
223 die("System Reset", regs, SIGABRT);
224
225 /* Must die if the interrupt is not recoverable */
226 if (!(regs->msr & MSR_RI))
227 panic("Unrecoverable System Reset");
228
229 /* What should we do here? We could issue a shutdown or hard reset. */
230}
231#endif
232
233/*
234 * I/O accesses can cause machine checks on powermacs.
235 * Check if the NIP corresponds to the address of a sync
236 * instruction for which there is an entry in the exception
237 * table.
238 * Note that the 601 only takes a machine check on TEA
239 * (transfer error ack) signal assertion, and does not
240 * set any of the top 16 bits of SRR1.
241 * -- paulus.
242 */
243static inline int check_io_access(struct pt_regs *regs)
244{
245#ifdef CONFIG_PPC_PMAC
246 unsigned long msr = regs->msr;
247 const struct exception_table_entry *entry;
248 unsigned int *nip = (unsigned int *)regs->nip;
249
250 if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
251 && (entry = search_exception_tables(regs->nip)) != NULL) {
252 /*
253 * Check that it's a sync instruction, or somewhere
254 * in the twi; isync; nop sequence that inb/inw/inl uses.
255 * As the address is in the exception table
256 * we should be able to read the instr there.
257 * For the debug message, we look at the preceding
258 * load or store.
259 */
260 if (*nip == 0x60000000) /* nop */
261 nip -= 2;
262 else if (*nip == 0x4c00012c) /* isync */
263 --nip;
264 if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
265 /* sync or twi */
266 unsigned int rb;
267
268 --nip;
269 rb = (*nip >> 11) & 0x1f;
270 printk(KERN_DEBUG "%s bad port %lx at %p\n",
271 (*nip & 0x100)? "OUT to": "IN from",
272 regs->gpr[rb] - _IO_BASE, nip);
273 regs->msr |= MSR_RI;
274 regs->nip = entry->fixup;
275 return 1;
276 }
277 }
278#endif /* CONFIG_PPC_PMAC */
279 return 0;
280}
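
/*
 * For reference (a sketch; the exact asm varies by kernel version), the
 * instruction pattern that check_io_access() walks back through is what
 * the ppc32 inb()/inw()/inl() style accessors emit:
 *
 *	lbz	rD,0(rPORT)	# the I/O load that machine-checked
 *	twi	0,rD,0		# TO=0 never traps; forces a data dependency
 *	isync			# so the load completes before we go on
 *	nop
 *
 * The faulting NIP can land on the nop, the isync, or the twi (a sync on
 * the store side), so the code steps back to the twi/sync and then one
 * more instruction to the load/store itself to recover the port register
 * for the debug message.
 */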
281
282#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
283/* On 4xx, the reason for the machine check or program exception
284 is in the ESR. */
285#define get_reason(regs) ((regs)->dsisr)
286#ifndef CONFIG_FSL_BOOKE
287#define get_mc_reason(regs) ((regs)->dsisr)
288#else
289#define get_mc_reason(regs) (mfspr(SPRN_MCSR))
290#endif
291#define REASON_FP ESR_FP
292#define REASON_ILLEGAL (ESR_PIL | ESR_PUO)
293#define REASON_PRIVILEGED ESR_PPR
294#define REASON_TRAP ESR_PTR
295
296/* single-step stuff */
297#define single_stepping(regs) (current->thread.dbcr0 & DBCR0_IC)
298#define clear_single_step(regs) (current->thread.dbcr0 &= ~DBCR0_IC)
299
300#else
301/* On non-4xx, the reason for the machine check or program
302 exception is in the MSR. */
303#define get_reason(regs) ((regs)->msr)
304#define get_mc_reason(regs) ((regs)->msr)
305#define REASON_FP 0x100000
306#define REASON_ILLEGAL 0x80000
307#define REASON_PRIVILEGED 0x40000
308#define REASON_TRAP 0x20000
309
310#define single_stepping(regs) ((regs)->msr & MSR_SE)
311#define clear_single_step(regs) ((regs)->msr &= ~MSR_SE)
312#endif
313
314/*
315 * This is "fall-back" implementation for configurations
316 * which don't provide platform-specific machine check info
317 */
318void __attribute__ ((weak))
319platform_machine_check(struct pt_regs *regs)
320{
321}
322
323void machine_check_exception(struct pt_regs *regs)
324{
325#ifdef CONFIG_PPC64
326 int recover = 0;
327
328	/* See if any machine-dependent code wants to handle this */
329 if (ppc_md.machine_check_exception)
330 recover = ppc_md.machine_check_exception(regs);
331
332 if (recover)
333 return;
334#else
335 unsigned long reason = get_mc_reason(regs);
336
337 if (user_mode(regs)) {
338 regs->msr |= MSR_RI;
339 _exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
340 return;
341 }
342
343#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
344 /* the qspan pci read routines can cause machine checks -- Cort */
345 bad_page_fault(regs, regs->dar, SIGBUS);
346 return;
347#endif
348
349 if (debugger_fault_handler(regs)) {
350 regs->msr |= MSR_RI;
351 return;
352 }
353
354 if (check_io_access(regs))
355 return;
356
357#if defined(CONFIG_4xx) && !defined(CONFIG_440A)
358 if (reason & ESR_IMCP) {
359 printk("Instruction");
360 mtspr(SPRN_ESR, reason & ~ESR_IMCP);
361 } else
362 printk("Data");
363 printk(" machine check in kernel mode.\n");
364#elif defined(CONFIG_440A)
365 printk("Machine check in kernel mode.\n");
366	if (reason & ESR_IMCP) {
367 printk("Instruction Synchronous Machine Check exception\n");
368 mtspr(SPRN_ESR, reason & ~ESR_IMCP);
369 }
370 else {
371 u32 mcsr = mfspr(SPRN_MCSR);
372 if (mcsr & MCSR_IB)
373 printk("Instruction Read PLB Error\n");
374 if (mcsr & MCSR_DRB)
375 printk("Data Read PLB Error\n");
376 if (mcsr & MCSR_DWB)
377 printk("Data Write PLB Error\n");
378 if (mcsr & MCSR_TLBP)
379 printk("TLB Parity Error\n");
380		if (mcsr & MCSR_ICP) {
381 flush_instruction_cache();
382 printk("I-Cache Parity Error\n");
383 }
384 if (mcsr & MCSR_DCSP)
385 printk("D-Cache Search Parity Error\n");
386 if (mcsr & MCSR_DCFP)
387 printk("D-Cache Flush Parity Error\n");
388 if (mcsr & MCSR_IMPE)
389 printk("Machine Check exception is imprecise\n");
390
391 /* Clear MCSR */
392 mtspr(SPRN_MCSR, mcsr);
393 }
394#elif defined (CONFIG_E500)
395 printk("Machine check in kernel mode.\n");
396 printk("Caused by (from MCSR=%lx): ", reason);
397
398 if (reason & MCSR_MCP)
399 printk("Machine Check Signal\n");
400 if (reason & MCSR_ICPERR)
401 printk("Instruction Cache Parity Error\n");
402 if (reason & MCSR_DCP_PERR)
403 printk("Data Cache Push Parity Error\n");
404 if (reason & MCSR_DCPERR)
405 printk("Data Cache Parity Error\n");
406 if (reason & MCSR_GL_CI)
407 printk("Guarded Load or Cache-Inhibited stwcx.\n");
408 if (reason & MCSR_BUS_IAERR)
409 printk("Bus - Instruction Address Error\n");
410 if (reason & MCSR_BUS_RAERR)
411 printk("Bus - Read Address Error\n");
412 if (reason & MCSR_BUS_WAERR)
413 printk("Bus - Write Address Error\n");
414 if (reason & MCSR_BUS_IBERR)
415 printk("Bus - Instruction Data Error\n");
416 if (reason & MCSR_BUS_RBERR)
417 printk("Bus - Read Data Bus Error\n");
418 if (reason & MCSR_BUS_WBERR)
419		printk("Bus - Write Data Bus Error\n");
420 if (reason & MCSR_BUS_IPERR)
421 printk("Bus - Instruction Parity Error\n");
422 if (reason & MCSR_BUS_RPERR)
423 printk("Bus - Read Parity Error\n");
424#elif defined (CONFIG_E200)
425 printk("Machine check in kernel mode.\n");
426 printk("Caused by (from MCSR=%lx): ", reason);
427
428 if (reason & MCSR_MCP)
429 printk("Machine Check Signal\n");
430 if (reason & MCSR_CP_PERR)
431 printk("Cache Push Parity Error\n");
432 if (reason & MCSR_CPERR)
433 printk("Cache Parity Error\n");
434 if (reason & MCSR_EXCP_ERR)
435 printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
436 if (reason & MCSR_BUS_IRERR)
437 printk("Bus - Read Bus Error on instruction fetch\n");
438 if (reason & MCSR_BUS_DRERR)
439 printk("Bus - Read Bus Error on data load\n");
440 if (reason & MCSR_BUS_WRERR)
441 printk("Bus - Write Bus Error on buffered store or cache line push\n");
442#else /* !CONFIG_4xx && !CONFIG_E500 && !CONFIG_E200 */
443 printk("Machine check in kernel mode.\n");
444 printk("Caused by (from SRR1=%lx): ", reason);
445 switch (reason & 0x601F0000) {
446 case 0x80000:
447 printk("Machine check signal\n");
448 break;
449 case 0: /* for 601 */
450 case 0x40000:
451 case 0x140000: /* 7450 MSS error and TEA */
452 printk("Transfer error ack signal\n");
453 break;
454 case 0x20000:
455 printk("Data parity error signal\n");
456 break;
457 case 0x10000:
458 printk("Address parity error signal\n");
459 break;
460 case 0x20000000:
461 printk("L1 Data Cache error\n");
462 break;
463 case 0x40000000:
464 printk("L1 Instruction Cache error\n");
465 break;
466 case 0x00100000:
467 printk("L2 data cache parity error\n");
468 break;
469 default:
470 printk("Unknown values in msr\n");
471 }
472#endif /* CONFIG_4xx */
473
474 /*
475 * Optional platform-provided routine to print out
476 * additional info, e.g. bus error registers.
477 */
478 platform_machine_check(regs);
479#endif /* CONFIG_PPC64 */
480
481 if (debugger_fault_handler(regs))
482 return;
483 die("Machine check", regs, SIGBUS);
484
485 /* Must die if the interrupt is not recoverable */
486 if (!(regs->msr & MSR_RI))
487 panic("Unrecoverable Machine check");
488}
489
490void SMIException(struct pt_regs *regs)
491{
492 die("System Management Interrupt", regs, SIGABRT);
493}
494
495void unknown_exception(struct pt_regs *regs)
496{
497	printk("Bad trap at PC: %lx, MSR: %lx, vector=%lx\n",
498 regs->nip, regs->msr, regs->trap);
499
500 _exception(SIGTRAP, regs, 0, 0);
501}
502
503void instruction_breakpoint_exception(struct pt_regs *regs)
504{
505 if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
506 5, SIGTRAP) == NOTIFY_STOP)
507 return;
508 if (debugger_iabr_match(regs))
509 return;
510 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
511}
512
513void RunModeException(struct pt_regs *regs)
514{
515 _exception(SIGTRAP, regs, 0, 0);
516}
517
518void __kprobes single_step_exception(struct pt_regs *regs)
519{
520 regs->msr &= ~(MSR_SE | MSR_BE); /* Turn off 'trace' bits */
521
522 if (notify_die(DIE_SSTEP, "single_step", regs, 5,
523 5, SIGTRAP) == NOTIFY_STOP)
524 return;
525 if (debugger_sstep(regs))
526 return;
527
528 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
529}
530
531/*
532 * After we have successfully emulated an instruction, we have to
533 * check if the instruction was being single-stepped, and if so,
534 * pretend we got a single-step exception. This was pointed out
535 * by Kumar Gala. -- paulus
536 */
537static void emulate_single_step(struct pt_regs *regs)
538{
539 if (single_stepping(regs)) {
540 clear_single_step(regs);
541 _exception(SIGTRAP, regs, TRAP_TRACE, 0);
542 }
543}
544
545static void parse_fpe(struct pt_regs *regs)
546{
547 int code = 0;
548 unsigned long fpscr;
549
550 flush_fp_to_thread(current);
551
552 fpscr = current->thread.fpscr.val;
553
554 /* Invalid operation */
555 if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
556 code = FPE_FLTINV;
557
558 /* Overflow */
559 else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
560 code = FPE_FLTOVF;
561
562 /* Underflow */
563 else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
564 code = FPE_FLTUND;
565
566 /* Divide by zero */
567 else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
568 code = FPE_FLTDIV;
569
570 /* Inexact result */
571 else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
572 code = FPE_FLTRES;
573
574 _exception(SIGFPE, regs, code, regs->nip);
575}
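
/*
 * Illustrative userspace counterpart (a sketch, not kernel code): a
 * process only takes this path if it has enabled FP exception traps,
 * e.g. with the GNU feenableexcept() extension, together with a trapping
 * FP exception mode (prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE)), which the
 * C library typically arranges:
 *
 *	#include <fenv.h>
 *
 *	feenableexcept(FE_DIVBYZERO);	// sets FPSCR_ZE
 *	volatile double x = 1.0, y = 0.0;
 *	x /= y;		// raises FPSCR_ZX -> SIGFPE, si_code FPE_FLTDIV
 */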
576
577/*
578 * Illegal instruction emulation support. Originally written to
579 * provide the PVR to user applications using the mfspr rd, PVR.
580 * Return non-zero if we can't emulate, or -EFAULT if the associated
581 * memory access caused an access fault. Return zero on success.
582 *
583 * There are a couple of ways to do this, either "decode" the instruction
584 * or directly match lots of bits. In this case, matching lots of
585 * bits is faster and easier.
586 *
587 */
588#define INST_MFSPR_PVR 0x7c1f42a6
589#define INST_MFSPR_PVR_MASK 0xfc1fffff
590
591#define INST_DCBA 0x7c0005ec
592#define INST_DCBA_MASK 0x7c0007fe
593
594#define INST_MCRXR 0x7c000400
595#define INST_MCRXR_MASK 0x7c0007fe
596
597#define INST_STRING 0x7c00042a
598#define INST_STRING_MASK 0x7c0007fe
599#define INST_STRING_GEN_MASK 0x7c00067e
600#define INST_LSWI 0x7c0004aa
601#define INST_LSWX 0x7c00042a
602#define INST_STSWI 0x7c0005aa
603#define INST_STSWX 0x7c00052a
604
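/*
 * How the match/mask pairs above work, taking INST_MFSPR_PVR as a worked
 * example: "mfspr rD,PVR" is primary opcode 31, extended opcode 339,
 * with SPR 287 (the PVR) encoded in bits 11-20 as two swapped 5-bit
 * halves.  That fixes every bit except the rD field in bits 21-25, so
 * masking with 0xfc1fffff clears exactly those five bits and the compare
 * matches any destination register:
 *
 *	if ((instword & INST_MFSPR_PVR_MASK) == INST_MFSPR_PVR)
 *		rd = (instword >> 21) & 0x1f;	// recover rD
 */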
605static int emulate_string_inst(struct pt_regs *regs, u32 instword)
606{
607 u8 rT = (instword >> 21) & 0x1f;
608 u8 rA = (instword >> 16) & 0x1f;
609 u8 NB_RB = (instword >> 11) & 0x1f;
610 u32 num_bytes;
611 unsigned long EA;
612 int pos = 0;
613
614 /* Early out if we are an invalid form of lswx */
615 if ((instword & INST_STRING_MASK) == INST_LSWX)
616 if ((rT == rA) || (rT == NB_RB))
617 return -EINVAL;
618
619 EA = (rA == 0) ? 0 : regs->gpr[rA];
620
621 switch (instword & INST_STRING_MASK) {
622 case INST_LSWX:
623 case INST_STSWX:
624 EA += NB_RB;
625 num_bytes = regs->xer & 0x7f;
626 break;
627 case INST_LSWI:
628 case INST_STSWI:
629 num_bytes = (NB_RB == 0) ? 32 : NB_RB;
630 break;
631 default:
632 return -EINVAL;
633 }
634
635 while (num_bytes != 0)
636 {
637 u8 val;
638 u32 shift = 8 * (3 - (pos & 0x3));
639
640 switch ((instword & INST_STRING_MASK)) {
641 case INST_LSWX:
642 case INST_LSWI:
643 if (get_user(val, (u8 __user *)EA))
644 return -EFAULT;
645 /* first time updating this reg,
646 * zero it out */
647 if (pos == 0)
648 regs->gpr[rT] = 0;
649 regs->gpr[rT] |= val << shift;
650 break;
651 case INST_STSWI:
652 case INST_STSWX:
653 val = regs->gpr[rT] >> shift;
654 if (put_user(val, (u8 __user *)EA))
655 return -EFAULT;
656 break;
657 }
658 /* move EA to next address */
659 EA += 1;
660 num_bytes--;
661
662 /* manage our position within the register */
663 if (++pos == 4) {
664 pos = 0;
665 if (++rT == 32)
666 rT = 0;
667 }
668 }
669
670 return 0;
671}
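
/*
 * Worked example of the loop above: emulating "lswi r5,r4,6" gives
 * rT=5, rA=4, NB=6, so num_bytes=6 and EA=GPR4.  The first four bytes
 * are packed big-endian into r5 (zeroed when pos == 0), then pos wraps,
 * rT advances to r6 (and would wrap 31 -> 0), and the last two bytes
 * land in the top half of r6.
 */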
672
673static int emulate_instruction(struct pt_regs *regs)
674{
675 u32 instword;
676 u32 rd;
677
678 if (!user_mode(regs))
679 return -EINVAL;
680 CHECK_FULL_REGS(regs);
681
682 if (get_user(instword, (u32 __user *)(regs->nip)))
683 return -EFAULT;
684
685 /* Emulate the mfspr rD, PVR. */
686 if ((instword & INST_MFSPR_PVR_MASK) == INST_MFSPR_PVR) {
687 rd = (instword >> 21) & 0x1f;
688 regs->gpr[rd] = mfspr(SPRN_PVR);
689 return 0;
690 }
691
692 /* Emulating the dcba insn is just a no-op. */
693 if ((instword & INST_DCBA_MASK) == INST_DCBA)
694 return 0;
695
696 /* Emulate the mcrxr insn. */
697 if ((instword & INST_MCRXR_MASK) == INST_MCRXR) {
698 int shift = (instword >> 21) & 0x1c;
699 unsigned long msk = 0xf0000000UL >> shift;
700
701 regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
702 regs->xer &= ~0xf0000000UL;
703 return 0;
704 }
705
706 /* Emulate load/store string insn. */
707 if ((instword & INST_STRING_GEN_MASK) == INST_STRING)
708 return emulate_string_inst(regs, instword);
709
710 return -EINVAL;
711}
712
713/*
714 * Look through the list of trap instructions that are used for BUG(),
715 * BUG_ON() and WARN_ON() and see if we hit one. At this point we know
716 * that the exception was caused by a trap instruction of some kind.
717 * Returns 1 if we should continue (i.e. it was a WARN_ON) or 0
718 * otherwise.
719 */
720extern struct bug_entry __start___bug_table[], __stop___bug_table[];
721
722#ifndef CONFIG_MODULES
723#define module_find_bug(x) NULL
724#endif
725
726struct bug_entry *find_bug(unsigned long bugaddr)
727{
728 struct bug_entry *bug;
729
730 for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
731 if (bugaddr == bug->bug_addr)
732 return bug;
733 return module_find_bug(bugaddr);
734}
735
736static int check_bug_trap(struct pt_regs *regs)
737{
738 struct bug_entry *bug;
739 unsigned long addr;
740
741 if (regs->msr & MSR_PR)
742 return 0; /* not in kernel */
743 addr = regs->nip; /* address of trap instruction */
744 if (addr < PAGE_OFFSET)
745 return 0;
746 bug = find_bug(regs->nip);
747 if (bug == NULL)
748 return 0;
749 if (bug->line & BUG_WARNING_TRAP) {
750 /* this is a WARN_ON rather than BUG/BUG_ON */
751#ifdef CONFIG_XMON
752 xmon_printf(KERN_ERR "Badness in %s at %s:%d\n",
753 bug->function, bug->file,
754 bug->line & ~BUG_WARNING_TRAP);
755#endif /* CONFIG_XMON */
756 printk(KERN_ERR "Badness in %s at %s:%d\n",
757 bug->function, bug->file,
758 bug->line & ~BUG_WARNING_TRAP);
759 dump_stack();
760 return 1;
761 }
762#ifdef CONFIG_XMON
763 xmon_printf(KERN_CRIT "kernel BUG in %s at %s:%d!\n",
764 bug->function, bug->file, bug->line);
765 xmon(regs);
766#endif /* CONFIG_XMON */
767 printk(KERN_CRIT "kernel BUG in %s at %s:%d!\n",
768 bug->function, bug->file, bug->line);
769
770 return 0;
771}
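
/*
 * For context, this era's powerpc BUG() macro looks roughly like the
 * sketch below (details vary by version and word size): a trap
 * instruction plus a bug_entry record in the __bug_table section, which
 * is exactly what find_bug() and check_bug_trap() consume.
 *
 *	1:	twi 31,0,0
 *	.section __bug_table,"a"
 *		.long	1b, __LINE__, __FILE__, __FUNCTION__
 *	.previous
 *
 * WARN_ON() additionally ORs BUG_WARNING_TRAP into the line field, which
 * is why it is masked back out when the warning is printed above.
 */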
772
773void __kprobes program_check_exception(struct pt_regs *regs)
774{
775 unsigned int reason = get_reason(regs);
776 extern int do_mathemu(struct pt_regs *regs);
777
778#ifdef CONFIG_MATH_EMULATION
779 /* (reason & REASON_ILLEGAL) would be the obvious thing here,
780 * but there seems to be a hardware bug on the 405GP (RevD)
781 * that means ESR is sometimes set incorrectly - either to
782 * ESR_DST (!?) or 0. In the process of chasing this with the
783 * hardware people - not sure if it can happen on any illegal
784 * instruction or only on FP instructions, whether there is a
785	 * pattern to occurrences etc. -dgibson 31/Mar/2003 */
786 if (!(reason & REASON_TRAP) && do_mathemu(regs) == 0) {
787 emulate_single_step(regs);
788 return;
789 }
790#endif /* CONFIG_MATH_EMULATION */
791
792 if (reason & REASON_FP) {
793 /* IEEE FP exception */
794 parse_fpe(regs);
795 return;
796 }
797 if (reason & REASON_TRAP) {
798 /* trap exception */
799 if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
800 == NOTIFY_STOP)
801 return;
802 if (debugger_bpt(regs))
803 return;
804 if (check_bug_trap(regs)) {
805 regs->nip += 4;
806 return;
807 }
808 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
809 return;
810 }
811
812 /* Try to emulate it if we should. */
813 if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
814 switch (emulate_instruction(regs)) {
815 case 0:
816 regs->nip += 4;
817 emulate_single_step(regs);
818 return;
819 case -EFAULT:
820 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
821 return;
822 }
823 }
824
825 if (reason & REASON_PRIVILEGED)
826 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
827 else
828 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
829}
830
831void alignment_exception(struct pt_regs *regs)
832{
833 int fixed;
834
835 fixed = fix_alignment(regs);
836
837 if (fixed == 1) {
838 regs->nip += 4; /* skip over emulated instruction */
839 emulate_single_step(regs);
840 return;
841 }
842
843 /* Operand address was bad */
844 if (fixed == -EFAULT) {
845 if (user_mode(regs))
846 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->dar);
847 else
848 /* Search exception table */
849 bad_page_fault(regs, regs->dar, SIGSEGV);
850 return;
851 }
852 _exception(SIGBUS, regs, BUS_ADRALN, regs->dar);
853}
854
855void StackOverflow(struct pt_regs *regs)
856{
857 printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
858 current, regs->gpr[1]);
859 debugger(regs);
860 show_regs(regs);
861 panic("kernel stack overflow");
862}
863
864void nonrecoverable_exception(struct pt_regs *regs)
865{
866 printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
867 regs->nip, regs->msr);
868 debugger(regs);
869 die("nonrecoverable exception", regs, SIGKILL);
870}
871
872void trace_syscall(struct pt_regs *regs)
873{
874 printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n",
875 current, current->pid, regs->nip, regs->link, regs->gpr[0],
876 regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
877}
878
879void kernel_fp_unavailable_exception(struct pt_regs *regs)
880{
881 printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
882 "%lx at %lx\n", regs->trap, regs->nip);
883 die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
884}
885
886void altivec_unavailable_exception(struct pt_regs *regs)
887{
888#if !defined(CONFIG_ALTIVEC)
889 if (user_mode(regs)) {
890 /* A user program has executed an altivec instruction,
891 but this kernel doesn't support altivec. */
892 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
893 return;
894 }
895#endif
896 printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
897 "%lx at %lx\n", regs->trap, regs->nip);
898 die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
899}
900
901#ifdef CONFIG_PPC64
902extern perf_irq_t perf_irq;
903#endif
904
905#if defined(CONFIG_PPC64) || defined(CONFIG_E500)
906void performance_monitor_exception(struct pt_regs *regs)
907{
908 perf_irq(regs);
909}
910#endif
911
912#ifdef CONFIG_8xx
913void SoftwareEmulation(struct pt_regs *regs)
914{
915 extern int do_mathemu(struct pt_regs *);
916 extern int Soft_emulate_8xx(struct pt_regs *);
917 int errcode;
918
919 CHECK_FULL_REGS(regs);
920
921 if (!user_mode(regs)) {
922 debugger(regs);
923 die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
924 }
925
926#ifdef CONFIG_MATH_EMULATION
927 errcode = do_mathemu(regs);
928#else
929 errcode = Soft_emulate_8xx(regs);
930#endif
931 if (errcode) {
932 if (errcode > 0)
933 _exception(SIGFPE, regs, 0, 0);
934 else if (errcode == -EFAULT)
935 _exception(SIGSEGV, regs, 0, 0);
936 else
937 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
938 } else
939 emulate_single_step(regs);
940}
941#endif /* CONFIG_8xx */
942
943#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
944
945void DebugException(struct pt_regs *regs, unsigned long debug_status)
946{
947 if (debug_status & DBSR_IC) { /* instruction completion */
948 regs->msr &= ~MSR_DE;
949 if (user_mode(regs)) {
950 current->thread.dbcr0 &= ~DBCR0_IC;
951 } else {
952 /* Disable instruction completion */
953 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
954 /* Clear the instruction completion event */
955 mtspr(SPRN_DBSR, DBSR_IC);
956 if (debugger_sstep(regs))
957 return;
958 }
959 _exception(SIGTRAP, regs, TRAP_TRACE, 0);
960 }
961}
962#endif /* CONFIG_40x || CONFIG_BOOKE */
963
964#if !defined(CONFIG_TAU_INT)
965void TAUException(struct pt_regs *regs)
966{
967 printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n",
968 regs->nip, regs->msr, regs->trap, print_tainted());
969}
970#endif /* CONFIG_TAU_INT */
971
972#ifdef CONFIG_ALTIVEC
973void altivec_assist_exception(struct pt_regs *regs)
974{
975 int err;
976
977 if (!user_mode(regs)) {
978 printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
979 " at %lx\n", regs->nip);
980 die("Kernel VMX/Altivec assist exception", regs, SIGILL);
981 }
982
983 flush_altivec_to_thread(current);
984
985 err = emulate_altivec(regs);
986 if (err == 0) {
987 regs->nip += 4; /* skip emulated instruction */
988 emulate_single_step(regs);
989 return;
990 }
991
992 if (err == -EFAULT) {
993 /* got an error reading the instruction */
994 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
995 } else {
996 /* didn't recognize the instruction */
997 /* XXX quick hack for now: set the non-Java bit in the VSCR */
998 if (printk_ratelimit())
999 printk(KERN_ERR "Unrecognized altivec instruction "
1000 "in %s at %lx\n", current->comm, regs->nip);
1001 current->thread.vscr.u[3] |= 0x10000;
1002 }
1003}
1004#endif /* CONFIG_ALTIVEC */
1005
1006#ifdef CONFIG_FSL_BOOKE
1007void CacheLockingException(struct pt_regs *regs, unsigned long address,
1008 unsigned long error_code)
1009{
1010 /* We treat cache locking instructions from the user
1011	 * as privileged ops; in the future we could try to do
1012	 * something smarter.
1013 */
1014 if (error_code & (ESR_DLK|ESR_ILK))
1015 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
1016 return;
1017}
1018#endif /* CONFIG_FSL_BOOKE */
1019
1020#ifdef CONFIG_SPE
1021void SPEFloatingPointException(struct pt_regs *regs)
1022{
1023 unsigned long spefscr;
1024 int fpexc_mode;
1025 int code = 0;
1026
1027 spefscr = current->thread.spefscr;
1028 fpexc_mode = current->thread.fpexc_mode;
1029
1030	/* Hardware does not necessarily set sticky
1031 * underflow/overflow/invalid flags */
1032 if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
1033 code = FPE_FLTOVF;
1034 spefscr |= SPEFSCR_FOVFS;
1035 }
1036 else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
1037 code = FPE_FLTUND;
1038 spefscr |= SPEFSCR_FUNFS;
1039 }
1040 else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
1041 code = FPE_FLTDIV;
1042 else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
1043 code = FPE_FLTINV;
1044 spefscr |= SPEFSCR_FINVS;
1045 }
1046 else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
1047 code = FPE_FLTRES;
1048
1049 current->thread.spefscr = spefscr;
1050
1051 _exception(SIGFPE, regs, code, regs->nip);
1052 return;
1053}
1054#endif
1055
1056/*
1057 * We enter here if we get an unrecoverable exception, that is, one
1058 * that happened at a point where the RI (recoverable interrupt) bit
1059 * in the MSR is 0. This indicates that SRR0/1 are live, and that
1060 * we therefore lost state by taking this exception.
1061 */
1062void unrecoverable_exception(struct pt_regs *regs)
1063{
1064 printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
1065 regs->trap, regs->nip);
1066 die("Unrecoverable exception", regs, SIGABRT);
1067}
1068
1069#ifdef CONFIG_BOOKE_WDT
1070/*
1071 * Default handler for a Watchdog exception,
1072 * spins until a reboot occurs
1073 */
1074void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
1075{
1076 /* Generic WatchdogHandler, implement your own */
1077 mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
1078 return;
1079}
1080
1081void WatchdogException(struct pt_regs *regs)
1082{
1083	printk(KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
1084 WatchdogHandler(regs);
1085}
1086#endif
1087
1088/*
1089 * We enter here if we discover during exception entry that we are
1090 * running in supervisor mode with a userspace value in the stack pointer.
1091 */
1092void kernel_bad_stack(struct pt_regs *regs)
1093{
1094 printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
1095 regs->gpr[1], regs->nip);
1096 die("Bad kernel stack pointer", regs, SIGABRT);
1097}
1098
1099void __init trap_init(void)
1100{
1101}
diff --git a/arch/powerpc/kernel/vecemu.c b/arch/powerpc/kernel/vecemu.c
new file mode 100644
index 000000000000..604d0947cb20
--- /dev/null
+++ b/arch/powerpc/kernel/vecemu.c
@@ -0,0 +1,345 @@
1/*
2 * Routines to emulate some Altivec/VMX instructions, specifically
3 * those that can trap when given denormalized operands in Java mode.
4 */
5#include <linux/kernel.h>
6#include <linux/errno.h>
7#include <linux/sched.h>
8#include <asm/ptrace.h>
9#include <asm/processor.h>
10#include <asm/uaccess.h>
11
12/* Functions in vector.S */
13extern void vaddfp(vector128 *dst, vector128 *a, vector128 *b);
14extern void vsubfp(vector128 *dst, vector128 *a, vector128 *b);
15extern void vmaddfp(vector128 *dst, vector128 *a, vector128 *b, vector128 *c);
16extern void vnmsubfp(vector128 *dst, vector128 *a, vector128 *b, vector128 *c);
17extern void vrefp(vector128 *dst, vector128 *src);
18extern void vrsqrtefp(vector128 *dst, vector128 *src);
19extern void vexptep(vector128 *dst, vector128 *src);
20
21static unsigned int exp2s[8] = {
22 0x800000,
23 0x8b95c2,
24 0x9837f0,
25 0xa5fed7,
26 0xb504f3,
27 0xc5672a,
28 0xd744fd,
29 0xeac0c7
30};
31
32/*
33 * Computes an estimate of 2^x. The `s' argument is the 32-bit
34 * single-precision floating-point representation of x.
35 */
36static unsigned int eexp2(unsigned int s)
37{
38 int exp, pwr;
39 unsigned int mant, frac;
40
41 /* extract exponent field from input */
42 exp = ((s >> 23) & 0xff) - 127;
43 if (exp > 7) {
44 /* check for NaN input */
45 if (exp == 128 && (s & 0x7fffff) != 0)
46 return s | 0x400000; /* return QNaN */
47 /* 2^-big = 0, 2^+big = +Inf */
48 return (s & 0x80000000)? 0: 0x7f800000; /* 0 or +Inf */
49 }
50 if (exp < -23)
51 return 0x3f800000; /* 1.0 */
52
53 /* convert to fixed point integer in 9.23 representation */
54 pwr = (s & 0x7fffff) | 0x800000;
55 if (exp > 0)
56 pwr <<= exp;
57 else
58 pwr >>= -exp;
59 if (s & 0x80000000)
60 pwr = -pwr;
61
62 /* extract integer part, which becomes exponent part of result */
63 exp = (pwr >> 23) + 126;
64 if (exp >= 254)
65 return 0x7f800000;
66 if (exp < -23)
67 return 0;
68
69 /* table lookup on top 3 bits of fraction to get mantissa */
70 mant = exp2s[(pwr >> 20) & 7];
71
72 /* linear interpolation using remaining 20 bits of fraction */
73 asm("mulhwu %0,%1,%2" : "=r" (frac)
74 : "r" (pwr << 12), "r" (0x172b83ff));
75 asm("mulhwu %0,%1,%2" : "=r" (frac) : "r" (frac), "r" (mant));
76 mant += frac;
77
78 if (exp >= 0)
79 return mant + (exp << 23);
80
81 /* denormalized result */
82 exp = -exp;
83 mant += 1 << (exp - 1);
84 return mant >> exp;
85}
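
/*
 * Worked example: x = 2.5 is s = 0x40200000, so exp = 1 and
 * pwr = 0xa00000 << 1 = 0x1400000, i.e. 2.5 in 9.23 fixed point.  The
 * integer part 2 gives a result exponent of 2 + 126 = 128, the top three
 * fraction bits select exp2s[4] = 0xb504f3 (sqrt(2) * 2^23), and the
 * remaining fraction bits are zero, so the result is
 * 0xb504f3 + (128 << 23) = 0x40b504f3, about 5.657 = 2^2.5.  (The
 * implicit-1 bit of mant deliberately carries into the exponent field.)
 */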
86
87/*
88 * Computes an estimate of log_2(x). The `s' argument is the 32-bit
89 * single-precision floating-point representation of x.
90 */
91static unsigned int elog2(unsigned int s)
92{
93 int exp, mant, lz, frac;
94
95 exp = s & 0x7f800000;
96 mant = s & 0x7fffff;
97 if (exp == 0x7f800000) { /* Inf or NaN */
98 if (mant != 0)
99 s |= 0x400000; /* turn NaN into QNaN */
100 return s;
101 }
102 if ((exp | mant) == 0) /* +0 or -0 */
103 return 0xff800000; /* return -Inf */
104
105 if (exp == 0) {
106 /* denormalized */
107 asm("cntlzw %0,%1" : "=r" (lz) : "r" (mant));
108 mant <<= lz - 8;
109 exp = (-118 - lz) << 23;
110 } else {
111 mant |= 0x800000;
112 exp -= 127 << 23;
113 }
114
115 if (mant >= 0xb504f3) { /* 2^0.5 * 2^23 */
116 exp |= 0x400000; /* 0.5 * 2^23 */
117 asm("mulhwu %0,%1,%2" : "=r" (mant)
118 : "r" (mant), "r" (0xb504f334)); /* 2^-0.5 * 2^32 */
119 }
120 if (mant >= 0x9837f0) { /* 2^0.25 * 2^23 */
121 exp |= 0x200000; /* 0.25 * 2^23 */
122 asm("mulhwu %0,%1,%2" : "=r" (mant)
123 : "r" (mant), "r" (0xd744fccb)); /* 2^-0.25 * 2^32 */
124 }
125 if (mant >= 0x8b95c2) { /* 2^0.125 * 2^23 */
126 exp |= 0x100000; /* 0.125 * 2^23 */
127 asm("mulhwu %0,%1,%2" : "=r" (mant)
128 : "r" (mant), "r" (0xeac0c6e8)); /* 2^-0.125 * 2^32 */
129 }
130 if (mant > 0x800000) { /* 1.0 * 2^23 */
131 /* calculate (mant - 1) * 1.381097463 */
132 /* 1.381097463 == 0.125 / (2^0.125 - 1) */
133 asm("mulhwu %0,%1,%2" : "=r" (frac)
134 : "r" ((mant - 0x800000) << 1), "r" (0xb0c7cd3a));
135 exp += frac;
136 }
137 s = exp & 0x80000000;
138 if (exp != 0) {
139 if (s)
140 exp = -exp;
141 asm("cntlzw %0,%1" : "=r" (lz) : "r" (exp));
142 lz = 8 - lz;
143 if (lz > 0)
144 exp >>= lz;
145 else if (lz < 0)
146 exp <<= -lz;
147 s += ((lz + 126) << 23) + exp;
148 }
149 return s;
150}
151
152#define VSCR_SAT 1
153
154static int ctsxs(unsigned int x, int scale, unsigned int *vscrp)
155{
156 int exp, mant;
157
158 exp = (x >> 23) & 0xff;
159 mant = x & 0x7fffff;
160 if (exp == 255 && mant != 0)
161 return 0; /* NaN -> 0 */
162 exp = exp - 127 + scale;
163 if (exp < 0)
164 return 0; /* round towards zero */
165 if (exp >= 31) {
166 /* saturate, unless the result would be -2^31 */
167 if (x + (scale << 23) != 0xcf000000)
168 *vscrp |= VSCR_SAT;
169 return (x & 0x80000000)? 0x80000000: 0x7fffffff;
170 }
171 mant |= 0x800000;
172 mant = (mant << 7) >> (30 - exp);
173 return (x & 0x80000000)? -mant: mant;
174}
175
176static unsigned int ctuxs(unsigned int x, int scale, unsigned int *vscrp)
177{
178 int exp;
179 unsigned int mant;
180
181 exp = (x >> 23) & 0xff;
182 mant = x & 0x7fffff;
183 if (exp == 255 && mant != 0)
184 return 0; /* NaN -> 0 */
185 exp = exp - 127 + scale;
186 if (exp < 0)
187 return 0; /* round towards zero */
188 if (x & 0x80000000) {
189 /* negative => saturate to 0 */
190 *vscrp |= VSCR_SAT;
191 return 0;
192 }
193 if (exp >= 32) {
194 /* saturate */
195 *vscrp |= VSCR_SAT;
196 return 0xffffffff;
197 }
198 mant |= 0x800000;
199 mant = (mant << 8) >> (31 - exp);
200 return mant;
201}
202
203/* Round to floating integer, towards 0 */
204static unsigned int rfiz(unsigned int x)
205{
206 int exp;
207
208 exp = ((x >> 23) & 0xff) - 127;
209 if (exp == 128 && (x & 0x7fffff) != 0)
210 return x | 0x400000; /* NaN -> make it a QNaN */
211 if (exp >= 23)
212 return x; /* it's an integer already (or Inf) */
213 if (exp < 0)
214 return x & 0x80000000; /* |x| < 1.0 rounds to 0 */
215 return x & ~(0x7fffff >> exp);
216}
217
218/* Round to floating integer, towards +/- Inf */
219static unsigned int rfii(unsigned int x)
220{
221 int exp, mask;
222
223 exp = ((x >> 23) & 0xff) - 127;
224 if (exp == 128 && (x & 0x7fffff) != 0)
225 return x | 0x400000; /* NaN -> make it a QNaN */
226 if (exp >= 23)
227 return x; /* it's an integer already (or Inf) */
228 if ((x & 0x7fffffff) == 0)
229 return x; /* +/-0 -> +/-0 */
230 if (exp < 0)
231 /* 0 < |x| < 1.0 rounds to +/- 1.0 */
232 return (x & 0x80000000) | 0x3f800000;
233 mask = 0x7fffff >> exp;
234 /* mantissa overflows into exponent - that's OK,
235 it can't overflow into the sign bit */
236 return (x + mask) & ~mask;
237}
238
239/* Round to floating integer, to nearest */
240static unsigned int rfin(unsigned int x)
241{
242 int exp, half;
243
244 exp = ((x >> 23) & 0xff) - 127;
245 if (exp == 128 && (x & 0x7fffff) != 0)
246 return x | 0x400000; /* NaN -> make it a QNaN */
247 if (exp >= 23)
248 return x; /* it's an integer already (or Inf) */
249 if (exp < -1)
250 return x & 0x80000000; /* |x| < 0.5 -> +/-0 */
251 if (exp == -1)
252 /* 0.5 <= |x| < 1.0 rounds to +/- 1.0 */
253 return (x & 0x80000000) | 0x3f800000;
254 half = 0x400000 >> exp;
255 /* add 0.5 to the magnitude and chop off the fraction bits */
256 return (x + half) & ~(0x7fffff >> exp);
257}
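
/*
 * Sanity-check values for the three rounding helpers (illustrative):
 * rfin(0x3fc00000), i.e. 1.5f, adds half = 0x400000 and masks off the
 * fraction, giving 0x40000000 (2.0f); note that ties round away from
 * zero rather than to even, so 2.5f (0x40200000) becomes 3.0f
 * (0x40400000).  rfiz() chops the same inputs to 1.0f and 2.0f, and
 * rfii() pushes them up to 2.0f and 3.0f.
 */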
258
259int emulate_altivec(struct pt_regs *regs)
260{
261 unsigned int instr, i;
262 unsigned int va, vb, vc, vd;
263 vector128 *vrs;
264
265 if (get_user(instr, (unsigned int __user *) regs->nip))
266 return -EFAULT;
267 if ((instr >> 26) != 4)
268 return -EINVAL; /* not an altivec instruction */
269 vd = (instr >> 21) & 0x1f;
270 va = (instr >> 16) & 0x1f;
271 vb = (instr >> 11) & 0x1f;
272 vc = (instr >> 6) & 0x1f;
273
274 vrs = current->thread.vr;
275 switch (instr & 0x3f) {
276 case 10:
277 switch (vc) {
278 case 0: /* vaddfp */
279 vaddfp(&vrs[vd], &vrs[va], &vrs[vb]);
280 break;
281 case 1: /* vsubfp */
282 vsubfp(&vrs[vd], &vrs[va], &vrs[vb]);
283 break;
284 case 4: /* vrefp */
285 vrefp(&vrs[vd], &vrs[vb]);
286 break;
287 case 5: /* vrsqrtefp */
288 vrsqrtefp(&vrs[vd], &vrs[vb]);
289 break;
290 case 6: /* vexptefp */
291 for (i = 0; i < 4; ++i)
292 vrs[vd].u[i] = eexp2(vrs[vb].u[i]);
293 break;
294 case 7: /* vlogefp */
295 for (i = 0; i < 4; ++i)
296 vrs[vd].u[i] = elog2(vrs[vb].u[i]);
297 break;
298 case 8: /* vrfin */
299 for (i = 0; i < 4; ++i)
300 vrs[vd].u[i] = rfin(vrs[vb].u[i]);
301 break;
302 case 9: /* vrfiz */
303 for (i = 0; i < 4; ++i)
304 vrs[vd].u[i] = rfiz(vrs[vb].u[i]);
305 break;
306 case 10: /* vrfip */
307 for (i = 0; i < 4; ++i) {
308 u32 x = vrs[vb].u[i];
309 x = (x & 0x80000000)? rfiz(x): rfii(x);
310 vrs[vd].u[i] = x;
311 }
312 break;
313 case 11: /* vrfim */
314 for (i = 0; i < 4; ++i) {
315 u32 x = vrs[vb].u[i];
316 x = (x & 0x80000000)? rfii(x): rfiz(x);
317 vrs[vd].u[i] = x;
318 }
319 break;
320 case 14: /* vctuxs */
321 for (i = 0; i < 4; ++i)
322 vrs[vd].u[i] = ctuxs(vrs[vb].u[i], va,
323 &current->thread.vscr.u[3]);
324 break;
325 case 15: /* vctsxs */
326 for (i = 0; i < 4; ++i)
327 vrs[vd].u[i] = ctsxs(vrs[vb].u[i], va,
328 &current->thread.vscr.u[3]);
329 break;
330 default:
331 return -EINVAL;
332 }
333 break;
334 case 46: /* vmaddfp */
335 vmaddfp(&vrs[vd], &vrs[va], &vrs[vb], &vrs[vc]);
336 break;
337 case 47: /* vnmsubfp */
338 vnmsubfp(&vrs[vd], &vrs[va], &vrs[vb], &vrs[vc]);
339 break;
340 default:
341 return -EINVAL;
342 }
343
344 return 0;
345}
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
new file mode 100644
index 000000000000..66b3d03c5fa5
--- /dev/null
+++ b/arch/powerpc/kernel/vector.S
@@ -0,0 +1,197 @@
1#include <linux/config.h>
2#include <asm/ppc_asm.h>
3#include <asm/reg.h>
4
5/*
6 * The routines below are in assembler so we can closely control the
7 * usage of floating-point registers. These routines must be called
8 * with preempt disabled.
9 */
10#ifdef CONFIG_PPC32
11 .data
12fpzero:
13 .long 0
14fpone:
15 .long 0x3f800000 /* 1.0 in single-precision FP */
16fphalf:
17 .long 0x3f000000 /* 0.5 in single-precision FP */
18
19#define LDCONST(fr, name) \
20 lis r11,name@ha; \
21 lfs fr,name@l(r11)
22#else
23
24 .section ".toc","aw"
25fpzero:
26 .tc FD_0_0[TC],0
27fpone:
28 .tc FD_3ff00000_0[TC],0x3ff0000000000000 /* 1.0 */
29fphalf:
30 .tc FD_3fe00000_0[TC],0x3fe0000000000000 /* 0.5 */
31
32#define LDCONST(fr, name) \
33 lfd fr,name@toc(r2)
34#endif
35
36 .text
37/*
38 * Internal routine to enable floating point and set FPSCR to 0.
39 * Don't call it from C; it doesn't use the normal calling convention.
40 */
41fpenable:
42#ifdef CONFIG_PPC32
43 stwu r1,-64(r1)
44#else
45 stdu r1,-64(r1)
46#endif
47 mfmsr r10
48 ori r11,r10,MSR_FP
49 mtmsr r11
50 isync
51 stfd fr0,24(r1)
52 stfd fr1,16(r1)
53 stfd fr31,8(r1)
54 LDCONST(fr1, fpzero)
55 mffs fr31
56 mtfsf 0xff,fr1
57 blr
58
59fpdisable:
60 mtlr r12
61 mtfsf 0xff,fr31
62 lfd fr31,8(r1)
63 lfd fr1,16(r1)
64 lfd fr0,24(r1)
65 mtmsr r10
66 isync
67 addi r1,r1,64
68 blr
69
70/*
71 * Vector add, floating point.
72 */
73_GLOBAL(vaddfp)
74 mflr r12
75 bl fpenable
76 li r0,4
77 mtctr r0
78 li r6,0
791: lfsx fr0,r4,r6
80 lfsx fr1,r5,r6
81 fadds fr0,fr0,fr1
82 stfsx fr0,r3,r6
83 addi r6,r6,4
84 bdnz 1b
85 b fpdisable
86
87/*
88 * Vector subtract, floating point.
89 */
90_GLOBAL(vsubfp)
91 mflr r12
92 bl fpenable
93 li r0,4
94 mtctr r0
95 li r6,0
961: lfsx fr0,r4,r6
97 lfsx fr1,r5,r6
98 fsubs fr0,fr0,fr1
99 stfsx fr0,r3,r6
100 addi r6,r6,4
101 bdnz 1b
102 b fpdisable
103
104/*
105 * Vector multiply and add, floating point.
106 */
107_GLOBAL(vmaddfp)
108 mflr r12
109 bl fpenable
110 stfd fr2,32(r1)
111 li r0,4
112 mtctr r0
113 li r7,0
1141: lfsx fr0,r4,r7
115 lfsx fr1,r5,r7
116 lfsx fr2,r6,r7
117 fmadds fr0,fr0,fr2,fr1
118 stfsx fr0,r3,r7
119 addi r7,r7,4
120 bdnz 1b
121 lfd fr2,32(r1)
122 b fpdisable
123
124/*
125 * Vector negative multiply and subtract, floating point.
126 */
127_GLOBAL(vnmsubfp)
128 mflr r12
129 bl fpenable
130 stfd fr2,32(r1)
131 li r0,4
132 mtctr r0
133 li r7,0
1341: lfsx fr0,r4,r7
135 lfsx fr1,r5,r7
136 lfsx fr2,r6,r7
137 fnmsubs fr0,fr0,fr2,fr1
138 stfsx fr0,r3,r7
139 addi r7,r7,4
140 bdnz 1b
141 lfd fr2,32(r1)
142 b fpdisable
143
144/*
145 * Vector reciprocal estimate. We just compute 1.0/x.
146 * r3 -> destination, r4 -> source.
147 */
148_GLOBAL(vrefp)
149 mflr r12
150 bl fpenable
151 li r0,4
152 LDCONST(fr1, fpone)
153 mtctr r0
154 li r6,0
1551: lfsx fr0,r4,r6
156 fdivs fr0,fr1,fr0
157 stfsx fr0,r3,r6
158 addi r6,r6,4
159 bdnz 1b
160 b fpdisable
161
162/*
163 * Vector reciprocal square-root estimate, floating point.
164 * We use the frsqrte instruction for the initial estimate followed
165 * by 2 iterations of Newton-Raphson to get sufficient accuracy.
166 * r3 -> destination, r4 -> source.
167 */
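/*
 * The update below is Newton-Raphson on f(r) = 1/r^2 - s, which
 * simplifies to r' = r + (r/2) * (1 - s*r*r).  Each pass roughly doubles
 * the number of correct bits, so two passes refine frsqrte's initial
 * estimate (architecturally within 2^-5 relative error) to nearly full
 * single-precision accuracy.
 */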
168_GLOBAL(vrsqrtefp)
169 mflr r12
170 bl fpenable
171 stfd fr2,32(r1)
172 stfd fr3,40(r1)
173 stfd fr4,48(r1)
174 stfd fr5,56(r1)
175 li r0,4
176 LDCONST(fr4, fpone)
177 LDCONST(fr5, fphalf)
178 mtctr r0
179 li r6,0
1801: lfsx fr0,r4,r6
181 frsqrte fr1,fr0 /* r = frsqrte(s) */
182 fmuls fr3,fr1,fr0 /* r * s */
183 fmuls fr2,fr1,fr5 /* r * 0.5 */
184 fnmsubs fr3,fr1,fr3,fr4 /* 1 - s * r * r */
185 fmadds fr1,fr2,fr3,fr1 /* r = r + 0.5 * r * (1 - s * r * r) */
186 fmuls fr3,fr1,fr0 /* r * s */
187 fmuls fr2,fr1,fr5 /* r * 0.5 */
188 fnmsubs fr3,fr1,fr3,fr4 /* 1 - s * r * r */
189 fmadds fr1,fr2,fr3,fr1 /* r = r + 0.5 * r * (1 - s * r * r) */
190 stfsx fr1,r3,r6
191 addi r6,r6,4
192 bdnz 1b
193 lfd fr5,56(r1)
194 lfd fr4,48(r1)
195 lfd fr3,40(r1)
196 lfd fr2,32(r1)
197 b fpdisable
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
new file mode 100644
index 000000000000..97082a4203ad
--- /dev/null
+++ b/arch/powerpc/kernel/vio.c
@@ -0,0 +1,271 @@
1/*
2 * IBM PowerPC Virtual I/O Infrastructure Support.
3 *
4 * Copyright (c) 2003-2005 IBM Corp.
5 * Dave Engebretsen engebret@us.ibm.com
6 * Santiago Leon santil@us.ibm.com
7 * Hollis Blanchard <hollisb@us.ibm.com>
8 * Stephen Rothwell
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#include <linux/init.h>
17#include <linux/console.h>
18#include <linux/module.h>
19#include <linux/mm.h>
20#include <linux/dma-mapping.h>
21#include <asm/iommu.h>
22#include <asm/dma.h>
23#include <asm/vio.h>
24
25static const struct vio_device_id *vio_match_device(
26 const struct vio_device_id *, const struct vio_dev *);
27
28struct vio_dev vio_bus_device = { /* fake "parent" device */
29 .name = vio_bus_device.dev.bus_id,
30 .type = "",
31 .dev.bus_id = "vio",
32 .dev.bus = &vio_bus_type,
33};
34
35static struct vio_bus_ops vio_bus_ops;
36
37/*
38 * Convert from struct device to struct vio_dev and pass to driver.
39 * dev->driver has already been set by generic code because vio_bus_match
40 * succeeded.
41 */
42static int vio_bus_probe(struct device *dev)
43{
44 struct vio_dev *viodev = to_vio_dev(dev);
45 struct vio_driver *viodrv = to_vio_driver(dev->driver);
46 const struct vio_device_id *id;
47 int error = -ENODEV;
48
49 if (!viodrv->probe)
50 return error;
51
52 id = vio_match_device(viodrv->id_table, viodev);
53 if (id)
54 error = viodrv->probe(viodev, id);
55
56 return error;
57}
58
59/* convert from struct device to struct vio_dev and pass to driver. */
60static int vio_bus_remove(struct device *dev)
61{
62 struct vio_dev *viodev = to_vio_dev(dev);
63 struct vio_driver *viodrv = to_vio_driver(dev->driver);
64
65 if (viodrv->remove)
66 return viodrv->remove(viodev);
67
68 /* driver can't remove */
69 return 1;
70}
71
72/* convert from struct device to struct vio_dev and pass to driver. */
73static void vio_bus_shutdown(struct device *dev)
74{
75 struct vio_dev *viodev = to_vio_dev(dev);
76 struct vio_driver *viodrv = to_vio_driver(dev->driver);
77
78 if (viodrv->shutdown)
79 viodrv->shutdown(viodev);
80}
81
82/**
83 * vio_register_driver: - Register a new vio driver
84 * @viodrv: The vio_driver structure to be registered.
85 */
86int vio_register_driver(struct vio_driver *viodrv)
87{
88 printk(KERN_DEBUG "%s: driver %s registering\n", __FUNCTION__,
89 viodrv->driver.name);
90
91 /* fill in 'struct driver' fields */
92 viodrv->driver.bus = &vio_bus_type;
93 viodrv->driver.probe = vio_bus_probe;
94 viodrv->driver.remove = vio_bus_remove;
95 viodrv->driver.shutdown = vio_bus_shutdown;
96
97 return driver_register(&viodrv->driver);
98}
99EXPORT_SYMBOL(vio_register_driver);
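
/*
 * Illustrative sketch of a client driver (the foodev names are
 * invented): the id_table must be terminated by an entry whose type
 * string is empty, since that is what vio_match_device() scans for.
 *
 *	static struct vio_device_id foodev_ids[] = {
 *		{ "foodev", "IBM,foodev" },
 *		{ "", "" }
 *	};
 *
 *	static int foodev_probe(struct vio_dev *dev,
 *				const struct vio_device_id *id)
 *	{
 *		return 0;	// claim the device
 *	}
 *
 *	static struct vio_driver foodev_driver = {
 *		.driver		= { .name = "foodev" },
 *		.id_table	= foodev_ids,
 *		.probe		= foodev_probe,
 *	};
 *
 *	vio_register_driver(&foodev_driver);
 */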
100
101/**
102 * vio_unregister_driver - Remove registration of vio driver.
103 * @viodrv: The vio_driver struct to be removed from registration
104 */
105void vio_unregister_driver(struct vio_driver *viodrv)
106{
107 driver_unregister(&viodrv->driver);
108}
109EXPORT_SYMBOL(vio_unregister_driver);
110
111/**
112 * vio_match_device: - Tell if a VIO device has a matching
113 * VIO device id structure.
114 * @ids: array of VIO device id structures to search in
115 * @dev: the VIO device structure to match against
116 *
117 * Used by a driver to check whether a VIO device present in the
118 * system is in its list of supported devices. Returns the matching
119 * vio_device_id structure or NULL if there is no match.
120 */
121static const struct vio_device_id *vio_match_device(
122 const struct vio_device_id *ids, const struct vio_dev *dev)
123{
124 while (ids->type[0] != '\0') {
125 if (vio_bus_ops.match(ids, dev))
126 return ids;
127 ids++;
128 }
129 return NULL;
130}
131
132/**
133 * vio_bus_init: - Initialize the virtual IO bus
134 */
135int __init vio_bus_init(struct vio_bus_ops *ops)
136{
137 int err;
138
139 vio_bus_ops = *ops;
140
141 err = bus_register(&vio_bus_type);
142 if (err) {
143 printk(KERN_ERR "failed to register VIO bus\n");
144 return err;
145 }
146
147 /*
148 * The fake parent of all vio devices, just to give us
149 * a nice directory
150 */
151 err = device_register(&vio_bus_device.dev);
152 if (err) {
153 printk(KERN_WARNING "%s: device_register returned %i\n",
154 __FUNCTION__, err);
155 return err;
156 }
157
158 return 0;
159}
160
161/* vio_dev refcount hit 0 */
162static void __devinit vio_dev_release(struct device *dev)
163{
164 if (vio_bus_ops.release_device)
165 vio_bus_ops.release_device(dev);
166 kfree(to_vio_dev(dev));
167}
168
169static ssize_t viodev_show_name(struct device *dev,
170 struct device_attribute *attr, char *buf)
171{
172 return sprintf(buf, "%s\n", to_vio_dev(dev)->name);
173}
174DEVICE_ATTR(name, S_IRUSR | S_IRGRP | S_IROTH, viodev_show_name, NULL);
175
176struct vio_dev * __devinit vio_register_device(struct vio_dev *viodev)
177{
178 /* init generic 'struct device' fields: */
179 viodev->dev.parent = &vio_bus_device.dev;
180 viodev->dev.bus = &vio_bus_type;
181 viodev->dev.release = vio_dev_release;
182
183 /* register with generic device framework */
184 if (device_register(&viodev->dev)) {
185 printk(KERN_ERR "%s: failed to register device %s\n",
186 __FUNCTION__, viodev->dev.bus_id);
187 return NULL;
188 }
189 device_create_file(&viodev->dev, &dev_attr_name);
190
191 return viodev;
192}
193
194void __devinit vio_unregister_device(struct vio_dev *viodev)
195{
196 if (vio_bus_ops.unregister_device)
197 vio_bus_ops.unregister_device(viodev);
198 device_remove_file(&viodev->dev, &dev_attr_name);
199 device_unregister(&viodev->dev);
200}
201EXPORT_SYMBOL(vio_unregister_device);
202
203static dma_addr_t vio_map_single(struct device *dev, void *vaddr,
204 size_t size, enum dma_data_direction direction)
205{
206 return iommu_map_single(to_vio_dev(dev)->iommu_table, vaddr, size,
207 direction);
208}
209
210static void vio_unmap_single(struct device *dev, dma_addr_t dma_handle,
211 size_t size, enum dma_data_direction direction)
212{
213 iommu_unmap_single(to_vio_dev(dev)->iommu_table, dma_handle, size,
214 direction);
215}
216
217static int vio_map_sg(struct device *dev, struct scatterlist *sglist,
218 int nelems, enum dma_data_direction direction)
219{
220 return iommu_map_sg(dev, to_vio_dev(dev)->iommu_table, sglist,
221 nelems, direction);
222}
223
224static void vio_unmap_sg(struct device *dev, struct scatterlist *sglist,
225 int nelems, enum dma_data_direction direction)
226{
227 iommu_unmap_sg(to_vio_dev(dev)->iommu_table, sglist, nelems, direction);
228}
229
230static void *vio_alloc_coherent(struct device *dev, size_t size,
231 dma_addr_t *dma_handle, gfp_t flag)
232{
233 return iommu_alloc_coherent(to_vio_dev(dev)->iommu_table, size,
234 dma_handle, flag);
235}
236
237static void vio_free_coherent(struct device *dev, size_t size,
238 void *vaddr, dma_addr_t dma_handle)
239{
240 iommu_free_coherent(to_vio_dev(dev)->iommu_table, size, vaddr,
241 dma_handle);
242}
243
244static int vio_dma_supported(struct device *dev, u64 mask)
245{
246 return 1;
247}
248
249struct dma_mapping_ops vio_dma_ops = {
250 .alloc_coherent = vio_alloc_coherent,
251 .free_coherent = vio_free_coherent,
252 .map_single = vio_map_single,
253 .unmap_single = vio_unmap_single,
254 .map_sg = vio_map_sg,
255 .unmap_sg = vio_unmap_sg,
256 .dma_supported = vio_dma_supported,
257};
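
/*
 * Drivers do not call these ops directly; in this era the ppc64 DMA API
 * looks up the bus's dma_mapping_ops, so a vio driver just uses the
 * generic calls (sketch, assuming viodev has a valid iommu_table):
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(&viodev->dev, 4096, &handle,
 *				       GFP_KERNEL);
 *	...
 *	dma_free_coherent(&viodev->dev, 4096, buf, handle);
 */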
258
259static int vio_bus_match(struct device *dev, struct device_driver *drv)
260{
261 const struct vio_dev *vio_dev = to_vio_dev(dev);
262 struct vio_driver *vio_drv = to_vio_driver(drv);
263 const struct vio_device_id *ids = vio_drv->id_table;
264
265 return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL);
266}
267
268struct bus_type vio_bus_type = {
269 .name = "vio",
270 .match = vio_bus_match,
271};
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..d4dfcfbce272
--- /dev/null
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -0,0 +1,279 @@
1#include <linux/config.h>
2#ifdef CONFIG_PPC64
3#include <asm/page.h>
4#else
5#define PAGE_SIZE 4096
6#endif
7#include <asm-generic/vmlinux.lds.h>
8
9#ifdef CONFIG_PPC64
10OUTPUT_ARCH(powerpc:common64)
11jiffies = jiffies_64;
12#else
13OUTPUT_ARCH(powerpc:common)
14jiffies = jiffies_64 + 4;
15#endif
16SECTIONS
17{
18 /* Sections to be discarded. */
19 /DISCARD/ : {
20 *(.exitcall.exit)
21 *(.exit.data)
22 }
23
24
25 /* Read-only sections, merged into text segment: */
26#ifdef CONFIG_PPC32
27 . = + SIZEOF_HEADERS;
28 .interp : { *(.interp) }
29 .hash : { *(.hash) }
30 .dynsym : { *(.dynsym) }
31 .dynstr : { *(.dynstr) }
32 .rel.text : { *(.rel.text) }
33 .rela.text : { *(.rela.text) }
34 .rel.data : { *(.rel.data) }
35 .rela.data : { *(.rela.data) }
36 .rel.rodata : { *(.rel.rodata) }
37 .rela.rodata : { *(.rela.rodata) }
38 .rel.got : { *(.rel.got) }
39 .rela.got : { *(.rela.got) }
40 .rel.ctors : { *(.rel.ctors) }
41 .rela.ctors : { *(.rela.ctors) }
42 .rel.dtors : { *(.rel.dtors) }
43 .rela.dtors : { *(.rela.dtors) }
44 .rel.bss : { *(.rel.bss) }
45 .rela.bss : { *(.rela.bss) }
46 .rel.plt : { *(.rel.plt) }
47 .rela.plt : { *(.rela.plt) }
48/* .init : { *(.init) } =0*/
49 .plt : { *(.plt) }
50#endif
51 .text : {
52 *(.text .text.*)
53 SCHED_TEXT
54 LOCK_TEXT
55 KPROBES_TEXT
56 *(.fixup)
57#ifdef CONFIG_PPC32
58 *(.got1)
59 __got2_start = .;
60 *(.got2)
61 __got2_end = .;
62#else
63 . = ALIGN(PAGE_SIZE);
64 _etext = .;
65#endif
66 }
67#ifdef CONFIG_PPC32
68 _etext = .;
69 PROVIDE (etext = .);
70
71 RODATA
72 .fini : { *(.fini) } =0
73 .ctors : { *(.ctors) }
74 .dtors : { *(.dtors) }
75
76 .fixup : { *(.fixup) }
77#endif
78
79 __ex_table : {
80 __start___ex_table = .;
81 *(__ex_table)
82 __stop___ex_table = .;
83 }
84
85 __bug_table : {
86 __start___bug_table = .;
87 *(__bug_table)
88 __stop___bug_table = .;
89 }
90
91#ifdef CONFIG_PPC64
92 __ftr_fixup : {
93 __start___ftr_fixup = .;
94 *(__ftr_fixup)
95 __stop___ftr_fixup = .;
96 }
97
98 RODATA
99#endif
100
101#ifdef CONFIG_PPC32
102 /* Read-write section, merged into data segment: */
103 . = ALIGN(PAGE_SIZE);
104 _sdata = .;
105 .data :
106 {
107 *(.data)
108 *(.data1)
109 *(.sdata)
110 *(.sdata2)
111 *(.got.plt) *(.got)
112 *(.dynamic)
113 CONSTRUCTORS
114 }
115
116 . = ALIGN(PAGE_SIZE);
117 __nosave_begin = .;
118 .data_nosave : { *(.data.nosave) }
119 . = ALIGN(PAGE_SIZE);
120 __nosave_end = .;
121
122 . = ALIGN(32);
123 .data.cacheline_aligned : { *(.data.cacheline_aligned) }
124
125 _edata = .;
126 PROVIDE (edata = .);
127
128 . = ALIGN(8192);
129 .data.init_task : { *(.data.init_task) }
130#endif
131
132 /* will be freed after init */
133 . = ALIGN(PAGE_SIZE);
134 __init_begin = .;
135 .init.text : {
136 _sinittext = .;
137 *(.init.text)
138 _einittext = .;
139 }
140#ifdef CONFIG_PPC32
141 /* .exit.text is discarded at runtime, not link time,
142 to deal with references from __bug_table */
143 .exit.text : { *(.exit.text) }
144#endif
145 .init.data : {
146 *(.init.data);
147 __vtop_table_begin = .;
148 *(.vtop_fixup);
149 __vtop_table_end = .;
150 __ptov_table_begin = .;
151 *(.ptov_fixup);
152 __ptov_table_end = .;
153 }
154
155 . = ALIGN(16);
156 .init.setup : {
157 __setup_start = .;
158 *(.init.setup)
159 __setup_end = .;
160 }
161
162 .initcall.init : {
163 __initcall_start = .;
164 *(.initcall1.init)
165 *(.initcall2.init)
166 *(.initcall3.init)
167 *(.initcall4.init)
168 *(.initcall5.init)
169 *(.initcall6.init)
170 *(.initcall7.init)
171 __initcall_end = .;
172 }
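
	/*
	 * Each *(.initcallN.init) slot above collects function pointers
	 * emitted by the __initcall()/module_init() macros, roughly
	 * (sketch; the macro spelling varies by version):
	 *
	 *	#define __define_initcall(level, fn) \
	 *		static initcall_t __initcall_##fn \
	 *		__attribute__((__section__(".initcall" level ".init"))) = fn
	 *
	 * At boot, do_initcalls() walks __initcall_start..__initcall_end,
	 * i.e. the slots in priority order.
	 */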
173
174 .con_initcall.init : {
175 __con_initcall_start = .;
176 *(.con_initcall.init)
177 __con_initcall_end = .;
178 }
179
180 SECURITY_INIT
181
182#ifdef CONFIG_PPC32
183 __start___ftr_fixup = .;
184 __ftr_fixup : { *(__ftr_fixup) }
185 __stop___ftr_fixup = .;
186#else
187 . = ALIGN(PAGE_SIZE);
188 .init.ramfs : {
189 __initramfs_start = .;
190 *(.init.ramfs)
191 __initramfs_end = .;
192 }
193#endif
194
195#ifdef CONFIG_PPC32
196 . = ALIGN(32);
197#endif
198 .data.percpu : {
199 __per_cpu_start = .;
200 *(.data.percpu)
201 __per_cpu_end = .;
202 }
203
204 . = ALIGN(PAGE_SIZE);
205#ifdef CONFIG_PPC64
206 . = ALIGN(16384);
207 __init_end = .;
208 /* freed after init ends here */
209
210 /* Read/write sections */
211 . = ALIGN(PAGE_SIZE);
212 . = ALIGN(16384);
213 _sdata = .;
214 /* The initial task and kernel stack */
215 .data.init_task : {
216 *(.data.init_task)
217 }
218
219 . = ALIGN(PAGE_SIZE);
220 .data.page_aligned : {
221 *(.data.page_aligned)
222 }
223
224 .data.cacheline_aligned : {
225 *(.data.cacheline_aligned)
226 }
227
228 .data : {
229 *(.data .data.rel* .toc1)
230 *(.branch_lt)
231 }
232
233 .opd : {
234 *(.opd)
235 }
236
237 .got : {
238 __toc_start = .;
239 *(.got)
240 *(.toc)
241 . = ALIGN(PAGE_SIZE);
242 _edata = .;
243 }
244
245 . = ALIGN(PAGE_SIZE);
246#else
247 __initramfs_start = .;
248 .init.ramfs : {
249 *(.init.ramfs)
250 }
251 __initramfs_end = .;
252
253 . = ALIGN(4096);
254 __init_end = .;
255
256 . = ALIGN(4096);
257 _sextratext = .;
258 _eextratext = .;
259
260 __bss_start = .;
261#endif
262
263 .bss : {
264 __bss_start = .;
265 *(.sbss) *(.scommon)
266 *(.dynbss)
267 *(.bss)
268 *(COMMON)
269 __bss_stop = .;
270 }
271
272#ifdef CONFIG_PPC64
273 . = ALIGN(PAGE_SIZE);
274#endif
275 _end = . ;
276#ifdef CONFIG_PPC32
277 PROVIDE (end = .);
278#endif
279}