aboutsummaryrefslogtreecommitdiffstats
path: root/arch/ppc/kernel
diff options
context:
space:
mode:
authorPaul Mackerras <paulus@samba.org>2008-06-09 00:01:46 -0400
committerPaul Mackerras <paulus@samba.org>2008-06-10 07:40:22 -0400
commit917f0af9e5a9ceecf9e72537fabb501254ba321d (patch)
tree1ef207755c6d83ce4af93ef2b5e4645eebd65886 /arch/ppc/kernel
parent0f3d6bcd391b058c619fc30e8022e8a29fbf4bef (diff)
powerpc: Remove arch/ppc and include/asm-ppc
All the maintained platforms are now in arch/powerpc, so the old arch/ppc stuff can now go away. Acked-by: Adrian Bunk <bunk@kernel.org> Acked-by: Arnd Bergmann <arnd@arndb.de> Acked-by: Becky Bruce <becky.bruce@freescale.com> Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org> Acked-by: Geert Uytterhoeven <geert@linux-m68k.org> Acked-by: Grant Likely <grant.likely@secretlab.ca> Acked-by: Jochen Friedrich <jochen@scram.de> Acked-by: John Linn <john.linn@xilinx.com> Acked-by: Jon Loeliger <jdl@freescale.com> Acked-by: Josh Boyer <jwboyer@linux.vnet.ibm.com> Acked-by: Kumar Gala <galak@kernel.crashing.org> Acked-by: Olof Johansson <olof@lixom.net> Acked-by: Peter Korsgaard <jacmet@sunsite.dk> Acked-by: Scott Wood <scottwood@freescale.com> Acked-by: Sean MacLennan <smaclennan@pikatech.com> Acked-by: Segher Boessenkool <segher@kernel.crashing.org> Acked-by: Stefan Roese <sr@denx.de> Acked-by: Stephen Neuendorffer <stephen.neuendorffer@xilinx.com> Acked-by: Wolfgang Denk <wd@denx.de> Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/ppc/kernel')
-rw-r--r--arch/ppc/kernel/Makefile21
-rw-r--r--arch/ppc/kernel/asm-offsets.c164
-rw-r--r--arch/ppc/kernel/cpu_setup_power4.S197
-rw-r--r--arch/ppc/kernel/entry.S960
-rw-r--r--arch/ppc/kernel/head.S1220
-rw-r--r--arch/ppc/kernel/head_44x.S769
-rw-r--r--arch/ppc/kernel/head_4xx.S1021
-rw-r--r--arch/ppc/kernel/head_8xx.S959
-rw-r--r--arch/ppc/kernel/head_booke.h308
-rw-r--r--arch/ppc/kernel/machine_kexec.c118
-rw-r--r--arch/ppc/kernel/misc.S868
-rw-r--r--arch/ppc/kernel/pci.c1233
-rw-r--r--arch/ppc/kernel/ppc-stub.c866
-rw-r--r--arch/ppc/kernel/ppc_htab.c464
-rw-r--r--arch/ppc/kernel/ppc_ksyms.c258
-rw-r--r--arch/ppc/kernel/relocate_kernel.S123
-rw-r--r--arch/ppc/kernel/setup.c572
-rw-r--r--arch/ppc/kernel/smp-tbsync.c180
-rw-r--r--arch/ppc/kernel/smp.c414
-rw-r--r--arch/ppc/kernel/softemu8xx.c147
-rw-r--r--arch/ppc/kernel/time.c445
-rw-r--r--arch/ppc/kernel/traps.c826
-rw-r--r--arch/ppc/kernel/vmlinux.lds.S164
23 files changed, 0 insertions, 12297 deletions
diff --git a/arch/ppc/kernel/Makefile b/arch/ppc/kernel/Makefile
deleted file mode 100644
index 7b739054968f..000000000000
--- a/arch/ppc/kernel/Makefile
+++ /dev/null
@@ -1,21 +0,0 @@
1#
2# Makefile for the linux kernel.
3#
4extra-$(CONFIG_PPC_STD_MMU) := head.o
5extra-$(CONFIG_40x) := head_4xx.o
6extra-$(CONFIG_44x) := head_44x.o
7extra-$(CONFIG_8xx) := head_8xx.o
8extra-y += vmlinux.lds
9
10obj-y := entry.o traps.o time.o misc.o \
11 setup.o \
12 ppc_htab.o
13obj-$(CONFIG_MODULES) += ppc_ksyms.o
14obj-$(CONFIG_PCI) += pci.o
15obj-$(CONFIG_KGDB) += ppc-stub.o
16obj-$(CONFIG_SMP) += smp.o smp-tbsync.o
17obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
18
19ifndef CONFIG_MATH_EMULATION
20obj-$(CONFIG_8xx) += softemu8xx.o
21endif
diff --git a/arch/ppc/kernel/asm-offsets.c b/arch/ppc/kernel/asm-offsets.c
deleted file mode 100644
index 8dcbdd6c2d2c..000000000000
--- a/arch/ppc/kernel/asm-offsets.c
+++ /dev/null
@@ -1,164 +0,0 @@
1/*
2 * This program is used to generate definitions needed by
3 * assembly language modules.
4 *
5 * We use the technique used in the OSF Mach kernel code:
6 * generate asm statements containing #defines,
7 * compile this file to assembler, and then extract the
8 * #defines from the assembly-language output.
9 */
10
11#include <linux/signal.h>
12#include <linux/sched.h>
13#include <linux/kernel.h>
14#include <linux/errno.h>
15#include <linux/string.h>
16#include <linux/types.h>
17#include <linux/ptrace.h>
18#include <linux/suspend.h>
19#include <linux/mman.h>
20#include <linux/mm.h>
21#include <linux/kbuild.h>
22
23#include <asm/io.h>
24#include <asm/page.h>
25#include <asm/pgtable.h>
26#include <asm/processor.h>
27#include <asm/cputable.h>
28#include <asm/thread_info.h>
29#include <asm/vdso_datapage.h>
30
31int
32main(void)
33{
34 DEFINE(THREAD, offsetof(struct task_struct, thread));
35 DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
36 DEFINE(MM, offsetof(struct task_struct, mm));
37 DEFINE(PTRACE, offsetof(struct task_struct, ptrace));
38 DEFINE(KSP, offsetof(struct thread_struct, ksp));
39 DEFINE(PGDIR, offsetof(struct thread_struct, pgdir));
40 DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
41 DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode));
42 DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
43 DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
44#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
45 DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, dbcr0));
46 DEFINE(PT_PTRACED, PT_PTRACED);
47#endif
48#ifdef CONFIG_ALTIVEC
49 DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0]));
50 DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave));
51 DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr));
52 DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr));
53#endif /* CONFIG_ALTIVEC */
54 /* Interrupt register frame */
55 DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
56 DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
57 /* in fact we only use gpr0 - gpr9 and gpr20 - gpr23 */
58 DEFINE(GPR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[0]));
59 DEFINE(GPR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[1]));
60 DEFINE(GPR2, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[2]));
61 DEFINE(GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[3]));
62 DEFINE(GPR4, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[4]));
63 DEFINE(GPR5, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[5]));
64 DEFINE(GPR6, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[6]));
65 DEFINE(GPR7, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[7]));
66 DEFINE(GPR8, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[8]));
67 DEFINE(GPR9, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[9]));
68 DEFINE(GPR10, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[10]));
69 DEFINE(GPR11, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[11]));
70 DEFINE(GPR12, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[12]));
71 DEFINE(GPR13, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[13]));
72 DEFINE(GPR14, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[14]));
73 DEFINE(GPR15, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[15]));
74 DEFINE(GPR16, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[16]));
75 DEFINE(GPR17, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[17]));
76 DEFINE(GPR18, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[18]));
77 DEFINE(GPR19, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[19]));
78 DEFINE(GPR20, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[20]));
79 DEFINE(GPR21, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[21]));
80 DEFINE(GPR22, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[22]));
81 DEFINE(GPR23, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[23]));
82 DEFINE(GPR24, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[24]));
83 DEFINE(GPR25, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[25]));
84 DEFINE(GPR26, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[26]));
85 DEFINE(GPR27, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[27]));
86 DEFINE(GPR28, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[28]));
87 DEFINE(GPR29, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[29]));
88 DEFINE(GPR30, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[30]));
89 DEFINE(GPR31, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[31]));
90 /* Note: these symbols include _ because they overlap with special
91 * register names
92 */
93 DEFINE(_NIP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, nip));
94 DEFINE(_MSR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, msr));
95 DEFINE(_CTR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ctr));
96 DEFINE(_LINK, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, link));
97 DEFINE(_CCR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ccr));
98 DEFINE(_MQ, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, mq));
99 DEFINE(_XER, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, xer));
100 DEFINE(_DAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
101 DEFINE(_DSISR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
102 /* The PowerPC 400-class & Book-E processors have neither the DAR nor the DSISR
103 * SPRs. Hence, we overload them to hold the similar DEAR and ESR SPRs
104 * for such processors. For critical interrupts we use them to
105 * hold SRR0 and SRR1.
106 */
107 DEFINE(_DEAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
108 DEFINE(_ESR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
109 DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3));
110 DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result));
111 DEFINE(TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap));
112 DEFINE(CLONE_VM, CLONE_VM);
113 DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
114 DEFINE(MM_PGD, offsetof(struct mm_struct, pgd));
115
116 /* About the CPU features table */
117 DEFINE(CPU_SPEC_ENTRY_SIZE, sizeof(struct cpu_spec));
118 DEFINE(CPU_SPEC_PVR_MASK, offsetof(struct cpu_spec, pvr_mask));
119 DEFINE(CPU_SPEC_PVR_VALUE, offsetof(struct cpu_spec, pvr_value));
120 DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
121 DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));
122
123 DEFINE(TI_TASK, offsetof(struct thread_info, task));
124 DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain));
125 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
126 DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
127 DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
128 DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
129
130 DEFINE(pbe_address, offsetof(struct pbe, address));
131 DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
132 DEFINE(pbe_next, offsetof(struct pbe, next));
133
134 DEFINE(TASK_SIZE, TASK_SIZE);
135 DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28);
136
137 /* datapage offsets for use by vdso */
138 DEFINE(CFG_TB_ORIG_STAMP, offsetof(struct vdso_data, tb_orig_stamp));
139 DEFINE(CFG_TB_TICKS_PER_SEC, offsetof(struct vdso_data, tb_ticks_per_sec));
140 DEFINE(CFG_TB_TO_XS, offsetof(struct vdso_data, tb_to_xs));
141 DEFINE(CFG_STAMP_XSEC, offsetof(struct vdso_data, stamp_xsec));
142 DEFINE(CFG_TB_UPDATE_COUNT, offsetof(struct vdso_data, tb_update_count));
143 DEFINE(CFG_TZ_MINUTEWEST, offsetof(struct vdso_data, tz_minuteswest));
144 DEFINE(CFG_TZ_DSTTIME, offsetof(struct vdso_data, tz_dsttime));
145 DEFINE(CFG_SYSCALL_MAP32, offsetof(struct vdso_data, syscall_map_32));
146 DEFINE(WTOM_CLOCK_SEC, offsetof(struct vdso_data, wtom_clock_sec));
147 DEFINE(WTOM_CLOCK_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
148 DEFINE(TVAL32_TV_SEC, offsetof(struct timeval, tv_sec));
149 DEFINE(TVAL32_TV_USEC, offsetof(struct timeval, tv_usec));
150 DEFINE(TSPEC32_TV_SEC, offsetof(struct timespec, tv_sec));
151 DEFINE(TSPEC32_TV_NSEC, offsetof(struct timespec, tv_nsec));
152
153 /* timeval/timezone offsets for use by vdso */
154 DEFINE(TZONE_TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
155 DEFINE(TZONE_TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));
156
157 /* Other bits used by the vdso */
158 DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
159 DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
160 DEFINE(NSEC_PER_SEC, NSEC_PER_SEC);
161 DEFINE(CLOCK_REALTIME_RES, TICK_NSEC);
162
163 return 0;
164}
diff --git a/arch/ppc/kernel/cpu_setup_power4.S b/arch/ppc/kernel/cpu_setup_power4.S
deleted file mode 100644
index 6a674e834eeb..000000000000
--- a/arch/ppc/kernel/cpu_setup_power4.S
+++ /dev/null
@@ -1,197 +0,0 @@
1/*
2 * This file contains low level CPU setup functions.
3 * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 *
10 */
11
12#include <asm/processor.h>
13#include <asm/page.h>
14#include <asm/ppc_asm.h>
15#include <asm/cputable.h>
16#include <asm/asm-offsets.h>
17#include <asm/cache.h>
18
19_GLOBAL(__970_cpu_preinit)
20 /*
21 * Deal only with PPC970 and PPC970FX.
22 */
23 mfspr r0,SPRN_PVR
24 srwi r0,r0,16
25 cmpwi cr0,r0,0x39
26 cmpwi cr1,r0,0x3c
27 cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
28 bnelr
29
30 /* Make sure HID4:rm_ci is off before MMU is turned off, that large
31 * pages are enabled with HID4:61 and clear HID5:DCBZ_size and
32 * HID5:DCBZ32_ill
33 */
34 li r0,0
35 mfspr r11,SPRN_HID4
36 rldimi r11,r0,40,23 /* clear bit 23 (rm_ci) */
37 rldimi r11,r0,2,61 /* clear bit 61 (lg_pg_en) */
38 sync
39 mtspr SPRN_HID4,r11
40 isync
41 sync
42 mfspr r11,SPRN_HID5
43 rldimi r11,r0,6,56 /* clear bits 56 & 57 (DCBZ*) */
44 sync
45 mtspr SPRN_HID5,r11
46 isync
47 sync
48
49 /* Setup some basic HID1 features */
50 mfspr r0,SPRN_HID1
51 li r11,0x1200 /* enable i-fetch cacheability */
52 sldi r11,r11,44 /* and prefetch */
53 or r0,r0,r11
54 mtspr SPRN_HID1,r0
55 mtspr SPRN_HID1,r0
56 isync
57
58 /* Clear HIOR */
59 li r0,0
60 sync
61 mtspr SPRN_HIOR,0 /* Clear interrupt prefix */
62 isync
63 blr
64
65_GLOBAL(__setup_cpu_ppc970)
66 mfspr r0,SPRN_HID0
67 li r11,5 /* clear DOZE and SLEEP */
68 rldimi r0,r11,52,8 /* set NAP and DPM */
69 mtspr SPRN_HID0,r0
70 mfspr r0,SPRN_HID0
71 mfspr r0,SPRN_HID0
72 mfspr r0,SPRN_HID0
73 mfspr r0,SPRN_HID0
74 mfspr r0,SPRN_HID0
75 mfspr r0,SPRN_HID0
76 sync
77 isync
78 blr
79
80/* Definitions for the table use to save CPU states */
81#define CS_HID0 0
82#define CS_HID1 8
83#define CS_HID4 16
84#define CS_HID5 24
85#define CS_SIZE 32
86
87 .data
88 .balign L1_CACHE_BYTES
89cpu_state_storage:
90 .space CS_SIZE
91 .balign L1_CACHE_BYTES,0
92 .text
93
94/* Called in normal context to backup CPU 0 state. This
95 * does not include cache settings. This function is also
96 * called for machine sleep. This does not include the MMU
97 * setup, BATs, etc... but rather the "special" registers
98 * like HID0, HID1, HID4, etc...
99 */
100_GLOBAL(__save_cpu_setup)
101 /* Some CR fields are volatile, we back it up all */
102 mfcr r7
103
104 /* Get storage ptr */
105 lis r5,cpu_state_storage@h
106 ori r5,r5,cpu_state_storage@l
107
108 /* We only deal with 970 for now */
109 mfspr r0,SPRN_PVR
110 srwi r0,r0,16
111 cmpwi cr0,r0,0x39
112 cmpwi cr1,r0,0x3c
113 cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
114 bne 1f
115
116 /* Save HID0,1,4 and 5 */
117 mfspr r3,SPRN_HID0
118 std r3,CS_HID0(r5)
119 mfspr r3,SPRN_HID1
120 std r3,CS_HID1(r5)
121 mfspr r3,SPRN_HID4
122 std r3,CS_HID4(r5)
123 mfspr r3,SPRN_HID5
124 std r3,CS_HID5(r5)
125
1261:
127 mtcr r7
128 blr
129
130/* Called with no MMU context (typically MSR:IR/DR off) to
131 * restore CPU state as backed up by the previous
132 * function. This does not include cache setting
133 */
134_GLOBAL(__restore_cpu_setup)
135 /* Some CR fields are volatile, we back it up all */
136 mfcr r7
137
138 /* Get storage ptr */
139 lis r5,(cpu_state_storage-KERNELBASE)@h
140 ori r5,r5,cpu_state_storage@l
141
142 /* We only deal with 970 for now */
143 mfspr r0,SPRN_PVR
144 srwi r0,r0,16
145 cmpwi cr0,r0,0x39
146 cmpwi cr1,r0,0x3c
147 cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
148 bne 1f
149
150 /* Clear interrupt prefix */
151 li r0,0
152 sync
153 mtspr SPRN_HIOR,0
154 isync
155
156 /* Restore HID0 */
157 ld r3,CS_HID0(r5)
158 sync
159 isync
160 mtspr SPRN_HID0,r3
161 mfspr r3,SPRN_HID0
162 mfspr r3,SPRN_HID0
163 mfspr r3,SPRN_HID0
164 mfspr r3,SPRN_HID0
165 mfspr r3,SPRN_HID0
166 mfspr r3,SPRN_HID0
167 sync
168 isync
169
170 /* Restore HID1 */
171 ld r3,CS_HID1(r5)
172 sync
173 isync
174 mtspr SPRN_HID1,r3
175 mtspr SPRN_HID1,r3
176 sync
177 isync
178
179 /* Restore HID4 */
180 ld r3,CS_HID4(r5)
181 sync
182 isync
183 mtspr SPRN_HID4,r3
184 sync
185 isync
186
187 /* Restore HID5 */
188 ld r3,CS_HID5(r5)
189 sync
190 isync
191 mtspr SPRN_HID5,r3
192 sync
193 isync
1941:
195 mtcr r7
196 blr
197
diff --git a/arch/ppc/kernel/entry.S b/arch/ppc/kernel/entry.S
deleted file mode 100644
index fcd830a292e2..000000000000
--- a/arch/ppc/kernel/entry.S
+++ /dev/null
@@ -1,960 +0,0 @@
1/*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
5 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
6 * Adapted for Power Macintosh by Paul Mackerras.
7 * Low-level exception handlers and MMU support
8 * rewritten by Paul Mackerras.
9 * Copyright (C) 1996 Paul Mackerras.
10 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 * This file contains the system call entry code, context switch
13 * code, and exception/interrupt return code for PowerPC.
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 *
20 */
21
22#include <linux/errno.h>
23#include <linux/sys.h>
24#include <linux/threads.h>
25#include <asm/processor.h>
26#include <asm/page.h>
27#include <asm/mmu.h>
28#include <asm/cputable.h>
29#include <asm/thread_info.h>
30#include <asm/ppc_asm.h>
31#include <asm/asm-offsets.h>
32#include <asm/unistd.h>
33
34#undef SHOW_SYSCALLS
35#undef SHOW_SYSCALLS_TASK
36
37/*
38 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it include MSR_CE.
39 */
40#if MSR_KERNEL >= 0x10000
41#define LOAD_MSR_KERNEL(r, x) lis r,(x)@h; ori r,r,(x)@l
42#else
43#define LOAD_MSR_KERNEL(r, x) li r,(x)
44#endif
45
46#ifdef CONFIG_BOOKE
47#include "head_booke.h"
48#define TRANSFER_TO_HANDLER_EXC_LEVEL(exc_level) \
49 mtspr exc_level##_SPRG,r8; \
50 BOOKE_LOAD_EXC_LEVEL_STACK(exc_level); \
51 lwz r0,GPR10-INT_FRAME_SIZE(r8); \
52 stw r0,GPR10(r11); \
53 lwz r0,GPR11-INT_FRAME_SIZE(r8); \
54 stw r0,GPR11(r11); \
55 mfspr r8,exc_level##_SPRG
56
57 .globl mcheck_transfer_to_handler
58mcheck_transfer_to_handler:
59 TRANSFER_TO_HANDLER_EXC_LEVEL(MCHECK)
60 b transfer_to_handler_full
61
62 .globl debug_transfer_to_handler
63debug_transfer_to_handler:
64 TRANSFER_TO_HANDLER_EXC_LEVEL(DEBUG)
65 b transfer_to_handler_full
66
67 .globl crit_transfer_to_handler
68crit_transfer_to_handler:
69 TRANSFER_TO_HANDLER_EXC_LEVEL(CRIT)
70 /* fall through */
71#endif
72
73#ifdef CONFIG_40x
74 .globl crit_transfer_to_handler
75crit_transfer_to_handler:
76 lwz r0,crit_r10@l(0)
77 stw r0,GPR10(r11)
78 lwz r0,crit_r11@l(0)
79 stw r0,GPR11(r11)
80 /* fall through */
81#endif
82
83/*
84 * This code finishes saving the registers to the exception frame
85 * and jumps to the appropriate handler for the exception, turning
86 * on address translation.
87 * Note that we rely on the caller having set cr0.eq iff the exception
88 * occurred in kernel mode (i.e. MSR:PR = 0).
89 */
90 .globl transfer_to_handler_full
91transfer_to_handler_full:
92 SAVE_NVGPRS(r11)
93 /* fall through */
94
95 .globl transfer_to_handler
96transfer_to_handler:
97 stw r2,GPR2(r11)
98 stw r12,_NIP(r11)
99 stw r9,_MSR(r11)
100 andi. r2,r9,MSR_PR
101 mfctr r12
102 mfspr r2,SPRN_XER
103 stw r12,_CTR(r11)
104 stw r2,_XER(r11)
105 mfspr r12,SPRN_SPRG3
106 addi r2,r12,-THREAD
107 tovirt(r2,r2) /* set r2 to current */
108 beq 2f /* if from user, fix up THREAD.regs */
109 addi r11,r1,STACK_FRAME_OVERHEAD
110 stw r11,PT_REGS(r12)
111#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
112 /* Check to see if the dbcr0 register is set up to debug. Use the
113 single-step bit to do this. */
114 lwz r12,THREAD_DBCR0(r12)
115 andis. r12,r12,DBCR0_IC@h
116 beq+ 3f
117 /* From user and task is ptraced - load up global dbcr0 */
118 li r12,-1 /* clear all pending debug events */
119 mtspr SPRN_DBSR,r12
120 lis r11,global_dbcr0@ha
121 tophys(r11,r11)
122 addi r11,r11,global_dbcr0@l
123 lwz r12,0(r11)
124 mtspr SPRN_DBCR0,r12
125 lwz r12,4(r11)
126 addi r12,r12,-1
127 stw r12,4(r11)
128#endif
129 b 3f
130
1312: /* if from kernel, check interrupted DOZE/NAP mode and
132 * check for stack overflow
133 */
134 lwz r9,THREAD_INFO-THREAD(r12)
135 cmplw r1,r9 /* if r1 <= current->thread_info */
136 ble- stack_ovf /* then the kernel stack overflowed */
1375:
138#ifdef CONFIG_6xx
139 tophys(r9,r9) /* check local flags */
140 lwz r12,TI_LOCAL_FLAGS(r9)
141 mtcrf 0x01,r12
142 bt- 31-TLF_NAPPING,4f
143#endif /* CONFIG_6xx */
144 .globl transfer_to_handler_cont
145transfer_to_handler_cont:
1463:
147 mflr r9
148 lwz r11,0(r9) /* virtual address of handler */
149 lwz r9,4(r9) /* where to go when done */
150 mtspr SPRN_SRR0,r11
151 mtspr SPRN_SRR1,r10
152 mtlr r9
153 SYNC
154 RFI /* jump to handler, enable MMU */
155
156#ifdef CONFIG_6xx
1574: rlwinm r12,r12,0,~_TLF_NAPPING
158 stw r12,TI_LOCAL_FLAGS(r9)
159 b power_save_6xx_restore
160#endif
161
162/*
163 * On kernel stack overflow, load up an initial stack pointer
164 * and call StackOverflow(regs), which should not return.
165 */
166stack_ovf:
167 /* sometimes we use a statically-allocated stack, which is OK. */
168 lis r12,_end@h
169 ori r12,r12,_end@l
170 cmplw r1,r12
171 ble 5b /* r1 <= &_end is OK */
172 SAVE_NVGPRS(r11)
173 addi r3,r1,STACK_FRAME_OVERHEAD
174 lis r1,init_thread_union@ha
175 addi r1,r1,init_thread_union@l
176 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
177 lis r9,StackOverflow@ha
178 addi r9,r9,StackOverflow@l
179 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
180 FIX_SRR1(r10,r12)
181 mtspr SPRN_SRR0,r9
182 mtspr SPRN_SRR1,r10
183 SYNC
184 RFI
185
186/*
187 * Handle a system call.
188 */
189 .stabs "arch/ppc/kernel/",N_SO,0,0,0f
190 .stabs "entry.S",N_SO,0,0,0f
1910:
192
193_GLOBAL(DoSyscall)
194 stw r3,ORIG_GPR3(r1)
195 li r12,0
196 stw r12,RESULT(r1)
197 lwz r11,_CCR(r1) /* Clear SO bit in CR */
198 rlwinm r11,r11,0,4,2
199 stw r11,_CCR(r1)
200#ifdef SHOW_SYSCALLS
201 bl do_show_syscall
202#endif /* SHOW_SYSCALLS */
203 rlwinm r10,r1,0,0,18 /* current_thread_info() */
204 lwz r11,TI_FLAGS(r10)
205 andi. r11,r11,_TIF_SYSCALL_T_OR_A
206 bne- syscall_dotrace
207syscall_dotrace_cont:
208 cmplwi 0,r0,NR_syscalls
209 lis r10,sys_call_table@h
210 ori r10,r10,sys_call_table@l
211 slwi r0,r0,2
212 bge- 66f
213 lwzx r10,r10,r0 /* Fetch system call handler [ptr] */
214 mtlr r10
215 addi r9,r1,STACK_FRAME_OVERHEAD
216 PPC440EP_ERR42
217 blrl /* Call handler */
218 .globl ret_from_syscall
219ret_from_syscall:
220#ifdef SHOW_SYSCALLS
221 bl do_show_syscall_exit
222#endif
223 mr r6,r3
224 rlwinm r12,r1,0,0,18 /* current_thread_info() */
225 /* disable interrupts so current_thread_info()->flags can't change */
226 LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
227 SYNC
228 MTMSRD(r10)
229 lwz r9,TI_FLAGS(r12)
230 li r8,-_LAST_ERRNO
231 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
232 bne- syscall_exit_work
233 cmplw 0,r3,r8
234 blt+ syscall_exit_cont
235 lwz r11,_CCR(r1) /* Load CR */
236 neg r3,r3
237 oris r11,r11,0x1000 /* Set SO bit in CR */
238 stw r11,_CCR(r1)
239syscall_exit_cont:
240#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
241 /* If the process has its own DBCR0 value, load it up. The single
242 step bit tells us that dbcr0 should be loaded. */
243 lwz r0,THREAD+THREAD_DBCR0(r2)
244 andis. r10,r0,DBCR0_IC@h
245 bnel- load_dbcr0
246#endif
247#ifdef CONFIG_44x
248 lis r4,icache_44x_need_flush@ha
249 lwz r5,icache_44x_need_flush@l(r4)
250 cmplwi cr0,r5,0
251 bne- 2f
2521:
253#endif /* CONFIG_44x */
254BEGIN_FTR_SECTION
255 lwarx r7,0,r1
256END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
257 stwcx. r0,0,r1 /* to clear the reservation */
258 lwz r4,_LINK(r1)
259 lwz r5,_CCR(r1)
260 mtlr r4
261 mtcr r5
262 lwz r7,_NIP(r1)
263 lwz r8,_MSR(r1)
264 FIX_SRR1(r8, r0)
265 lwz r2,GPR2(r1)
266 lwz r1,GPR1(r1)
267 mtspr SPRN_SRR0,r7
268 mtspr SPRN_SRR1,r8
269 SYNC
270 RFI
271#ifdef CONFIG_44x
2722: li r7,0
273 iccci r0,r0
274 stw r7,icache_44x_need_flush@l(r4)
275 b 1b
276#endif /* CONFIG_44x */
277
27866: li r3,-ENOSYS
279 b ret_from_syscall
280
281 .globl ret_from_fork
282ret_from_fork:
283 REST_NVGPRS(r1)
284 bl schedule_tail
285 li r3,0
286 b ret_from_syscall
287
288/* Traced system call support */
289syscall_dotrace:
290 SAVE_NVGPRS(r1)
291 li r0,0xc00
292 stw r0,TRAP(r1)
293 addi r3,r1,STACK_FRAME_OVERHEAD
294 bl do_syscall_trace_enter
295 lwz r0,GPR0(r1) /* Restore original registers */
296 lwz r3,GPR3(r1)
297 lwz r4,GPR4(r1)
298 lwz r5,GPR5(r1)
299 lwz r6,GPR6(r1)
300 lwz r7,GPR7(r1)
301 lwz r8,GPR8(r1)
302 REST_NVGPRS(r1)
303 b syscall_dotrace_cont
304
305syscall_exit_work:
306 andi. r0,r9,_TIF_RESTOREALL
307 beq+ 0f
308 REST_NVGPRS(r1)
309 b 2f
3100: cmplw 0,r3,r8
311 blt+ 1f
312 andi. r0,r9,_TIF_NOERROR
313 bne- 1f
314 lwz r11,_CCR(r1) /* Load CR */
315 neg r3,r3
316 oris r11,r11,0x1000 /* Set SO bit in CR */
317 stw r11,_CCR(r1)
318
3191: stw r6,RESULT(r1) /* Save result */
320 stw r3,GPR3(r1) /* Update return value */
3212: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
322 beq 4f
323
324 /* Clear per-syscall TIF flags if any are set. */
325
326 li r11,_TIF_PERSYSCALL_MASK
327 addi r12,r12,TI_FLAGS
3283: lwarx r8,0,r12
329 andc r8,r8,r11
330#ifdef CONFIG_IBM405_ERR77
331 dcbt 0,r12
332#endif
333 stwcx. r8,0,r12
334 bne- 3b
335 subi r12,r12,TI_FLAGS
336
3374: /* Anything which requires enabling interrupts? */
338 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
339 beq ret_from_except
340
341 /* Re-enable interrupts */
342 ori r10,r10,MSR_EE
343 SYNC
344 MTMSRD(r10)
345
346 /* Save NVGPRS if they're not saved already */
347 lwz r4,TRAP(r1)
348 andi. r4,r4,1
349 beq 5f
350 SAVE_NVGPRS(r1)
351 li r4,0xc00
352 stw r4,TRAP(r1)
3535:
354 addi r3,r1,STACK_FRAME_OVERHEAD
355 bl do_syscall_trace_leave
356 b ret_from_except_full
357
358#ifdef SHOW_SYSCALLS
359do_show_syscall:
360#ifdef SHOW_SYSCALLS_TASK
361 lis r11,show_syscalls_task@ha
362 lwz r11,show_syscalls_task@l(r11)
363 cmp 0,r2,r11
364 bnelr
365#endif
366 stw r31,GPR31(r1)
367 mflr r31
368 lis r3,7f@ha
369 addi r3,r3,7f@l
370 lwz r4,GPR0(r1)
371 lwz r5,GPR3(r1)
372 lwz r6,GPR4(r1)
373 lwz r7,GPR5(r1)
374 lwz r8,GPR6(r1)
375 lwz r9,GPR7(r1)
376 bl printk
377 lis r3,77f@ha
378 addi r3,r3,77f@l
379 lwz r4,GPR8(r1)
380 mr r5,r2
381 bl printk
382 lwz r0,GPR0(r1)
383 lwz r3,GPR3(r1)
384 lwz r4,GPR4(r1)
385 lwz r5,GPR5(r1)
386 lwz r6,GPR6(r1)
387 lwz r7,GPR7(r1)
388 lwz r8,GPR8(r1)
389 mtlr r31
390 lwz r31,GPR31(r1)
391 blr
392
393do_show_syscall_exit:
394#ifdef SHOW_SYSCALLS_TASK
395 lis r11,show_syscalls_task@ha
396 lwz r11,show_syscalls_task@l(r11)
397 cmp 0,r2,r11
398 bnelr
399#endif
400 stw r31,GPR31(r1)
401 mflr r31
402 stw r3,RESULT(r1) /* Save result */
403 mr r4,r3
404 lis r3,79f@ha
405 addi r3,r3,79f@l
406 bl printk
407 lwz r3,RESULT(r1)
408 mtlr r31
409 lwz r31,GPR31(r1)
410 blr
411
4127: .string "syscall %d(%x, %x, %x, %x, %x, "
41377: .string "%x), current=%p\n"
41479: .string " -> %x\n"
415 .align 2,0
416
417#ifdef SHOW_SYSCALLS_TASK
418 .data
419 .globl show_syscalls_task
420show_syscalls_task:
421 .long -1
422 .text
423#endif
424#endif /* SHOW_SYSCALLS */
425
426/*
427 * The fork/clone functions need to copy the full register set into
428 * the child process. Therefore we need to save all the nonvolatile
429 * registers (r13 - r31) before calling the C code.
430 */
431 .globl ppc_fork
432ppc_fork:
433 SAVE_NVGPRS(r1)
434 lwz r0,TRAP(r1)
435 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
436 stw r0,TRAP(r1) /* register set saved */
437 b sys_fork
438
439 .globl ppc_vfork
440ppc_vfork:
441 SAVE_NVGPRS(r1)
442 lwz r0,TRAP(r1)
443 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
444 stw r0,TRAP(r1) /* register set saved */
445 b sys_vfork
446
447 .globl ppc_clone
448ppc_clone:
449 SAVE_NVGPRS(r1)
450 lwz r0,TRAP(r1)
451 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
452 stw r0,TRAP(r1) /* register set saved */
453 b sys_clone
454
455 .globl ppc_swapcontext
456ppc_swapcontext:
457 SAVE_NVGPRS(r1)
458 lwz r0,TRAP(r1)
459 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
460 stw r0,TRAP(r1) /* register set saved */
461 b sys_swapcontext
462
463/*
464 * Top-level page fault handling.
465 * This is in assembler because if do_page_fault tells us that
466 * it is a bad kernel page fault, we want to save the non-volatile
467 * registers before calling bad_page_fault.
468 */
469 .globl handle_page_fault
470handle_page_fault:
471 stw r4,_DAR(r1)
472 addi r3,r1,STACK_FRAME_OVERHEAD
473 bl do_page_fault
474 cmpwi r3,0
475 beq+ ret_from_except
476 SAVE_NVGPRS(r1)
477 lwz r0,TRAP(r1)
478 clrrwi r0,r0,1
479 stw r0,TRAP(r1)
480 mr r5,r3
481 addi r3,r1,STACK_FRAME_OVERHEAD
482 lwz r4,_DAR(r1)
483 bl bad_page_fault
484 b ret_from_except_full
485
486/*
487 * This routine switches between two different tasks. The process
488 * state of one is saved on its kernel stack. Then the state
489 * of the other is restored from its kernel stack. The memory
490 * management hardware is updated to the second process's state.
491 * Finally, we can return to the second process.
492 * On entry, r3 points to the THREAD for the current task, r4
493 * points to the THREAD for the new task.
494 *
495 * This routine is always called with interrupts disabled.
496 *
497 * Note: there are two ways to get to the "going out" portion
498 * of this code; either by coming in via the entry (_switch)
499 * or via "fork" which must set up an environment equivalent
500 * to the "_switch" path. If you change this , you'll have to
501 * change the fork code also.
502 *
503 * The code which creates the new task context is in 'copy_thread'
504 * in arch/ppc/kernel/process.c
505 */
506_GLOBAL(_switch)
507 stwu r1,-INT_FRAME_SIZE(r1)
508 mflr r0
509 stw r0,INT_FRAME_SIZE+4(r1)
510 /* r3-r12 are caller saved -- Cort */
511 SAVE_NVGPRS(r1)
512 stw r0,_NIP(r1) /* Return to switch caller */
513 mfmsr r11
514 li r0,MSR_FP /* Disable floating-point */
515#ifdef CONFIG_ALTIVEC
516BEGIN_FTR_SECTION
517 oris r0,r0,MSR_VEC@h /* Disable altivec */
518 mfspr r12,SPRN_VRSAVE /* save vrsave register value */
519 stw r12,THREAD+THREAD_VRSAVE(r2)
520END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
521#endif /* CONFIG_ALTIVEC */
522 and. r0,r0,r11 /* FP or altivec enabled? */
523 beq+ 1f
524 andc r11,r11,r0
525 MTMSRD(r11)
526 isync
5271: stw r11,_MSR(r1)
528 mfcr r10
529 stw r10,_CCR(r1)
530 stw r1,KSP(r3) /* Set old stack pointer */
531
532#ifdef CONFIG_SMP
533 /* We need a sync somewhere here to make sure that if the
534 * previous task gets rescheduled on another CPU, it sees all
535 * stores it has performed on this one.
536 */
537 sync
538#endif /* CONFIG_SMP */
539
540 tophys(r0,r4)
541 CLR_TOP32(r0)
542 mtspr SPRN_SPRG3,r0 /* Update current THREAD phys addr */
543 lwz r1,KSP(r4) /* Load new stack pointer */
544
545 /* save the old current 'last' for return value */
546 mr r3,r2
547 addi r2,r4,-THREAD /* Update current */
548
549#ifdef CONFIG_ALTIVEC
550BEGIN_FTR_SECTION
551 lwz r0,THREAD+THREAD_VRSAVE(r2)
552 mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
553END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
554#endif /* CONFIG_ALTIVEC */
555 lwz r0,_CCR(r1)
556 mtcrf 0xFF,r0
557 /* r3-r12 are destroyed -- Cort */
558 REST_NVGPRS(r1)
559
560 lwz r4,_NIP(r1) /* Return to _switch caller in new task */
561 mtlr r4
562 addi r1,r1,INT_FRAME_SIZE
563 blr
564
/*
 * Fast exit path for exceptions that saved only the volatile registers
 * (r9 = saved SRR1/MSR, r11 = exception frame pointer, r12 = saved
 * SRR0 on entry here).  Restores GPRs/CR/LR from the frame, reloads
 * SRR0/SRR1 and returns with RFI.  On classic (non-4xx/BookE) parts it
 * first checks MSR_RI; an unrecoverable interrupt that hit inside the
 * restartable exit window is retried, otherwise we panic via
 * nonrecoverable_exception.
 */
565	.globl	fast_exception_return
566fast_exception_return:
567#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
568	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
569	beq	1f			/* if not, we've got problems */
570#endif
571
5722:	REST_4GPRS(3, r11)
573	lwz	r10,_CCR(r11)
574	REST_GPR(1, r11)
575	mtcr	r10
576	lwz	r10,_LINK(r11)
577	mtlr	r10
578	REST_GPR(10, r11)
	/* From here SRR0/SRR1 hold return state: not recoverable if we
	 * take another exception before the RFI. */
579	mtspr	SPRN_SRR1,r9
580	mtspr	SPRN_SRR0,r12
581	REST_GPR(9, r11)
582	REST_GPR(12, r11)
583	lwz	r11,GPR11(r11)
584	SYNC
585	RFI
586
587#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
588/* check if the exception happened in a restartable section */
5891:	lis	r3,exc_exit_restart_end@ha
590	addi	r3,r3,exc_exit_restart_end@l
591	cmplw	r12,r3
592	bge	3f
593	lis	r4,exc_exit_restart@ha
594	addi	r4,r4,exc_exit_restart@l
595	cmplw	r12,r4
596	blt	3f
	/* Inside [exc_exit_restart, exc_exit_restart_end): bump the
	 * fee_restarts counter (physical access: MMU may be off) and
	 * restart the interrupted exit sequence. */
597	lis	r3,fee_restarts@ha
598	tophys(r3,r3)
599	lwz	r5,fee_restarts@l(r3)
600	addi	r5,r5,1
601	stw	r5,fee_restarts@l(r3)
602	mr	r12,r4		/* restart at exc_exit_restart */
603	b	2b
604
605	.section .bss
606	.align	2
607fee_restarts:
608	.space	4
609	.previous
610
611/* aargh, a nonrecoverable interrupt, panic */
612/* aargh, we don't know which trap this is */
613/* but the 601 doesn't implement the RI bit, so assume it's OK */
6143:
615BEGIN_FTR_SECTION
616	b	2b
617END_FTR_SECTION_IFSET(CPU_FTR_601)
	/* Mark the frame with trap = -1 and hand off to
	 * nonrecoverable_exception via transfer_to_handler_full. */
618	li	r10,-1
619	stw	r10,TRAP(r11)
620	addi	r3,r1,STACK_FRAME_OVERHEAD
621	lis	r10,MSR_KERNEL@h
622	ori	r10,r10,MSR_KERNEL@l
623	bl	transfer_to_handler_full
624	.long	nonrecoverable_exception
625	.long	ret_from_except
626#endif
627
/*
 * Common exception-return entry points.  ret_from_except_full also
 * restores the non-volatile GPRs (for handlers that clobbered them),
 * then falls through to ret_from_except, which hard-disables
 * interrupts, decides user vs. kernel return from the saved MSR_PR,
 * and dispatches to do_work / restore as needed.
 */
628	.globl	ret_from_except_full
629ret_from_except_full:
630	REST_NVGPRS(r1)
631	/* fall through */
632
633	.globl	ret_from_except
634ret_from_except:
635	/* Hard-disable interrupts so that current_thread_info()->flags
636	 * can't change between when we test it and when we return
637	 * from the interrupt. */
638	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
639	SYNC			/* Some chip revs have problems here... */
640	MTMSRD(r10)		/* disable interrupts */
641
642	lwz	r3,_MSR(r1)	/* Returning to user mode? */
643	andi.	r0,r3,MSR_PR
644	beq	resume_kernel
645
646user_exc_return:		/* r10 contains MSR_KERNEL here */
647	/* Check current_thread_info()->flags */
	/* rlwinm r9,r1,0,0,18 masks off the low 13 bits of the kernel
	 * stack pointer, yielding the 8KB-aligned thread_info. */
648	rlwinm	r9,r1,0,0,18
649	lwz	r9,TI_FLAGS(r9)
650	andi.	r0,r9,_TIF_USER_WORK_MASK
651	bne	do_work
652
653restore_user:
654#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
655	/* Check whether this process has its own DBCR0 value.  The single
656	   step bit tells us that dbcr0 should be loaded. */
657	lwz	r0,THREAD+THREAD_DBCR0(r2)
658	andis.	r10,r0,DBCR0_IC@h
659	bnel-	load_dbcr0
660#endif
661
662#ifdef CONFIG_PREEMPT
663	b	restore
664
665/* N.B. the only way to get here is from the beq following ret_from_except. */
666resume_kernel:
667	/* check current_thread_info->preempt_count */
668	rlwinm	r9,r1,0,0,18
669	lwz	r0,TI_PREEMPT(r9)
670	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
671	bne	restore
672	lwz	r0,TI_FLAGS(r9)
673	andi.	r0,r0,_TIF_NEED_RESCHED
674	beq+	restore
	/* Only preempt if the interrupted context had interrupts on
	 * (MSR_EE in the saved MSR, still in r3). */
675	andi.	r0,r3,MSR_EE	/* interrupts off? */
676	beq	restore		/* don't schedule if so */
6771:	bl	preempt_schedule_irq
678	rlwinm	r9,r1,0,0,18
679	lwz	r3,TI_FLAGS(r9)
680	andi.	r0,r3,_TIF_NEED_RESCHED
681	bne-	1b
682#else
683resume_kernel:
684#endif /* CONFIG_PREEMPT */
685
686	/* interrupts are hard-disabled at this point */
/*
 * restore: final register-restore and return-from-exception sequence.
 * Reloads all GPRs, XER, CTR, CR and LR from the exception frame,
 * clears any outstanding reservation with stwcx., then loads
 * SRR0/SRR1 and RFIs.  The window between the MTMSRD that clears
 * MSR_RI and the RFI is the restartable section delimited by
 * exc_exit_restart/exc_exit_restart_end, used by the recovery code
 * in fast_exception_return and nonrecoverable.
 */
687restore:
688#ifdef CONFIG_44x
	/* 440 erratum workaround: flush the icache on return if a
	 * deferred flush was requested (icache_44x_need_flush flag). */
689	lis	r4,icache_44x_need_flush@ha
690	lwz	r5,icache_44x_need_flush@l(r4)
691	cmplwi	cr0,r5,0
692	beq+	1f
693	li	r6,0
694	iccci	r0,r0
695	stw	r6,icache_44x_need_flush@l(r4)
6961:
697#endif  /* CONFIG_44x */
698	lwz	r0,GPR0(r1)
699	lwz	r2,GPR2(r1)
700	REST_4GPRS(3, r1)
701	REST_2GPRS(7, r1)
702
703	lwz	r10,_XER(r1)
704	lwz	r11,_CTR(r1)
705	mtspr	SPRN_XER,r10
706	mtctr	r11
707
708	PPC405_ERR77(0,r1)
	/* Some CPUs need a matching lwarx before stwcx. (paired
	 * reservation errata); the stwcx. just kills any reservation. */
709BEGIN_FTR_SECTION
710	lwarx	r11,0,r1
711END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
712	stwcx.	r0,0,r1			/* to clear the reservation */
713
714#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
715	lwz	r9,_MSR(r1)
716	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
717	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */
718
719	lwz	r10,_CCR(r1)
720	lwz	r11,_LINK(r1)
721	mtcrf	0xFF,r10
722	mtlr	r11
723
724	/*
725	 * Once we put values in SRR0 and SRR1, we are in a state
726	 * where exceptions are not recoverable, since taking an
727	 * exception will trash SRR0 and SRR1.  Therefore we clear the
728	 * MSR:RI bit to indicate this.  If we do take an exception,
729	 * we can't return to the point of the exception but we
730	 * can restart the exception exit path at the label
731	 * exc_exit_restart below.  -- paulus
732	 */
733	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
734	SYNC
735	MTMSRD(r10)		/* clear the RI bit */
736	.globl exc_exit_restart
737exc_exit_restart:
738	lwz	r9,_MSR(r1)
739	lwz	r12,_NIP(r1)
740	FIX_SRR1(r9,r10)
741	mtspr	SPRN_SRR0,r12
742	mtspr	SPRN_SRR1,r9
743	REST_4GPRS(9, r1)
744	lwz	r1,GPR1(r1)
745	.globl exc_exit_restart_end
746exc_exit_restart_end:
747	SYNC
748	RFI
749
750#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
751	/*
752	 * This is a bit different on 4xx/Book-E because it doesn't have
753	 * the RI bit in the MSR.
754	 * The TLB miss handler checks if we have interrupted
755	 * the exception exit path and restarts it if so
756	 * (well maybe one day it will... :).
757	 */
758	lwz	r11,_LINK(r1)
759	mtlr	r11
760	lwz	r10,_CCR(r1)
761	mtcrf	0xff,r10
762	REST_2GPRS(9, r1)
763	.globl exc_exit_restart
764exc_exit_restart:
765	lwz	r11,_NIP(r1)
766	lwz	r12,_MSR(r1)
767exc_exit_start:
768	mtspr	SPRN_SRR0,r11
769	mtspr	SPRN_SRR1,r12
770	REST_2GPRS(11, r1)
771	lwz	r1,GPR1(r1)
772	.globl exc_exit_restart_end
773exc_exit_restart_end:
774	PPC405_ERR77_SYNC
775	rfi
776	b	.			/* prevent prefetch past rfi */
777
778/*
779 * Returning from a critical interrupt in user mode doesn't need
780 * to be any different from a normal exception. For a critical
781 * interrupt in the kernel, we just return (without checking for
782 * preemption) since the interrupt may have happened at some crucial
783 * place (e.g. inside the TLB miss handler), and because we will be
784 * running with r1 pointing into critical_stack, not the current
785 * process's kernel stack (and therefore current_thread_info() will
786 * give the wrong answer).
787 * We have to restore various SPRs that may have been in use at the
788 * time of the critical interrupt.
789 *
790 */
791#ifdef CONFIG_40x
792#define PPC_40x_TURN_OFF_MSR_DR \
793 /* avoid any possible TLB misses here by turning off MSR.DR, we \
794 * assume the instructions here are mapped by a pinned TLB entry */ \
795 li r10,MSR_IR; \
796 mtmsr r10; \
797 isync; \
798 tophys(r1, r1);
799#else
800#define PPC_40x_TURN_OFF_MSR_DR
801#endif
802
803#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi) \
804 REST_NVGPRS(r1); \
805 lwz r3,_MSR(r1); \
806 andi. r3,r3,MSR_PR; \
807 LOAD_MSR_KERNEL(r10,MSR_KERNEL); \
808 bne user_exc_return; \
809 lwz r0,GPR0(r1); \
810 lwz r2,GPR2(r1); \
811 REST_4GPRS(3, r1); \
812 REST_2GPRS(7, r1); \
813 lwz r10,_XER(r1); \
814 lwz r11,_CTR(r1); \
815 mtspr SPRN_XER,r10; \
816 mtctr r11; \
817 PPC405_ERR77(0,r1); \
818 stwcx. r0,0,r1; /* to clear the reservation */ \
819 lwz r11,_LINK(r1); \
820 mtlr r11; \
821 lwz r10,_CCR(r1); \
822 mtcrf 0xff,r10; \
823 PPC_40x_TURN_OFF_MSR_DR; \
824 lwz r9,_DEAR(r1); \
825 lwz r10,_ESR(r1); \
826 mtspr SPRN_DEAR,r9; \
827 mtspr SPRN_ESR,r10; \
828 lwz r11,_NIP(r1); \
829 lwz r12,_MSR(r1); \
830 mtspr exc_lvl_srr0,r11; \
831 mtspr exc_lvl_srr1,r12; \
832 lwz r9,GPR9(r1); \
833 lwz r12,GPR12(r1); \
834 lwz r10,GPR10(r1); \
835 lwz r11,GPR11(r1); \
836 lwz r1,GPR1(r1); \
837 PPC405_ERR77_SYNC; \
838 exc_lvl_rfi; \
839 b .; /* prevent prefetch past exc_lvl_rfi */
840
841 .globl ret_from_crit_exc
842ret_from_crit_exc:
843 RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)
844
845#ifdef CONFIG_BOOKE
846 .globl ret_from_debug_exc
847ret_from_debug_exc:
848 RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI)
849
850 .globl ret_from_mcheck_exc
851ret_from_mcheck_exc:
852 RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI)
853#endif /* CONFIG_BOOKE */
854
855/*
856 * Load the DBCR0 value for a task that is being ptraced,
857 * having first saved away the global DBCR0. Note that r0
858 * has the dbcr0 value to set upon entry to this.
859 */
860load_dbcr0:
861 mfmsr r10 /* first disable debug exceptions */
862 rlwinm r10,r10,0,~MSR_DE
863 mtmsr r10
864 isync
865 mfspr r10,SPRN_DBCR0
866 lis r11,global_dbcr0@ha
867 addi r11,r11,global_dbcr0@l
868 stw r10,0(r11)
869 mtspr SPRN_DBCR0,r0
870 lwz r10,4(r11)
871 addi r10,r10,1
872 stw r10,4(r11)
873 li r11,-1
874 mtspr SPRN_DBSR,r11 /* clear all pending debug events */
875 blr
876
877 .section .bss
878 .align 4
879global_dbcr0:
880 .space 8
881 .previous
882#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
883
/*
 * Pre-return-to-user work loop: reschedule and/or deliver signals
 * until TI_FLAGS is clean, then jump back to restore_user.
 * Interrupts are hard-disabled on entry (r10 = MSR_KERNEL) and are
 * enabled only around schedule()/do_signal().
 */
884do_work:			/* r10 contains MSR_KERNEL here */
885	andi.	r0,r9,_TIF_NEED_RESCHED
886	beq	do_user_signal
887
888do_resched:			/* r10 contains MSR_KERNEL here */
889	ori	r10,r10,MSR_EE
890	SYNC
891	MTMSRD(r10)		/* hard-enable interrupts */
892	bl	schedule
893recheck:
	/* Re-disable interrupts and re-test the flags; schedule() or a
	 * signal may have set new work bits. */
894	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
895	SYNC
896	MTMSRD(r10)		/* disable interrupts */
897	rlwinm	r9,r1,0,0,18
898	lwz	r9,TI_FLAGS(r9)
899	andi.	r0,r9,_TIF_NEED_RESCHED
900	bne-	do_resched
901	andi.	r0,r9,_TIF_USER_WORK_MASK
902	beq	restore_user
903do_user_signal:			/* r10 contains MSR_KERNEL here */
904	ori	r10,r10,MSR_EE
905	SYNC
906	MTMSRD(r10)		/* hard-enable interrupts */
907	/* save r13-r31 in the exception frame, if not already done */
	/* TRAP bit 0 set means the NVGPRs are NOT yet saved; clear it
	 * (rlwinm ...,0,30) once they are. */
908	lwz	r3,TRAP(r1)
909	andi.	r0,r3,1
910	beq	2f
911	SAVE_NVGPRS(r1)
912	rlwinm	r3,r3,0,0,30
913	stw	r3,TRAP(r1)
9142:	li	r3,0
915	addi	r4,r1,STACK_FRAME_OVERHEAD
916	bl	do_signal
917	REST_NVGPRS(r1)
918	b	recheck
919
920/*
921 * We come here when we are at the end of handling an exception
922 * that occurred at a place where taking an exception will lose
923 * state information, such as the contents of SRR0 and SRR1.
924 */
925nonrecoverable:
926 lis r10,exc_exit_restart_end@ha
927 addi r10,r10,exc_exit_restart_end@l
928 cmplw r12,r10
929 bge 3f
930 lis r11,exc_exit_restart@ha
931 addi r11,r11,exc_exit_restart@l
932 cmplw r12,r11
933 blt 3f
934 lis r10,ee_restarts@ha
935 lwz r12,ee_restarts@l(r10)
936 addi r12,r12,1
937 stw r12,ee_restarts@l(r10)
938 mr r12,r11 /* restart at exc_exit_restart */
939 blr
9403: /* OK, we can't recover, kill this process */
941 /* but the 601 doesn't implement the RI bit, so assume it's OK */
942BEGIN_FTR_SECTION
943 blr
944END_FTR_SECTION_IFSET(CPU_FTR_601)
945 lwz r3,TRAP(r1)
946 andi. r0,r3,1
947 beq 4f
948 SAVE_NVGPRS(r1)
949 rlwinm r3,r3,0,0,30
950 stw r3,TRAP(r1)
9514: addi r3,r1,STACK_FRAME_OVERHEAD
952 bl nonrecoverable_exception
953 /* shouldn't return */
954 b 4b
955
956 .section .bss
957 .align 2
958ee_restarts:
959 .space 4
960 .previous
diff --git a/arch/ppc/kernel/head.S b/arch/ppc/kernel/head.S
deleted file mode 100644
index e7e642b95138..000000000000
--- a/arch/ppc/kernel/head.S
+++ /dev/null
@@ -1,1220 +0,0 @@
1/*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
6 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
7 * Adapted for Power Macintosh by Paul Mackerras.
8 * Low-level exception handlers and MMU support
9 * rewritten by Paul Mackerras.
10 * Copyright (C) 1996 Paul Mackerras.
11 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
12 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
13 *
14 * This file contains the low-level support and setup for the
15 * PowerPC platform, including trap and interrupt dispatch.
16 * (The PPC 8xx embedded CPUs use head_8xx.S instead.)
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
22 *
23 */
24
25#include <asm/processor.h>
26#include <asm/page.h>
27#include <asm/mmu.h>
28#include <asm/pgtable.h>
29#include <asm/cputable.h>
30#include <asm/cache.h>
31#include <asm/thread_info.h>
32#include <asm/ppc_asm.h>
33#include <asm/asm-offsets.h>
34
35/* 601 only have IBAT; cr0.eq is set on 601 when using this macro */
/* Load BAT pair n from the table at 'reg' (16 bytes per entry:
 * IBATU, IBATL, DBATU, DBATL).  The upper registers are zeroed first
 * so a partially-written BAT can never match.  On 601 (cr0.eq set by
 * the caller) the DBAT half is skipped -- 601 has no DBATs. */
36#define LOAD_BAT(n, reg, RA, RB)	\
37	/* see the comment for clear_bats() -- Cort */ \
38	li	RA,0;			\
39	mtspr	SPRN_IBAT##n##U,RA;	\
40	mtspr	SPRN_DBAT##n##U,RA;	\
41	lwz	RA,(n*16)+0(reg);	\
42	lwz	RB,(n*16)+4(reg);	\
43	mtspr	SPRN_IBAT##n##U,RA;	\
44	mtspr	SPRN_IBAT##n##L,RB;	\
45	beq	1f;			\
46	lwz	RA,(n*16)+8(reg);	\
47	lwz	RB,(n*16)+12(reg);	\
48	mtspr	SPRN_DBAT##n##U,RA;	\
49	mtspr	SPRN_DBAT##n##L,RB;	\
501:
51
52	.text
53	.stabs	"arch/ppc/kernel/",N_SO,0,0,0f
54	.stabs	"head.S",N_SO,0,0,0f
550:
56	.globl	_stext
57_stext:
58
59/*
60 * _start is defined this way because the XCOFF loader in the OpenFirmware
61 * on the powermac expects the entry point to be a procedure descriptor.
62 */
63	.text
64	.globl	_start
65_start:
66	/*
67	 * These are here for legacy reasons, the kernel used to
68	 * need to look like a coff function entry for the pmac
69	 * but we're always started by some kind of bootloader now.
70	 * -- Cort
71	 */
	/* Three nops so the image entry falls through to __start below. */
72	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
73	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
74	nop
75
76/* PMAC
77 * Enter here with the kernel text, data and bss loaded starting at
78 * 0, running with virtual == physical mapping.
79 * r5 points to the prom entry point (the client interface handler
80 * address).  Address translation is turned on, with the prom
81 * managing the hash table.  Interrupts are disabled.  The stack
82 * pointer (r1) points to just below the end of the half-meg region
83 * from 0x380000 - 0x400000, which is mapped in already.
84 *
85 * If we are booted from MacOS via BootX, we enter with the kernel
86 * image loaded somewhere, and the following values in registers:
87 *  r3: 'BooX' (0x426f6f58)
88 *  r4: virtual address of boot_infos_t
89 *  r5: 0
90 *
91 * APUS
92 *   r3: 'APUS'
93 *   r4: physical address of memory base
94 *   Linux/m68k style BootInfo structure at &_end.
95 *
96 * PREP
97 * This is jumped to on prep systems right after the kernel is relocated
98 * to its proper place in memory by the boot loader.  The expected layout
99 * of the regs is:
100 *   r3: ptr to residual data
101 *   r4: initrd_start or if no initrd then 0
102 *   r5: initrd_end - unused if r4 is 0
103 *   r6: Start of command line string
104 *   r7: End of command line string
105 *
106 * This just gets a minimal mmu environment setup so we can call
107 * start_here() to do the real work.
108 * -- Cort
109 */
110
111	.globl	__start
112__start:
	/* Stash the bootloader-supplied arguments in non-volatile
	 * registers (r27-r31) across all the early setup calls. */
113	mr	r31,r3			/* save parameters */
114	mr	r30,r4
115	mr	r29,r5
116	mr	r28,r6
117	mr	r27,r7
118	li	r24,0			/* cpu # */
119
120/*
121 * early_init() does the early machine identification and does
122 * the necessary low-level setup and clears the BSS
123 *  -- Cort <cort@fsmlabs.com>
124 */
125	bl	early_init
126
127/* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
128 * the physical address we are running at, returned by early_init()
129 */
130	bl	mmu_off
131__after_mmu_off:
132	bl	clear_bats
133	bl	flush_tlbs
134
135	bl	initial_bats
136#ifdef CONFIG_BOOTX_TEXT
137	bl	setup_disp_bat
138#endif
139
140/*
141 * Call setup_cpu for CPU 0 and initialize 6xx Idle
142 */
143	bl	reloc_offset
144	li	r24,0			/* cpu# */
145	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
146#ifdef CONFIG_6xx
147	bl	reloc_offset
148	bl	init_idle_6xx
149#endif /* CONFIG_6xx */
150
151
152/*
153 * We need to run with _start at physical address 0.
154 * If the MMU is already turned on, we copy stuff to KERNELBASE,
155 * otherwise we copy it to 0.
156 */
157	bl	reloc_offset
158	mr	r26,r3
159	addis	r4,r3,KERNELBASE@h	/* current address of _start */
160	cmpwi	0,r4,0			/* are we already running at 0? */
161	bne	relocate_kernel
162
163/*
164 * we now have the 1st 16M of ram mapped with the bats.
165 * prep needs the mmu to be turned on here, but pmac already has it on.
166 * this shouldn't bother the pmac since it just gets turned on again
167 * as we jump to our code at KERNELBASE. -- Cort
168 * Actually no, pmac doesn't have it on any more. BootX enters with MMU
169 * off, and in other cases, we now turn it off before changing BATs above.
170 */
/* Enable IR/DR by RFI-ing to start_here with the new MSR in SRR1. */
171turn_on_mmu:
172	mfmsr	r0
173	ori	r0,r0,MSR_DR|MSR_IR
174	mtspr	SPRN_SRR1,r0
175	lis	r0,start_here@h
176	ori	r0,r0,start_here@l
177	mtspr	SPRN_SRR0,r0
178	SYNC
179	RFI				/* enables MMU */
181/*
182 * We need __secondary_hold as a place to hold the other cpus on
183 * an SMP machine, even when we are running a UP kernel.
184 */
185 . = 0xc0 /* for prep bootloader */
186 li r3,1 /* MTX only has 1 cpu */
187 .globl __secondary_hold
188__secondary_hold:
189 /* tell the master we're here */
190 stw r3,4(0)
191#ifdef CONFIG_SMP
192100: lwz r4,0(0)
193 /* wait until we're told to start */
194 cmpw 0,r4,r3
195 bne 100b
196 /* our cpu # was at addr 0 - go */
197 mr r24,r3 /* cpu # */
198 b __secondary_start
199#else
200 b .
201#endif /* CONFIG_SMP */
202
203/*
204 * Exception entry code. This code runs with address translation
205 * turned off, i.e. using physical addresses.
206 * We assume sprg3 has the physical address of the current
207 * task's thread_struct.
208 */
209#define EXCEPTION_PROLOG \
210 mtspr SPRN_SPRG0,r10; \
211 mtspr SPRN_SPRG1,r11; \
212 mfcr r10; \
213 EXCEPTION_PROLOG_1; \
214 EXCEPTION_PROLOG_2
215
216#define EXCEPTION_PROLOG_1 \
217 mfspr r11,SPRN_SRR1; /* check whether user or kernel */ \
218 andi. r11,r11,MSR_PR; \
219 tophys(r11,r1); /* use tophys(r1) if kernel */ \
220 beq 1f; \
221 mfspr r11,SPRN_SPRG3; \
222 lwz r11,THREAD_INFO-THREAD(r11); \
223 addi r11,r11,THREAD_SIZE; \
224 tophys(r11,r11); \
2251: subi r11,r11,INT_FRAME_SIZE /* alloc exc. frame */
226
227
228#define EXCEPTION_PROLOG_2 \
229 CLR_TOP32(r11); \
230 stw r10,_CCR(r11); /* save registers */ \
231 stw r12,GPR12(r11); \
232 stw r9,GPR9(r11); \
233 mfspr r10,SPRN_SPRG0; \
234 stw r10,GPR10(r11); \
235 mfspr r12,SPRN_SPRG1; \
236 stw r12,GPR11(r11); \
237 mflr r10; \
238 stw r10,_LINK(r11); \
239 mfspr r12,SPRN_SRR0; \
240 mfspr r9,SPRN_SRR1; \
241 stw r1,GPR1(r11); \
242 stw r1,0(r11); \
243 tovirt(r1,r11); /* set new kernel sp */ \
244 li r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \
245 MTMSRD(r10); /* (except for mach check in rtas) */ \
246 stw r0,GPR0(r11); \
247 SAVE_4GPRS(3, r11); \
248 SAVE_2GPRS(7, r11)
249
250/*
251 * Note: code which follows this uses cr0.eq (set if from kernel),
252 * r11, r12 (SRR0), and r9 (SRR1).
253 *
254 * Note2: once we have set r1 we are in a position to take exceptions
255 * again, and we could thus set MSR:RI at that point.
256 */
257
258/*
259 * Exception vectors.
260 */
261#define EXCEPTION(n, label, hdlr, xfer) \
262 . = n; \
263label: \
264 EXCEPTION_PROLOG; \
265 addi r3,r1,STACK_FRAME_OVERHEAD; \
266 xfer(n, hdlr)
267
268#define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret) \
269 li r10,trap; \
270 stw r10,TRAP(r11); \
271 li r10,MSR_KERNEL; \
272 copyee(r10, r9); \
273 bl tfer; \
274i##n: \
275 .long hdlr; \
276 .long ret
277
278#define COPY_EE(d, s) rlwimi d,s,0,16,16
279#define NOCOPY(d, s)
280
281#define EXC_XFER_STD(n, hdlr) \
282 EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full, \
283 ret_from_except_full)
284
285#define EXC_XFER_LITE(n, hdlr) \
286 EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \
287 ret_from_except)
288
289#define EXC_XFER_EE(n, hdlr) \
290 EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \
291 ret_from_except_full)
292
293#define EXC_XFER_EE_LITE(n, hdlr) \
294 EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \
295 ret_from_except)
296
297/* System reset */
298/* core99 pmac starts the secondary here by changing the vector, and
299   putting it back to what it was (unknown_exception) when done.  */
300	EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)
301
302/* Machine check */
/* Open-coded rather than using EXCEPTION() so the label '7' sits
 * between PROLOG_1 and PROLOG_2 -- NOTE(review): presumably a resume
 * point for RTAS machine-check handling; confirm against users of
 * label 7. */
303	. = 0x200
304	mtspr	SPRN_SPRG0,r10
305	mtspr	SPRN_SPRG1,r11
306	mfcr	r10
307	EXCEPTION_PROLOG_1
3087:	EXCEPTION_PROLOG_2
309	addi	r3,r1,STACK_FRAME_OVERHEAD
310	EXC_XFER_STD(0x200, machine_check_exception)
311
312/* Data access exception. */
/* Try to refill the hash table (hash_page) for ordinary misses before
 * falling back to the C page-fault path; DSISR bits 0xa470 indicate
 * errors that hash_page cannot fix. */
313	. = 0x300
314DataAccess:
315	EXCEPTION_PROLOG
316	mfspr	r10,SPRN_DSISR
317	andis.	r0,r10,0xa470		/* weird error? */
318	bne	1f			/* if not, try to put a PTE */
319	mfspr	r4,SPRN_DAR		/* into the hash table */
320	rlwinm	r3,r10,32-15,21,21	/* DSISR_STORE -> _PAGE_RW */
321	bl	hash_page
3221:	stw	r10,_DSISR(r11)
323	mr	r5,r10
324	mfspr	r4,SPRN_DAR
325	EXC_XFER_EE_LITE(0x300, handle_page_fault)
326
327/* Instruction access exception. */
328	. = 0x400
329InstructionAccess:
330	EXCEPTION_PROLOG
331	andis.	r0,r9,0x4000		/* no pte found? */
332	beq	1f			/* if so, try to put a PTE */
333	li	r3,0			/* into the hash table */
334	mr	r4,r12			/* SRR0 is fault address */
335	bl	hash_page
3361:	mr	r4,r12
337	mr	r5,r9
338	EXC_XFER_EE_LITE(0x400, handle_page_fault)
339
340/* External interrupt */
341	EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
342
343/* Alignment exception */
344	. = 0x600
345Alignment:
346	EXCEPTION_PROLOG
347	mfspr	r4,SPRN_DAR
348	stw	r4,_DAR(r11)
349	mfspr	r5,SPRN_DSISR
350	stw	r5,_DSISR(r11)
351	addi	r3,r1,STACK_FRAME_OVERHEAD
352	EXC_XFER_EE(0x600, alignment_exception)
353
354/* Program check exception */
355	EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
356
357/* Floating-point unavailable */
/* cr0 is still set from the prolog's user/kernel test: bne means the
 * fault came from user mode, so just lazily load the FPU. */
358	. = 0x800
359FPUnavailable:
360	EXCEPTION_PROLOG
361	bne	load_up_fpu		/* if from user, just load it up */
362	addi	r3,r1,STACK_FRAME_OVERHEAD
363	EXC_XFER_EE_LITE(0x800, kernel_fp_unavailable_exception)
364
365/* Decrementer */
366	EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
367
368	EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_EE)
369	EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_EE)
370
371/* System call */
372	. = 0xc00
373SystemCall:
374	EXCEPTION_PROLOG
375	EXC_XFER_EE_LITE(0xc00, DoSyscall)
376
377/* Single step - not used on 601 */
378	EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
379	EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_EE)
380
381/*
382 * The Altivec unavailable trap is at 0x0f20.  Foo.
383 * We effectively remap it to 0x3000.
384 * We include an altivec unavailable exception vector even if
385 * not configured for Altivec, so that you can't panic a
386 * non-altivec kernel running on a machine with altivec just
387 * by executing an altivec instruction.
388 */
389	. = 0xf00
390	b	Trap_0f
391
392	. = 0xf20
393	b	AltiVecUnavailable
394
395Trap_0f:
396	EXCEPTION_PROLOG
397	addi	r3,r1,STACK_FRAME_OVERHEAD
398	EXC_XFER_EE(0xf00, unknown_exception)
399
400/*
401 * Handle TLB miss for instruction on 603/603e.
402 * Note: we get an alternate set of r0 - r3 to use automatically.
403 */
404 . = 0x1000
405InstructionTLBMiss:
406/*
407 * r0: stored ctr
408 * r1: linux style pte ( later becomes ppc hardware pte )
409 * r2: ptr to linux-style pte
410 * r3: scratch
411 */
412 mfctr r0
413 /* Get PTE (linux-style) and check access */
414 mfspr r3,SPRN_IMISS
415 lis r1,KERNELBASE@h /* check if kernel address */
416 cmplw 0,r3,r1
417 mfspr r2,SPRN_SPRG3
418 li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
419 lwz r2,PGDIR(r2)
420 blt+ 112f
421 lis r2,swapper_pg_dir@ha /* if kernel address, use */
422 addi r2,r2,swapper_pg_dir@l /* kernel page table */
423 mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */
424 rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */
425112: tophys(r2,r2)
426 rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
427 lwz r2,0(r2) /* get pmd entry */
428 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
429 beq- InstructionAddressInvalid /* return if no mapping */
430 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
431 lwz r3,0(r2) /* get linux-style pte */
432 andc. r1,r1,r3 /* check access & ~permission */
433 bne- InstructionAddressInvalid /* return if access not permitted */
434 ori r3,r3,_PAGE_ACCESSED /* set _PAGE_ACCESSED in pte */
435 /*
436 * NOTE! We are assuming this is not an SMP system, otherwise
437 * we would need to update the pte atomically with lwarx/stwcx.
438 */
439 stw r3,0(r2) /* update PTE (accessed bit) */
440 /* Convert linux-style PTE to low word of PPC-style PTE */
441 rlwinm r1,r3,32-10,31,31 /* _PAGE_RW -> PP lsb */
442 rlwinm r2,r3,32-7,31,31 /* _PAGE_DIRTY -> PP lsb */
443 and r1,r1,r2 /* writable if _RW and _DIRTY */
444 rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */
445 rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */
446 ori r1,r1,0xe14 /* clear out reserved bits and M */
447 andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */
448 mtspr SPRN_RPA,r1
449 mfspr r3,SPRN_IMISS
450 tlbli r3
451 mfspr r3,SPRN_SRR1 /* Need to restore CR0 */
452 mtcrf 0x80,r3
453 rfi
454InstructionAddressInvalid:
455 mfspr r3,SPRN_SRR1
456 rlwinm r1,r3,9,6,6 /* Get load/store bit */
457
458 addis r1,r1,0x2000
459 mtspr SPRN_DSISR,r1 /* (shouldn't be needed) */
460 mtctr r0 /* Restore CTR */
461 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
462 or r2,r2,r1
463 mtspr SPRN_SRR1,r2
464 mfspr r1,SPRN_IMISS /* Get failing address */
465 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
466 rlwimi r2,r2,1,30,30 /* change 1 -> 3 */
467 xor r1,r1,r2
468 mtspr SPRN_DAR,r1 /* Set fault address */
469 mfmsr r0 /* Restore "normal" registers */
470 xoris r0,r0,MSR_TGPR>>16
471 mtcrf 0x80,r3 /* Restore CR0 */
472 mtmsr r0
473 b InstructionAccess
474
475/*
476 * Handle TLB miss for DATA Load operation on 603/603e
477 */
478 . = 0x1100
479DataLoadTLBMiss:
480/*
481 * r0: stored ctr
482 * r1: linux style pte ( later becomes ppc hardware pte )
483 * r2: ptr to linux-style pte
484 * r3: scratch
485 */
486 mfctr r0
487 /* Get PTE (linux-style) and check access */
488 mfspr r3,SPRN_DMISS
489 lis r1,KERNELBASE@h /* check if kernel address */
490 cmplw 0,r3,r1
491 mfspr r2,SPRN_SPRG3
492 li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
493 lwz r2,PGDIR(r2)
494 blt+ 112f
495 lis r2,swapper_pg_dir@ha /* if kernel address, use */
496 addi r2,r2,swapper_pg_dir@l /* kernel page table */
497 mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */
498 rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */
499112: tophys(r2,r2)
500 rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
501 lwz r2,0(r2) /* get pmd entry */
502 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
503 beq- DataAddressInvalid /* return if no mapping */
504 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
505 lwz r3,0(r2) /* get linux-style pte */
506 andc. r1,r1,r3 /* check access & ~permission */
507 bne- DataAddressInvalid /* return if access not permitted */
508 ori r3,r3,_PAGE_ACCESSED /* set _PAGE_ACCESSED in pte */
509 /*
510 * NOTE! We are assuming this is not an SMP system, otherwise
511 * we would need to update the pte atomically with lwarx/stwcx.
512 */
513 stw r3,0(r2) /* update PTE (accessed bit) */
514 /* Convert linux-style PTE to low word of PPC-style PTE */
515 rlwinm r1,r3,32-10,31,31 /* _PAGE_RW -> PP lsb */
516 rlwinm r2,r3,32-7,31,31 /* _PAGE_DIRTY -> PP lsb */
517 and r1,r1,r2 /* writable if _RW and _DIRTY */
518 rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */
519 rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */
520 ori r1,r1,0xe14 /* clear out reserved bits and M */
521 andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */
522 mtspr SPRN_RPA,r1
523 mfspr r3,SPRN_DMISS
524 tlbld r3
525 mfspr r3,SPRN_SRR1 /* Need to restore CR0 */
526 mtcrf 0x80,r3
527 rfi
528DataAddressInvalid:
529 mfspr r3,SPRN_SRR1
530 rlwinm r1,r3,9,6,6 /* Get load/store bit */
531 addis r1,r1,0x2000
532 mtspr SPRN_DSISR,r1
533 mtctr r0 /* Restore CTR */
534 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
535 mtspr SPRN_SRR1,r2
536 mfspr r1,SPRN_DMISS /* Get failing address */
537 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
538 beq 20f /* Jump if big endian */
539 xori r1,r1,3
54020: mtspr SPRN_DAR,r1 /* Set fault address */
541 mfmsr r0 /* Restore "normal" registers */
542 xoris r0,r0,MSR_TGPR>>16
543 mtcrf 0x80,r3 /* Restore CR0 */
544 mtmsr r0
545 b DataAccess
546
547/*
548 * Handle TLB miss for DATA Store on 603/603e
549 */
550 . = 0x1200
551DataStoreTLBMiss:
552/*
553 * r0: stored ctr
554 * r1: linux style pte ( later becomes ppc hardware pte )
555 * r2: ptr to linux-style pte
556 * r3: scratch
557 */
558 mfctr r0
559 /* Get PTE (linux-style) and check access */
560 mfspr r3,SPRN_DMISS
561 lis r1,KERNELBASE@h /* check if kernel address */
562 cmplw 0,r3,r1
563 mfspr r2,SPRN_SPRG3
564 li r1,_PAGE_RW|_PAGE_USER|_PAGE_PRESENT /* access flags */
565 lwz r2,PGDIR(r2)
566 blt+ 112f
567 lis r2,swapper_pg_dir@ha /* if kernel address, use */
568 addi r2,r2,swapper_pg_dir@l /* kernel page table */
569 mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */
570 rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */
571112: tophys(r2,r2)
572 rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
573 lwz r2,0(r2) /* get pmd entry */
574 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
575 beq- DataAddressInvalid /* return if no mapping */
576 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
577 lwz r3,0(r2) /* get linux-style pte */
578 andc. r1,r1,r3 /* check access & ~permission */
579 bne- DataAddressInvalid /* return if access not permitted */
580 ori r3,r3,_PAGE_ACCESSED|_PAGE_DIRTY
581 /*
582 * NOTE! We are assuming this is not an SMP system, otherwise
583 * we would need to update the pte atomically with lwarx/stwcx.
584 */
585 stw r3,0(r2) /* update PTE (accessed/dirty bits) */
586 /* Convert linux-style PTE to low word of PPC-style PTE */
587 rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */
588 li r1,0xe15 /* clear out reserved bits and M */
589 andc r1,r3,r1 /* PP = user? 2: 0 */
590 mtspr SPRN_RPA,r1
591 mfspr r3,SPRN_DMISS
592 tlbld r3
593 mfspr r3,SPRN_SRR1 /* Need to restore CR0 */
594 mtcrf 0x80,r3
595 rfi
596
/* Without AltiVec support, route the assist trap to unknown_exception. */
597#ifndef CONFIG_ALTIVEC
598#define altivec_assist_exception	unknown_exception
599#endif
600
/* Remaining fixed vectors 0x1300-0x2f00: mostly unused traps routed to
 * unknown_exception; 0x2f00 is the Mac-on-Linux trampoline slot. */
601	EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_EE)
602	EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_EE)
603	EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
604	EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_EE)
605	EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
606	EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
607	EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
608	EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE)
609	EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE)
610	EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE)
611	EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE)
612	EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE)
613	EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE)
614	EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_EE)
615	EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_EE)
616	EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_EE)
617	EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_EE)
618	EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_EE)
619	EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_EE)
620	EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_EE)
621	EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_EE)
622	EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_EE)
623	EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_EE)
624	EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_EE)
625	EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_EE)
626	EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_EE)
627	EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_EE)
628	EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_EE)
629	EXCEPTION(0x2f00, MOLTrampoline, unknown_exception, EXC_XFER_EE_LITE)
630
631	.globl mol_trampoline
632	.set mol_trampoline, i0x2f00
633
634	. = 0x3000
635
/* Relocated target of the 0xf20 AltiVec-unavailable vector (see the
 * branch at 0xf20 above).  From user mode (cr0.ne after the prolog)
 * we lazily load the vector unit; otherwise go to the C handler. */
636AltiVecUnavailable:
637	EXCEPTION_PROLOG
638#ifdef CONFIG_ALTIVEC
639	bne	load_up_altivec		/* if from user, just load it up */
640#endif /* CONFIG_ALTIVEC */
641	addi	r3,r1,STACK_FRAME_OVERHEAD
642	EXC_XFER_EE_LITE(0xf20, altivec_unavailable_exception)
643
644#ifdef CONFIG_ALTIVEC
645/* Note that the AltiVec support is closely modeled after the FP
646 * support.  Changes to one are likely to be applicable to the
647 * other!  */
648load_up_altivec:
649/*
650 * Disable AltiVec for the task which had AltiVec previously,
651 * and save its AltiVec registers in its thread_struct.
652 * Enables AltiVec for use in the kernel on return.
653 * On SMP we know the AltiVec units are free, since we give it up every
654 * switch.  -- Kumar
655 */
656	mfmsr	r5
657	oris	r5,r5,MSR_VEC@h
658	MTMSRD(r5)			/* enable use of AltiVec now */
659	isync
660/*
661 * For SMP, we don't do lazy AltiVec switching because it just gets too
662 * horrendously complex, especially when a task switches from one CPU
663 * to another.  Instead we call giveup_altivec in switch_to.
664 */
665#ifndef CONFIG_SMP
	/* UP lazy switch: save the previous owner's vector state first.
	 * r6 = phys/virt offset (tophys) since we run with the MMU off here. */
666	tophys(r6,0)
667	addis	r3,r6,last_task_used_altivec@ha
668	lwz	r4,last_task_used_altivec@l(r3)
669	cmpwi	0,r4,0
670	beq	1f
671	add	r4,r4,r6
672	addi	r4,r4,THREAD	/* want THREAD of last_task_used_altivec */
673	SAVE_32VRS(0,r10,r4)
674	mfvscr	vr0
675	li	r10,THREAD_VSCR
676	stvx	vr0,r10,r4		/* save previous owner's VSCR too */
677	lwz	r5,PT_REGS(r4)
678	add	r5,r5,r6
679	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
680	lis	r10,MSR_VEC@h
681	andc	r4,r4,r10	/* disable altivec for previous task */
682	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
6831:
684#endif /* CONFIG_SMP */
685	/* enable use of AltiVec after return */
686	oris	r9,r9,MSR_VEC@h
687	mfspr	r5,SPRN_SPRG3		/* current task's THREAD (phys) */
688	li	r4,1
689	li	r10,THREAD_VSCR
690	stw	r4,THREAD_USED_VR(r5)	/* mark task as having used AltiVec */
691	lvx	vr0,r10,r5
692	mtvscr	vr0
693	REST_32VRS(0,r10,r5)
694#ifndef CONFIG_SMP
	/* record the new owner (virtual address) for the next lazy switch */
695	subi	r4,r5,THREAD
696	sub	r4,r4,r6
697	stw	r4,last_task_used_altivec@l(r3)
698#endif /* CONFIG_SMP */
699	/* restore registers and return */
700	/* we haven't used ctr or xer or lr */
701	b	fast_exception_return
702
703/*
704 * giveup_altivec(tsk)
705 * Disable AltiVec for the task given as the argument,
706 * and save the AltiVec registers in its thread_struct.
707 * Enables AltiVec for use in the kernel on return.
708 */
709
710	.globl	giveup_altivec
711giveup_altivec:
712	mfmsr	r5
713	oris	r5,r5,MSR_VEC@h
714	SYNC
715	MTMSRD(r5)			/* enable use of AltiVec now */
716	isync
717	cmpwi	0,r3,0
718	beqlr-				/* if no previous owner, done */
719	addi	r3,r3,THREAD		/* want THREAD of task */
720	lwz	r5,PT_REGS(r3)
721	cmpwi	0,r5,0
	/* cr0 from the compare above is consumed by the "beq 1f" below;
	 * SAVE_32VRS/mfvscr/stvx in between do not modify cr0. */
722	SAVE_32VRS(0, r4, r3)
723	mfvscr	vr0
724	li	r4,THREAD_VSCR
725	stvx	vr0,r4,r3
726	beq	1f
727	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
728	lis	r3,MSR_VEC@h
729	andc	r4,r4,r3	/* disable AltiVec for previous task */
730	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
7311:
732#ifndef CONFIG_SMP
	/* no owner any more: clear the UP lazy-switch pointer */
733	li	r5,0
734	lis	r4,last_task_used_altivec@ha
735	stw	r5,last_task_used_altivec@l(r4)
736#endif /* CONFIG_SMP */
737	blr
738#endif /* CONFIG_ALTIVEC */
739
740/*
741 * This code is jumped to from the startup code to copy
742 * the kernel image to physical address 0.
743 */
744relocate_kernel:
	/* NOTE(review): expects r26 = physical offset the image was loaded at
	 * and r4 = source address for copy_and_flush; both are presumably set
	 * by startup code earlier in this file -- confirm before reuse. */
745	addis	r9,r26,klimit@ha	/* fetch klimit */
746	lwz	r25,klimit@l(r9)
747	addis	r25,r25,-KERNELBASE@h
748	li	r3,0			/* Destination base address */
749	li	r6,0			/* Destination offset */
750	li	r5,0x4000		/* # bytes of memory to copy */
751	bl	copy_and_flush		/* copy the first 0x4000 bytes */
	/* 4f is now within the already-copied first 0x4000 bytes, so we can
	 * continue executing from the destination while copying the rest. */
752	addi	r0,r3,4f@l		/* jump to the address of 4f */
753	mtctr	r0			/* in copy and do the rest. */
754	bctr				/* jump to the copy */
7554:	mr	r5,r25
756	bl	copy_and_flush		/* copy the rest */
757	b	turn_on_mmu
758
759/*
760 * Copy routine used to copy the kernel to start at physical address 0
761 * and flush and invalidate the caches as needed.
762 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
763 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
764 */
765copy_and_flush:
	/* pre-decrement so the inner loop's pre-increment lands on the
	 * intended first word; copies one cache line per outer iteration */
766	addi	r5,r5,-4
767	addi	r6,r6,-4
7684:	li	r0,L1_CACHE_BYTES/4	/* words per cache line */
769	mtctr	r0
7703:	addi	r6,r6,4			/* copy a cache line */
771	lwzx	r0,r6,r4
772	stwx	r0,r6,r3
773	bdnz	3b
774	dcbst	r6,r3			/* write it to memory */
775	sync
776	icbi	r6,r3			/* flush the icache line */
777	cmplw	0,r6,r5
778	blt	4b
779	sync				/* additional sync needed on g4 */
780	isync
781	addi	r5,r5,4
782	addi	r6,r6,4
783	blr
784
785#ifdef CONFIG_SMP
	/* PowerMac secondary-CPU entry stubs: the bootstrap jumps each CPU
	 * into its own 8-byte slot below, which loads its CPU number into
	 * r24 before falling into the common __secondary_start path. */
786	.globl	__secondary_start_pmac_0
787__secondary_start_pmac_0:
788	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
789	li	r24,0
790	b	1f
791	li	r24,1
792	b	1f
793	li	r24,2
794	b	1f
795	li	r24,3
7961:
797	/* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
798	   set to map the 0xf0000000 - 0xffffffff region */
799	mfmsr	r0
800	rlwinm	r0,r0,0,28,26		/* clear DR (0x10) */
801	SYNC
802	mtmsr	r0
803	isync
804
805	.globl	__secondary_start
806__secondary_start:
	/* r24 = this CPU's number (set in the entry stubs above) */
807	/* Copy some CPU settings from CPU 0 */
808	bl	__restore_cpu_setup
809
810	lis	r3,-KERNELBASE@h
811	mr	r4,r24
812	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
813#ifdef CONFIG_6xx
814	lis	r3,-KERNELBASE@h
815	bl	init_idle_6xx
816#endif /* CONFIG_6xx */
817
818	/* get current_thread_info and current */
819	lis	r1,secondary_ti@ha
820	tophys(r1,r1)
821	lwz	r1,secondary_ti@l(r1)
822	tophys(r2,r1)
823	lwz	r2,TI_TASK(r2)
824
825	/* stack */
826	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
827	li	r0,0
828	tophys(r3,r1)
829	stw	r0,0(r3)		/* NULL back-chain terminates the stack */
830
831	/* load up the MMU */
832	bl	load_up_mmu
833
834	/* ptr to phys current thread */
835	tophys(r4,r2)
836	addi	r4,r4,THREAD	/* phys address of our thread_struct */
837	CLR_TOP32(r4)
838	mtspr	SPRN_SPRG3,r4
839	li	r3,0
840	mtspr	SPRN_SPRG2,r3	/* 0 => not in RTAS */
841
	/* rfi below switches translation on and enters start_secondary */
842	/* enable MMU and jump to start_secondary */
843	li	r4,MSR_KERNEL
844	FIX_SRR1(r4,r5)
845	lis	r3,start_secondary@h
846	ori	r3,r3,start_secondary@l
847	mtspr	SPRN_SRR0,r3
848	mtspr	SPRN_SRR1,r4
849	SYNC
850	RFI
851#endif /* CONFIG_SMP */
852
853/*
854 * Those generic dummy functions are kept for CPUs not
855 * included in CONFIG_6xx
856 */
857#if !defined(CONFIG_6xx)
/* blr-only stubs so callers still link when the real 6xx versions
 * (provided elsewhere) are not built in. */
858_GLOBAL(__save_cpu_setup)
859	blr
860_GLOBAL(__restore_cpu_setup)
861	blr
862#endif /* !defined(CONFIG_6xx) */
863
864
865/*
866 * Load stuff into the MMU.  Intended to be called with
867 * IR=0 and DR=0.
868 */
869load_up_mmu:
870	sync			/* Force all PTE updates to finish */
871	isync
872	tlbia			/* Clear all TLB entries */
873	sync			/* wait for tlbia/tlbie to finish */
874	TLBSYNC			/* ... on all CPUs */
875	/* Load the SDR1 register (hash table base & size) */
876	lis	r6,_SDR1@ha
877	tophys(r6,r6)
878	lwz	r6,_SDR1@l(r6)
879	mtspr	SPRN_SDR1,r6
880	li	r0,16		/* load up segment register values */
881	mtctr	r0	/* for context 0 */
882	lis	r3,0x2000	/* Ku = 1, VSID = 0 */
883	li	r4,0
8843:	mtsrin	r3,r4
885	addi	r3,r3,0x111	/* increment VSID */
886	addis	r4,r4,0x1000	/* address of next segment */
887	bdnz	3b
888
889/* Load the BAT registers with the values set up by MMU_init.
890   MMU_init takes care of whether we're on a 601 or not. */
891	mfpvr	r3
892	srwi	r3,r3,16
893	cmpwi	r3,1
	/* NOTE(review): cr0 from the 601 check above has no visible consumer
	 * before LOAD_BAT below; LOAD_BAT presumably tests it -- confirm
	 * against the macro definition earlier in this file. */
894	lis	r3,BATS@ha
895	addi	r3,r3,BATS@l
896	tophys(r3,r3)
897	LOAD_BAT(0,r3,r4,r5)
898	LOAD_BAT(1,r3,r4,r5)
899	LOAD_BAT(2,r3,r4,r5)
900	LOAD_BAT(3,r3,r4,r5)
901
902	blr
903
904/*
905 * This is where the main kernel code starts.
906 */
907start_here:
908	/* ptr to current */
909	lis	r2,init_task@h
910	ori	r2,r2,init_task@l
911	/* Set up for using our exception vectors */
912	/* ptr to phys current thread */
913	tophys(r4,r2)
914	addi	r4,r4,THREAD	/* init task's THREAD */
915	CLR_TOP32(r4)
916	mtspr	SPRN_SPRG3,r4
917	li	r3,0
918	mtspr	SPRN_SPRG2,r3	/* 0 => not in RTAS */
919
920	/* stack */
921	lis	r1,init_thread_union@ha
922	addi	r1,r1,init_thread_union@l
923	li	r0,0
924	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
925/*
926 * Do early bootinfo parsing, platform-specific initialization,
927 * and set up the MMU.
928 */
929	mr	r3,r31
930	mr	r4,r30
931	mr	r5,r29
932	mr	r6,r28
933	mr	r7,r27
934	bl	machine_init
935	bl	MMU_init
936
937/*
938 * Go back to running unmapped so we can load up new values
939 * for SDR1 (hash table pointer) and the segment registers
940 * and change to using our exception vectors.
941 */
942	lis	r4,2f@h
943	ori	r4,r4,2f@l
944	tophys(r4,r4)
945	li	r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
946	FIX_SRR1(r3,r5)
947	mtspr	SPRN_SRR0,r4
948	mtspr	SPRN_SRR1,r3
949	SYNC
950	RFI
951/* Load up the kernel context */
9522:	bl	load_up_mmu
953
954#ifdef CONFIG_BDI_SWITCH
955	/* Add helper information for the Abatron bdiGDB debugger.
956	 * We do this here because we know the mmu is disabled, and
957	 * will be enabled for real in just a few instructions.
958	 */
959	lis	r5, abatron_pteptrs@h
960	ori	r5, r5, abatron_pteptrs@l
	/* base register r0 in D-form addressing means literal zero, so this
	 * stores the (virtual) pointer at physical address 0xf0 */
961	stw	r5, 0xf0(r0)	/* This must match your Abatron config */
962	lis	r6, swapper_pg_dir@h
963	ori	r6, r6, swapper_pg_dir@l
964	tophys(r5, r5)
965	stw	r6, 0(r5)
966#endif /* CONFIG_BDI_SWITCH */
967
968/* Now turn on the MMU for real! */
969	li	r4,MSR_KERNEL
970	FIX_SRR1(r4,r5)
971	lis	r3,start_kernel@h
972	ori	r3,r3,start_kernel@l
973	mtspr	SPRN_SRR0,r3
974	mtspr	SPRN_SRR1,r4
975	SYNC
976	RFI
977
978/*
979 * Set up the segment registers for a new context.
980 */
981_GLOBAL(set_context)
	/* r3 = new context id, r4 = PGD pointer (used only for the Abatron
	 * debugger hook below) */
982	mulli	r3,r3,897	/* multiply context by skew factor */
983	rlwinm	r3,r3,4,8,27	/* VSID = (context & 0xfffff) << 4 */
984	addis	r3,r3,0x6000	/* Set Ks, Ku bits */
985	li	r0,NUM_USER_SEGMENTS
986	mtctr	r0
987
988#ifdef CONFIG_BDI_SWITCH
989	/* Context switch the PTE pointer for the Abatron BDI2000.
990	 * The PGDIR is passed as second argument.
991	 */
992	lis	r5, KERNELBASE@h
993	lwz	r5, 0xf0(r5)
994	stw	r4, 0x4(r5)
995#endif
996	li	r4,0
997	isync
9983:
999	mtsrin	r3,r4
1000	addi	r3,r3,0x111	/* next VSID */
1001	rlwinm	r3,r3,0,8,3	/* clear out any overflow from VSID field */
1002	addis	r4,r4,0x1000	/* address of next segment */
1003	bdnz	3b
1004	sync
1005	isync
1006	blr
1007
1008/*
1009 * An undocumented "feature" of 604e requires that the v bit
1010 * be cleared before changing BAT values.
1011 *
1012 * Also, newer IBM firmware does not clear bat3 and 4 so
1013 * this makes sure it's done.
1014 *  -- Cort
1015 */
1016clear_bats:
1017	li	r10,0
1018	mfspr	r9,SPRN_PVR
1019	rlwinm	r9,r9,16,16,31		/* r9 = 1 for 601, 4 for 604 */
1020	cmpwi	r9, 1
1021	beq	1f
1022
	/* the 601 case branches past the DBAT writes above; only the
	 * IBATs below are cleared on a 601 */
1023	mtspr	SPRN_DBAT0U,r10
1024	mtspr	SPRN_DBAT0L,r10
1025	mtspr	SPRN_DBAT1U,r10
1026	mtspr	SPRN_DBAT1L,r10
1027	mtspr	SPRN_DBAT2U,r10
1028	mtspr	SPRN_DBAT2L,r10
1029	mtspr	SPRN_DBAT3U,r10
1030	mtspr	SPRN_DBAT3L,r10
10311:
1032	mtspr	SPRN_IBAT0U,r10
1033	mtspr	SPRN_IBAT0L,r10
1034	mtspr	SPRN_IBAT1U,r10
1035	mtspr	SPRN_IBAT1L,r10
1036	mtspr	SPRN_IBAT2U,r10
1037	mtspr	SPRN_IBAT2L,r10
1038	mtspr	SPRN_IBAT3U,r10
1039	mtspr	SPRN_IBAT3L,r10
1040BEGIN_FTR_SECTION
1041	/* Here's a tweak: at this point, CPU setup have
1042	 * not been called yet, so HIGH_BAT_EN may not be
1043	 * set in HID0 for the 745x processors. However, it
1044	 * seems that doesn't affect our ability to actually
1045	 * write to these SPRs.
1046	 */
1047	mtspr	SPRN_DBAT4U,r10
1048	mtspr	SPRN_DBAT4L,r10
1049	mtspr	SPRN_DBAT5U,r10
1050	mtspr	SPRN_DBAT5L,r10
1051	mtspr	SPRN_DBAT6U,r10
1052	mtspr	SPRN_DBAT6L,r10
1053	mtspr	SPRN_DBAT7U,r10
1054	mtspr	SPRN_DBAT7L,r10
1055	mtspr	SPRN_IBAT4U,r10
1056	mtspr	SPRN_IBAT4L,r10
1057	mtspr	SPRN_IBAT5U,r10
1058	mtspr	SPRN_IBAT5L,r10
1059	mtspr	SPRN_IBAT6U,r10
1060	mtspr	SPRN_IBAT6L,r10
1061	mtspr	SPRN_IBAT7U,r10
1062	mtspr	SPRN_IBAT7L,r10
1063END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
1064	blr
1065
/* Invalidate the TLB entries covering the low 4MB (0x400000), one 4KB
 * page at a time, counting r10 down from 0x400000.
 *
 * Fix: the loop used "blt 1b", but after the first "addic." the counter
 * is 0x3ff000 (positive => GT), so LT was false and the loop exited after
 * invalidating only one page.  Branch while the counter is still greater
 * than zero instead, as in the successor arch/powerpc/kernel/head_32.S. */
1066flush_tlbs:
1067	lis	r10, 0x40
10681:	addic.	r10, r10, -0x1000
1069	tlbie	r10
1070	bgt	1b			/* loop while r10 > 0 */
1071	sync
1072	blr
1073
/* Turn address translation off: clear MSR_IR/MSR_DR and rfi to the
 * physical address of __after_mmu_off.  No-op (beqlr) if the MMU is
 * already off.  On entry r3 = physical address of _start. */
1074mmu_off:
1075 	addi	r4, r3, __after_mmu_off - _start
1076	mfmsr	r3
1077	andi.	r0,r3,MSR_DR|MSR_IR		/* MMU enabled? */
1078	beqlr
1079	andc	r3,r3,r0
1080	mtspr	SPRN_SRR0,r4
1081	mtspr	SPRN_SRR1,r3
1082	sync
1083	RFI
1084
1085/*
1086 * Use the first pair of BAT registers to map the 1st 16MB
1087 * of RAM to KERNELBASE.  From this point on we can't safely
1088 * call OF any more.
1089 */
1090initial_bats:
1091	lis	r11,KERNELBASE@h
1092	mfspr	r9,SPRN_PVR
1093	rlwinm	r9,r9,16,16,31		/* r9 = 1 for 601, 4 for 604 */
1094	cmpwi	0,r9,1
1095	bne	4f
	/* 601: two 8MB BAT pairs (601 BATs max out at 8MB blocks) */
1096	ori	r11,r11,4		/* set up BAT registers for 601 */
1097	li	r8,0x7f			/* valid, block length = 8MB */
1098	oris	r9,r11,0x800000@h	/* set up BAT reg for 2nd 8M */
1099	oris	r10,r8,0x800000@h	/* set up BAT reg for 2nd 8M */
1100	mtspr	SPRN_IBAT0U,r11		/* N.B. 601 has valid bit in */
1101	mtspr	SPRN_IBAT0L,r8		/* lower BAT register */
1102	mtspr	SPRN_IBAT1U,r9
1103	mtspr	SPRN_IBAT1L,r10
1104	isync
1105	blr
1106
	/* non-601 (604 and later): one 256MB I+D BAT pair */
11074:	tophys(r8,r11)
1108#ifdef CONFIG_SMP
1109	ori	r8,r8,0x12		/* R/W access, M=1 */
1110#else
1111	ori	r8,r8,2			/* R/W access */
1112#endif /* CONFIG_SMP */
1113	ori	r11,r11,BL_256M<<2|0x2	/* set up BAT registers for 604 */
1114
1115	mtspr	SPRN_DBAT0L,r8		/* N.B. 6xx (not 601) have valid */
1116	mtspr	SPRN_DBAT0U,r11		/* bit in upper BAT register */
1117	mtspr	SPRN_IBAT0L,r8
1118	mtspr	SPRN_IBAT0U,r11
1119	isync
1120	blr
1121
1122#ifdef CONFIG_BOOTX_TEXT
1123setup_disp_bat:
1124	/*
1125	 * setup the display bat prepared for us in prom.c
1126	 */
	/* reloc_offset clobbers lr, so save/restore it around the call;
	 * r11/r8 receive the upper/lower BAT words stored at disp_BAT */
1127	mflr	r8
1128	bl	reloc_offset
1129	mtlr	r8
1130	addis	r8,r3,disp_BAT@ha
1131	addi	r8,r8,disp_BAT@l
1132	lwz	r11,0(r8)
1133	lwz	r8,4(r8)
1134	mfspr	r9,SPRN_PVR
1135	rlwinm	r9,r9,16,16,31		/* r9 = 1 for 601, 4 for 604 */
1136	cmpwi	0,r9,1
1137	beq	1f
1138	mtspr	SPRN_DBAT3L,r8
1139	mtspr	SPRN_DBAT3U,r11
1140	blr
	/* 601 has unified BATs, so program the IBAT pair instead */
11411:	mtspr	SPRN_IBAT3L,r8
1142	mtspr	SPRN_IBAT3U,r11
1143	blr
1144
1145#endif /* defined(CONFIG_BOOTX_TEXT) */
1146
1147#ifdef CONFIG_8260
1148/* Jump into the system reset for the rom.
1149 * We first disable the MMU, and then jump to the ROM reset address.
1150 *
1151 * r3 is the board info structure, r4 is the location for starting.
1152 * I use this for building a small kernel that can load other kernels,
1153 * rather than trying to write or rely on a rom monitor that can tftp load.
1154 */
1155	.globl	m8260_gorom
1156m8260_gorom:
	/* disable external interrupts before touching HID0/MSR */
1157	mfmsr	r0
1158	rlwinm	r0,r0,0,17,15	/* clear MSR_EE in r0 */
1159	sync
1160	mtmsr	r0
1161	sync
	/* turn off the instruction and data caches */
1162	mfspr	r11, SPRN_HID0
1163	lis	r10, 0
1164	ori	r10,r10,HID0_ICE|HID0_DCE
1165	andc	r11, r11, r10
1166	mtspr	SPRN_HID0, r11
1167	isync
	/* rfi to 2f at its physical address with the MMU off (MSR_IR/DR
	 * clear), then branch to the caller-supplied start address in r4 */
1168	li	r5, MSR_ME|MSR_RI
1169	lis	r6,2f@h
1170	addis	r6,r6,-KERNELBASE@h
1171	ori	r6,r6,2f@l
1172	mtspr	SPRN_SRR0,r6
1173	mtspr	SPRN_SRR1,r5
1174	isync
1175	sync
1176	rfi
11772:
1178	mtlr	r4
1179	blr
1180#endif
1181
1182
1183/*
1184 * We put a few things here that have to be page-aligned.
1185 * This stuff goes at the beginning of the data segment,
1186 * which is page-aligned.
1187 */
1188 .data
1189 .globl sdata
1190sdata:
1191 .globl empty_zero_page
1192empty_zero_page:
1193 .space 4096
1194
1195 .globl swapper_pg_dir
1196swapper_pg_dir:
1197 .space 4096
1198
1199/*
1200 * This space gets a copy of optional info passed to us by the bootstrap
1201 * Used to pass parameters into the kernel like root=/dev/sda1, etc.
1202 */
1203 .globl cmd_line
1204cmd_line:
1205 .space 512
1206
1207 .globl intercept_table
1208intercept_table:
1209 .long 0, 0, i0x200, i0x300, i0x400, 0, i0x600, i0x700
1210 .long i0x800, 0, 0, 0, 0, i0xd00, 0, 0
1211 .long 0, 0, 0, i0x1300, 0, 0, 0, 0
1212 .long 0, 0, 0, 0, 0, 0, 0, 0
1213 .long 0, 0, 0, 0, 0, 0, 0, 0
1214 .long 0, 0, 0, 0, 0, 0, 0, 0
1215
1216/* Room for two PTE pointers, usually the kernel and current user pointers
1217 * to their respective root page table.
1218 */
1219abatron_pteptrs:
1220 .space 8
diff --git a/arch/ppc/kernel/head_44x.S b/arch/ppc/kernel/head_44x.S
deleted file mode 100644
index ebb5a403829f..000000000000
--- a/arch/ppc/kernel/head_44x.S
+++ /dev/null
@@ -1,769 +0,0 @@
1/*
2 * Kernel execution entry point code.
3 *
4 * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
5 * Initial PowerPC version.
6 * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
7 * Rewritten for PReP
8 * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 9 *    Low-level exception handlers, MMU support, and rewrite.
10 * Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
11 * PowerPC 8xx modifications.
12 * Copyright (c) 1998-1999 TiVo, Inc.
13 * PowerPC 403GCX modifications.
14 * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
15 * PowerPC 403GCX/405GP modifications.
16 * Copyright 2000 MontaVista Software Inc.
17 * PPC405 modifications
18 * PowerPC 403GCX/405GP modifications.
19 * Author: MontaVista Software, Inc.
20 * frank_rowand@mvista.com or source@mvista.com
21 * debbie_chu@mvista.com
22 * Copyright 2002-2005 MontaVista Software, Inc.
23 * PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
24 *
25 * This program is free software; you can redistribute it and/or modify it
26 * under the terms of the GNU General Public License as published by the
27 * Free Software Foundation; either version 2 of the License, or (at your
28 * option) any later version.
29 */
30
31#include <asm/processor.h>
32#include <asm/page.h>
33#include <asm/mmu.h>
34#include <asm/pgtable.h>
35#include <asm/ibm4xx.h>
36#include <asm/ibm44x.h>
37#include <asm/cputable.h>
38#include <asm/thread_info.h>
39#include <asm/ppc_asm.h>
40#include <asm/asm-offsets.h>
41#include "head_booke.h"
42
43
44/* As with the other PowerPC ports, it is expected that when code
45 * execution begins here, the following registers contain valid, yet
46 * optional, information:
47 *
48 * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
49 * r4 - Starting address of the init RAM disk
50 * r5 - Ending address of the init RAM disk
51 * r6 - Start of kernel command line string (e.g. "mem=128")
52 * r7 - End of kernel command line string
53 *
54 */
/* Boot entry for 44x: save the boot parameters (r31-r27), pin a 256MB TLB
 * entry mapping KERNELBASE -> physical 0, program the IVORs, then rfi into
 * start_kernel with translation using the pinned entry. */
55	.text
56_GLOBAL(_stext)
57_GLOBAL(_start)
58	/*
59	 * Reserve a word at a fixed location to store the address
60	 * of abatron_pteptrs
61	 */
62	nop
63/*
64 * Save parameters we are passed
65 */
66	mr	r31,r3
67	mr	r30,r4
68	mr	r29,r5
69	mr	r28,r6
70	mr	r27,r7
71	li	r24,0		/* CPU number */
72
73/*
74 * Set up the initial MMU state
75 *
76 * We are still executing code at the virtual address
77 * mappings set by the firmware for the base of RAM.
78 *
79 * We first invalidate all TLB entries but the one
80 * we are running from.  We then load the KERNELBASE
81 * mappings so we can begin to use kernel addresses
82 * natively and so the interrupt vector locations are
83 * permanently pinned (necessary since Book E
84 * implementations always have translation enabled).
85 *
86 * TODO: Use the known TLB entry we are running from to
87 *	 determine which physical region we are located
88 *	 in.  This can be used to determine where in RAM
89 *	 (on a shared CPU system) or PCI memory space
90 *	 (on a DRAMless system) we are located.
91 *	 For now, we assume a perfect world which means
92 *	 we are located at the base of DRAM (physical 0).
93 */
94
95/*
96 * Search TLB for entry that we are currently using.
97 * Invalidate all entries but the one we are using.
98 */
99	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
100	mfspr	r3,SPRN_PID			/* Get PID */
101	mfmsr	r4				/* Get MSR */
102	andi.	r4,r4,MSR_IS@l			/* TS=1? */
103	beq	wmmucr				/* If not, leave STS=0 */
104	oris	r3,r3,PPC44x_MMUCR_STS@h	/* Set STS=1 */
105wmmucr:	mtspr	SPRN_MMUCR,r3			/* Put MMUCR */
106	sync
107
	/* bl/mflr trick: discover our current run address so tlbsx can
	 * find the TLB entry that maps the code we are executing */
108	bl	invstr				/* Find our address */
109invstr:	mflr	r5				/* Make it accessible */
110	tlbsx	r23,0,r5			/* Find entry we are in */
111	li	r4,0				/* Start at TLB entry 0 */
112	li	r3,0				/* Set PAGEID inval value */
1131:	cmpw	r23,r4				/* Is this our entry? */
114	beq	skpinv				/* If so, skip the inval */
115	tlbwe	r3,r4,PPC44x_TLB_PAGEID		/* If not, inval the entry */
116skpinv:	addi	r4,r4,1				/* Increment */
117	cmpwi	r4,64				/* Are we done? */
118	bne	1b				/* If not, repeat */
119	isync					/* If so, context change */
120
121/*
122 * Configure and load pinned entry into TLB slot 63.
123 */
124
125	lis	r3,KERNELBASE@h		/* Load the kernel virtual address */
126	ori	r3,r3,KERNELBASE@l
127
128	/* Kernel is at the base of RAM */
129	li r4, 0			/* Load the kernel physical address */
130
131	/* Load the kernel PID = 0 */
132	li	r0,0
133	mtspr	SPRN_PID,r0
134	sync
135
136	/* Initialize MMUCR */
137	li	r5,0
138	mtspr	SPRN_MMUCR,r5
139	sync
140
141 	/* pageid fields */
142	clrrwi	r3,r3,10		/* Mask off the effective page number */
143	ori	r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M
144
145	/* xlat fields */
146	clrrwi	r4,r4,10		/* Mask off the real page number */
147					/* ERPN is 0 for first 4GB page */
148
149	/* attrib fields */
150	/* Added guarded bit to protect against speculative loads/stores */
151	li	r5,0
152	ori	r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)
153
154        li      r0,63                    /* TLB slot 63 */
155
156	tlbwe	r3,r0,PPC44x_TLB_PAGEID	/* Load the pageid fields */
157	tlbwe	r4,r0,PPC44x_TLB_XLAT	/* Load the translation fields */
158	tlbwe	r5,r0,PPC44x_TLB_ATTRIB	/* Load the attrib/access fields */
159
	/* rfi to 3f switches execution onto the newly pinned mapping */
160	/* Force context change */
161	mfmsr	r0
162	mtspr	SPRN_SRR1, r0
163	lis	r0,3f@h
164	ori	r0,r0,3f@l
165	mtspr	SPRN_SRR0,r0
166	sync
167	rfi
168
169	/* If necessary, invalidate original entry we used */
1703:	cmpwi	r23,63
171	beq	4f
172	li	r6,0
173	tlbwe   r6,r23,PPC44x_TLB_PAGEID
174	isync
175
1764:
177#ifdef CONFIG_SERIAL_TEXT_DEBUG
178	/*
179	 * Add temporary UART mapping for early debug.
180	 * We can map UART registers wherever we want as long as they don't
181	 * interfere with other system mappings (e.g. with pinned entries).
182	 * For an example of how we handle this - see ocotea.h. --ebs
183	 */
184	/* pageid fields */
185	lis	r3,UART0_IO_BASE@h
186	ori	r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_4K
187
188	/* xlat fields */
189	lis	r4,UART0_PHYS_IO_BASE@h		/* RPN depends on SoC */
190#ifdef UART0_PHYS_ERPN
191	ori	r4,r4,UART0_PHYS_ERPN		/* Add ERPN if above 4GB */
192#endif
193
194	/* attrib fields */
195	li	r5,0
196	ori	r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_I | PPC44x_TLB_G)
197
198        li      r0,62                    /* TLB slot 62 */
199
200	tlbwe	r3,r0,PPC44x_TLB_PAGEID	/* Load the pageid fields */
201	tlbwe	r4,r0,PPC44x_TLB_XLAT	/* Load the translation fields */
202	tlbwe	r5,r0,PPC44x_TLB_ATTRIB	/* Load the attrib/access fields */
203
204	/* Force context change */
205	isync
206#endif /* CONFIG_SERIAL_TEXT_DEBUG */
207
208	/* Establish the interrupt vector offsets */
209	SET_IVOR(0,  CriticalInput);
210	SET_IVOR(1,  MachineCheck);
211	SET_IVOR(2,  DataStorage);
212	SET_IVOR(3,  InstructionStorage);
213	SET_IVOR(4,  ExternalInput);
214	SET_IVOR(5,  Alignment);
215	SET_IVOR(6,  Program);
216	SET_IVOR(7,  FloatingPointUnavailable);
217	SET_IVOR(8,  SystemCall);
218	SET_IVOR(9,  AuxillaryProcessorUnavailable);
219	SET_IVOR(10, Decrementer);
220	SET_IVOR(11, FixedIntervalTimer);
221	SET_IVOR(12, WatchdogTimer);
222	SET_IVOR(13, DataTLBError);
223	SET_IVOR(14, InstructionTLBError);
224	SET_IVOR(15, Debug);
225
226	/* Establish the interrupt vector base */
227	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
228	mtspr	SPRN_IVPR,r4
229
230	/*
231	 * This is where the main kernel code starts.
232	 */
233
234	/* ptr to current */
235	lis	r2,init_task@h
236	ori	r2,r2,init_task@l
237
238	/* ptr to current thread */
239	addi	r4,r2,THREAD	/* init task's THREAD */
240	mtspr	SPRN_SPRG3,r4
241
242	/* stack */
243	lis	r1,init_thread_union@h
244	ori	r1,r1,init_thread_union@l
245	li	r0,0
246	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
247
248	bl	early_init
249
250/*
251 * Decide what sort of machine this is and initialize the MMU.
252 */
253	mr	r3,r31
254	mr	r4,r30
255	mr	r5,r29
256	mr	r6,r28
257	mr	r7,r27
258	bl	machine_init
259	bl	MMU_init
260
	/* publish abatron_pteptrs at the fixed word reserved by the nop at
	 * _start (KERNELBASE), and point its first slot at swapper_pg_dir */
261	/* Setup PTE pointers for the Abatron bdiGDB */
262	lis	r6, swapper_pg_dir@h
263	ori	r6, r6, swapper_pg_dir@l
264	lis	r5, abatron_pteptrs@h
265	ori	r5, r5, abatron_pteptrs@l
266	lis	r4, KERNELBASE@h
267	ori	r4, r4, KERNELBASE@l
268	stw	r5, 0(r4)	/* Save abatron_pteptrs at a fixed location */
269	stw	r6, 0(r5)
270
271	/* Let's move on */
272	lis	r4,start_kernel@h
273	ori	r4,r4,start_kernel@l
274	lis	r3,MSR_KERNEL@h
275	ori	r3,r3,MSR_KERNEL@l
276	mtspr	SPRN_SRR0,r4
277	mtspr	SPRN_SRR1,r3
278	rfi			/* change context and jump to start_kernel */
279
280/*
281 * Interrupt vector entry code
282 *
283 * The Book E MMUs are always on so we don't need to handle
284 * interrupts in real mode as with previous PPC processors. In
285 * this case we handle interrupts in the kernel virtual address
286 * space.
287 *
288 * Interrupt vectors are dynamically placed relative to the
289 * interrupt prefix as determined by the address of interrupt_base.
290 * The interrupt vectors offsets are programmed using the labels
291 * for each interrupt vector entry.
292 *
293 * Interrupt vectors must be aligned on a 16 byte boundary.
294 * We align on a 32 byte cache line boundary for good measure.
295 */
296
/* Base of the dynamically-placed vector entries; each entry's offset from
 * here was programmed into the corresponding IVORn by SET_IVOR above. */
297interrupt_base:
298	/* Critical Input Interrupt */
299	CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)
300
301	/* Machine Check Interrupt */
302#ifdef CONFIG_440A
303	MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
304#else
305	CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
306#endif
307
308	/* Data Storage Interrupt */
	/* Fast path for write faults ("changed"-bit emulation): if the PTE
	 * exists and is writable, set DIRTY/ACCESSED/HWWRITE and rewrite the
	 * existing TLB entry's attributes in place; anything else bails to
	 * data_access for the full C page-fault path. */
309	START_EXCEPTION(DataStorage)
310	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
311	mtspr	SPRN_SPRG1, r11
312	mtspr	SPRN_SPRG4W, r12
313	mtspr	SPRN_SPRG5W, r13
314	mfcr	r11
315	mtspr	SPRN_SPRG7W, r11
316
317	/*
318	 * Check if it was a store fault, if not then bail
319	 * because a user tried to access a kernel or
320	 * read-protected page.  Otherwise, get the
321	 * offending address and handle it.
322	 */
323	mfspr	r10, SPRN_ESR
324	andis.	r10, r10, ESR_ST@h
325	beq	2f
326
327	mfspr	r10, SPRN_DEAR		/* Get faulting address */
328
329	/* If we are faulting a kernel address, we have to use the
330	 * kernel page tables.
331	 */
332	lis	r11, TASK_SIZE@h
333	cmplw	r10, r11
334	blt+	3f
335	lis	r11, swapper_pg_dir@h
336	ori	r11, r11, swapper_pg_dir@l
337
338	mfspr	r12,SPRN_MMUCR
339	rlwinm	r12,r12,0,0,23		/* Clear TID */
340
341	b	4f
342
343	/* Get the PGD for the current thread */
3443:
345	mfspr	r11,SPRN_SPRG3
346	lwz	r11,PGDIR(r11)
347
348	/* Load PID into MMUCR TID */
349	mfspr	r12,SPRN_MMUCR		/* Get MMUCR */
350	mfspr	r13,SPRN_PID		/* Get PID */
351	rlwimi	r12,r13,0,24,31		/* Set TID */
352
3534:
354	mtspr	SPRN_MMUCR,r12
355
356	rlwinm	r12, r10, 13, 19, 29	/* Compute pgdir/pmd offset */
357	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
358	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
359	beq	2f			/* Bail if no table */
360
361	rlwimi	r12, r10, 23, 20, 28	/* Compute pte address */
362	lwz	r11, 4(r12)		/* Get pte entry */
	/* offset 4: presumably the low word of the 64-bit PTE (big-endian)
	 * -- see the finish_tlb_load register contract below */
363
364	andi.	r13, r11, _PAGE_RW	/* Is it writeable? */
365	beq	2f			/* Bail if not */
366
367	/* Update 'changed'.
368	 */
369	ori	r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
370	stw	r11, 4(r12)		/* Update Linux page table */
371
	/* Build the TLB ATTRIB word: supervisor perms directly from the PTE
	 * bits, user perms only where both USER and the base perm are set. */
372	li	r13, PPC44x_TLB_SR@l	/* Set SR */
373	rlwimi	r13, r11, 29, 29, 29	/* SX = _PAGE_HWEXEC */
374	rlwimi	r13, r11, 0, 30, 30	/* SW = _PAGE_RW */
375	rlwimi	r13, r11, 29, 28, 28	/* UR = _PAGE_USER */
376	rlwimi	r12, r11, 31, 26, 26	/* (_PAGE_USER>>1)->r12 */
377	rlwimi	r12, r11, 29, 30, 30	/* (_PAGE_USER>>3)->r12 */
378	and	r12, r12, r11		/* HWEXEC/RW & USER */
379	rlwimi	r13, r12, 0, 26, 26	/* UX = HWEXEC & USER */
380	rlwimi	r13, r12, 3, 27, 27	/* UW = RW & USER */
381
382	rlwimi	r11,r13,0,26,31	/* Insert static perms */
383
384	rlwinm	r11,r11,0,20,15	/* Clear U0-U3 */
385
386	/* find the TLB index that caused the fault.  It has to be here. */
387	tlbsx	r10, 0, r10
388
389	tlbwe	r11, r10, PPC44x_TLB_ATTRIB	/* Write ATTRIB */
390
391	/* Done...restore registers and get out of here.
392	 */
393	mfspr	r11, SPRN_SPRG7R
394	mtcr	r11
395	mfspr	r13, SPRN_SPRG5R
396	mfspr	r12, SPRN_SPRG4R
397
398	mfspr	r11, SPRN_SPRG1
399	mfspr	r10, SPRN_SPRG0
400	rfi			/* Force context change */
401
4022:
403	/*
404	 * The bailout.  Restore registers to pre-exception conditions
405	 * and call the heavyweights to help us out.
406	 */
407	mfspr	r11, SPRN_SPRG7R
408	mtcr	r11
409	mfspr	r13, SPRN_SPRG5R
410	mfspr	r12, SPRN_SPRG4R
411
412	mfspr	r11, SPRN_SPRG1
413	mfspr	r10, SPRN_SPRG0
414	b	data_access
415
416	/* Instruction Storage Interrupt */
417	INSTRUCTION_STORAGE_EXCEPTION
418
419	/* External Input Interrupt */
420	EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)
421
422	/* Alignment Interrupt */
423	ALIGNMENT_EXCEPTION
424
425	/* Program Interrupt */
426	PROGRAM_EXCEPTION
427
428	/* Floating Point Unavailable Interrupt */
429#ifdef CONFIG_PPC_FPU
430	FP_UNAVAILABLE_EXCEPTION
431#else
432	EXCEPTION(0x2010, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
433#endif
434
435	/* System Call Interrupt */
436	START_EXCEPTION(SystemCall)
437	NORMAL_EXCEPTION_PROLOG
438	EXC_XFER_EE_LITE(0x0c00, DoSyscall)
439
	/* (identifier spelling "Auxillary" kept: it must match the
	 * SET_IVOR(9, ...) registration above) */
440	/* Auxiliary Processor Unavailable Interrupt */
441	EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
442
443	/* Decrementer Interrupt */
444	DECREMENTER_EXCEPTION
445
446	/* Fixed Interval Timer Interrupt */
447	/* TODO: Add FIT support */
448	EXCEPTION(0x1010, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)
449
450	/* Watchdog Timer Interrupt */
451	/* TODO: Add watchdog support */
452#ifdef CONFIG_BOOKE_WDT
453	CRITICAL_EXCEPTION(0x1020, WatchdogTimer, WatchdogException)
454#else
455	CRITICAL_EXCEPTION(0x1020, WatchdogTimer, unknown_exception)
456#endif
457
458	/* Data TLB Error Interrupt */
	/* TLB miss on a data access: walk the (kernel or current) page table;
	 * if a present PTE is found, mark it ACCESSED and fall into the common
	 * finish_tlb_load, otherwise bail to data_access. */
459	START_EXCEPTION(DataTLBError)
460	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
461	mtspr	SPRN_SPRG1, r11
462	mtspr	SPRN_SPRG4W, r12
463	mtspr	SPRN_SPRG5W, r13
464	mfcr	r11
465	mtspr	SPRN_SPRG7W, r11
466	mfspr	r10, SPRN_DEAR		/* Get faulting address */
467
468	/* If we are faulting a kernel address, we have to use the
469	 * kernel page tables.
470	 */
471	lis	r11, TASK_SIZE@h
472	cmplw	r10, r11
473	blt+	3f
474	lis	r11, swapper_pg_dir@h
475	ori	r11, r11, swapper_pg_dir@l
476
477	mfspr	r12,SPRN_MMUCR
478	rlwinm	r12,r12,0,0,23		/* Clear TID */
479
480	b	4f
481
482	/* Get the PGD for the current thread */
4833:
484	mfspr	r11,SPRN_SPRG3
485	lwz	r11,PGDIR(r11)
486
487	/* Load PID into MMUCR TID */
488	mfspr	r12,SPRN_MMUCR
489	mfspr	r13,SPRN_PID		/* Get PID */
490	rlwimi	r12,r13,0,24,31		/* Set TID */
491
4924:
493	mtspr	SPRN_MMUCR,r12
494
495	rlwinm 	r12, r10, 13, 19, 29	/* Compute pgdir/pmd offset */
496	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
497	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
498	beq	2f			/* Bail if no table */
499
500	rlwimi	r12, r10, 23, 20, 28	/* Compute pte address */
501	lwz	r11, 4(r12)		/* Get pte entry */
502	andi.	r13, r11, _PAGE_PRESENT	/* Is the page present? */
503	beq	2f			/* Bail if not present */
504
505	ori	r11, r11, _PAGE_ACCESSED
506	stw	r11, 4(r12)
507
508	 /* Jump to common tlb load */
509	b	finish_tlb_load
510
5112:
512	/* The bailout.  Restore registers to pre-exception conditions
513	 * and call the heavyweights to help us out.
514	 */
515	mfspr	r11, SPRN_SPRG7R
516	mtcr	r11
517	mfspr	r13, SPRN_SPRG5R
518	mfspr	r12, SPRN_SPRG4R
519	mfspr	r11, SPRN_SPRG1
520	mfspr	r10, SPRN_SPRG0
521	b	data_access
522
523	/* Instruction TLB Error Interrupt */
524	/*
525	 * Nearly the same as above, except we get our
526	 * information from different registers and bailout
527	 * to a different point.
528	 */
	/* Differences from DataTLBError: the faulting address comes from
	 * SRR0 (not DEAR) and the miss-failure path bails to the
	 * InstructionStorage handler instead of data_access. */
529	START_EXCEPTION(InstructionTLBError)
530	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
531	mtspr	SPRN_SPRG1, r11
532	mtspr	SPRN_SPRG4W, r12
533	mtspr	SPRN_SPRG5W, r13
534	mfcr	r11
535	mtspr	SPRN_SPRG7W, r11
536	mfspr	r10, SPRN_SRR0		/* Get faulting address */
537
538	/* If we are faulting a kernel address, we have to use the
539	 * kernel page tables.
540	 */
541	lis	r11, TASK_SIZE@h
542	cmplw	r10, r11
543	blt+	3f
544	lis	r11, swapper_pg_dir@h
545	ori	r11, r11, swapper_pg_dir@l
546
547	mfspr	r12,SPRN_MMUCR
548	rlwinm	r12,r12,0,0,23		/* Clear TID */
549
550	b	4f
551
552	/* Get the PGD for the current thread */
5533:
554	mfspr	r11,SPRN_SPRG3
555	lwz	r11,PGDIR(r11)
556
557	/* Load PID into MMUCR TID */
558	mfspr	r12,SPRN_MMUCR
559	mfspr	r13,SPRN_PID		/* Get PID */
560	rlwimi	r12,r13,0,24,31		/* Set TID */
561
5624:
563	mtspr	SPRN_MMUCR,r12
564
565	rlwinm	r12, r10, 13, 19, 29	/* Compute pgdir/pmd offset */
566	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
567	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
568	beq	2f			/* Bail if no table */
569
570	rlwimi	r12, r10, 23, 20, 28	/* Compute pte address */
571	lwz	r11, 4(r12)		/* Get pte entry */
572	andi.	r13, r11, _PAGE_PRESENT	/* Is the page present? */
573	beq	2f			/* Bail if not present */
574
575	ori	r11, r11, _PAGE_ACCESSED
576	stw	r11, 4(r12)
577
578	/* Jump to common TLB load point */
579	b	finish_tlb_load
580
5812:
582	/* The bailout.  Restore registers to pre-exception conditions
583	 * and call the heavyweights to help us out.
584	 */
585	mfspr	r11, SPRN_SPRG7R
586	mtcr	r11
587	mfspr	r13, SPRN_SPRG5R
588	mfspr	r12, SPRN_SPRG4R
589	mfspr	r11, SPRN_SPRG1
590	mfspr	r10, SPRN_SPRG0
591	b	InstructionStorage
592
593	/* Debug Interrupt */
594	DEBUG_EXCEPTION
595
596/*
597 * Local functions
598 */
599 /*
600 * Data TLB exceptions will bail out to this point
601 * if they can't resolve the lightweight TLB fault.
602 */
/*
 * Heavyweight data-fault path: builds a normal exception frame,
 * passes ESR (arg3, saved in the frame) and DEAR (arg2, the faulting
 * address) to handle_page_fault via the 0x0300 transfer template.
 */
603data_access:
604 NORMAL_EXCEPTION_PROLOG
605 mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */
606 stw r5,_ESR(r11)
607 mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
608 EXC_XFER_EE_LITE(0x0300, handle_page_fault)
609
610/*
611
612 * Both the instruction and data TLB miss get to this
613 * point to load the TLB.
614 * r10 - EA of fault
615 * r11 - available to use
616 * r12 - Pointer to the 64-bit PTE
617 * r13 - available to use
618 * MMUCR - loaded with proper value when we get here
619 * Upon exit, we reload everything and RFI.
620 */
/*
 * Common TLB loader for the 44x ITLB/DTLB miss handlers.
 * Entry: r10 = faulting EA, r12 = pointer to the 64-bit PTE,
 * MMUCR already holds the proper TID.  Picks the next victim TLB
 * index (wrapping to 0 once it passes tlb_44x_hwater, so entries
 * above the watermark stay pinned), writes the XLAT, PAGEID and
 * ATTRIB words of the entry, restores the saved registers and RFIs.
 */
621finish_tlb_load:
622 /*
623 * We set execute, because we don't have the granularity to
624 * properly set this at the page level (Linux problem).
625 * If shared is set, we cause a zero PID->TID load.
626 * Many of these bits are software only. Bits we don't set
627 * here we (properly should) assume have the appropriate value.
628 */
629
630 /* Load the next available TLB index */
631 lis r13, tlb_44x_index@ha
632 lwz r13, tlb_44x_index@l(r13)
633 /* Load the TLB high watermark */
634 lis r11, tlb_44x_hwater@ha
635 lwz r11, tlb_44x_hwater@l(r11)
636
637 /* Increment, rollover, and store TLB index */
638 addi r13, r13, 1
639 cmpw 0, r13, r11 /* reserve entries */
640 ble 7f
641 li r13, 0
6427:
643 /* Store the next available TLB index */
644 lis r11, tlb_44x_index@ha
645 stw r13, tlb_44x_index@l(r11)
646
647 lwz r11, 0(r12) /* Get MS word of PTE */
648 lwz r12, 4(r12) /* Get LS word of PTE */
649 rlwimi r11, r12, 0, 0 , 19 /* Insert RPN */
650 tlbwe r11, r13, PPC44x_TLB_XLAT /* Write XLAT */
651
652 /*
653 * Create PAGEID. This is the faulting address,
654 * page size, and valid flag.
655 */
656 li r11, PPC44x_TLB_VALID | PPC44x_TLB_4K
657 rlwimi r10, r11, 0, 20, 31 /* Insert valid and page size */
658 tlbwe r10, r13, PPC44x_TLB_PAGEID /* Write PAGEID */
659
660 li r10, PPC44x_TLB_SR@l /* Set SR */
661 rlwimi r10, r12, 0, 30, 30 /* Set SW = _PAGE_RW */
662 rlwimi r10, r12, 29, 29, 29 /* SX = _PAGE_HWEXEC */
663 rlwimi r10, r12, 29, 28, 28 /* UR = _PAGE_USER */
664 rlwimi r11, r12, 31, 26, 26 /* (_PAGE_USER>>1)->r12 */
665 and r11, r12, r11 /* HWEXEC & USER */
666 rlwimi r10, r11, 0, 26, 26 /* UX = HWEXEC & USER */
667
668 rlwimi r12, r10, 0, 26, 31 /* Insert static perms */
669 rlwinm r12, r12, 0, 20, 15 /* Clear U0-U3 */
670 tlbwe r12, r13, PPC44x_TLB_ATTRIB /* Write ATTRIB */
671
672 /* Done...restore registers and get out of here.
673 */
674 mfspr r11, SPRN_SPRG7R
675 mtcr r11
676 mfspr r13, SPRN_SPRG5R
677 mfspr r12, SPRN_SPRG4R
678 mfspr r11, SPRN_SPRG1
679 mfspr r10, SPRN_SPRG0
680 rfi /* Force context change */
681
682/*
683 * Global functions
684 */
685
686/*
687 * extern void giveup_altivec(struct task_struct *prev)
688 *
689 * The 44x core does not have an AltiVec unit.
690 */
/* No-op stub: the 44x core has no AltiVec unit, so there is nothing
 * to give up.  Exists so common code can call it unconditionally. */
691_GLOBAL(giveup_altivec)
692 blr
693
694/*
695 * extern void giveup_fpu(struct task_struct *prev)
696 *
697 * The 44x core does not have an FPU.
698 */
/* No-op stub when there is no FPU support built in; otherwise the
 * real giveup_fpu is provided elsewhere (hence the #ifndef guard). */
699#ifndef CONFIG_PPC_FPU
700_GLOBAL(giveup_fpu)
701 blr
702#endif
703
704/*
705 * extern void abort(void)
706 *
707 * At present, this routine just applies a system reset.
708 */
/*
 * abort(): request a system reset by setting DBCR0[RST_SYSTEM].
 * NOTE(review): there is no blr/isync after the mtspr, so if the
 * reset does not take effect immediately execution falls through
 * into set_context below — confirm this was intended.
 */
709_GLOBAL(abort)
710 mfspr r13,SPRN_DBCR0
711 oris r13,r13,DBCR0_RST_SYSTEM@h
712 mtspr SPRN_DBCR0,r13
713
/*
 * set_context(r3 = new context/PID, r4 = new pgdir):
 * switches the MMU context by loading r3 into SPRN_PID.
 * With CONFIG_BDI_SWITCH it also publishes the new pgdir in
 * abatron_pteptrs[1] so an attached Abatron BDI2000 debugger can
 * translate user addresses.
 */
714_GLOBAL(set_context)
715
716#ifdef CONFIG_BDI_SWITCH
717 /* Context switch the PTE pointer for the Abatron BDI2000.
718 * The PGDIR is the second parameter.
719 */
720 lis r5, abatron_pteptrs@h
721 ori r5, r5, abatron_pteptrs@l
722 stw r4, 0x4(r5)
723#endif
724 mtspr SPRN_PID,r3
725 isync /* Force context change */
726 blr
727
728/*
729 * We put a few things here that have to be page-aligned. This stuff
730 * goes at the beginning of the data segment, which is page-aligned.
731 */
/*
 * Page-aligned data: the shared zero page, the kernel's 8KB pgdir
 * (two pages, for >32-bit physical addresses), per-CPU critical /
 * machine-check exception stacks in .bss, the boot command line
 * buffer, and the two Abatron BDI2000 PTE root pointers.
 */
732 .data
733 .align 12
734 .globl sdata
735sdata:
736 .globl empty_zero_page
737empty_zero_page:
738 .space 4096
739
740/*
741 * To support >32-bit physical addresses, we use an 8KB pgdir.
742 */
743 .globl swapper_pg_dir
744swapper_pg_dir:
745 .space 8192
746
747/* Reserved 4k for the critical exception stack & 4k for the machine
748 * check stack per CPU for kernel mode exceptions */
749 .section .bss
750 .align 12
751exception_stack_bottom:
752 .space BOOKE_EXCEPTION_STACK_SIZE
753 .globl exception_stack_top
754exception_stack_top:
755
756/*
757 * This space gets a copy of optional info passed to us by the bootstrap
758 * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
759 */
760 .globl cmd_line
761cmd_line:
762 .space 512
763
764/*
765 * Room for two PTE pointers, usually the kernel and current user pointers
766 * to their respective root page table.
767 */
768abatron_pteptrs:
769 .space 8
diff --git a/arch/ppc/kernel/head_4xx.S b/arch/ppc/kernel/head_4xx.S
deleted file mode 100644
index 51da157a629e..000000000000
--- a/arch/ppc/kernel/head_4xx.S
+++ /dev/null
@@ -1,1021 +0,0 @@
1/*
2 * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
3 * Initial PowerPC version.
4 * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
5 * Rewritten for PReP
6 * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
7 * Low-level exception handlers, MMU support, and rewrite.
8 * Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
9 * PowerPC 8xx modifications.
10 * Copyright (c) 1998-1999 TiVo, Inc.
11 * PowerPC 403GCX modifications.
12 * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
13 * PowerPC 403GCX/405GP modifications.
14 * Copyright 2000 MontaVista Software Inc.
15 * PPC405 modifications
16 * PowerPC 403GCX/405GP modifications.
17 * Author: MontaVista Software, Inc.
18 * frank_rowand@mvista.com or source@mvista.com
19 * debbie_chu@mvista.com
20 *
21 *
22 * Module name: head_4xx.S
23 *
24 * Description:
25 * Kernel execution entry point code.
26 *
27 * This program is free software; you can redistribute it and/or
28 * modify it under the terms of the GNU General Public License
29 * as published by the Free Software Foundation; either version
30 * 2 of the License, or (at your option) any later version.
31 *
32 */
33
34#include <asm/processor.h>
35#include <asm/page.h>
36#include <asm/mmu.h>
37#include <asm/pgtable.h>
38#include <asm/ibm4xx.h>
39#include <asm/cputable.h>
40#include <asm/thread_info.h>
41#include <asm/ppc_asm.h>
42#include <asm/asm-offsets.h>
43
44/* As with the other PowerPC ports, it is expected that when code
45 * execution begins here, the following registers contain valid, yet
46 * optional, information:
47 *
48 * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
49 * r4 - Starting address of the init RAM disk
50 * r5 - Ending address of the init RAM disk
51 * r6 - Start of kernel command line string (e.g. "mem=96m")
52 * r7 - End of kernel command line string
53 *
54 * This is all going to change RSN when we add bi_recs....... -- Dan
55 */
/*
 * 4xx kernel entry point.  Stashes the five boot parameters
 * (board info, initrd start/end, command line start/end) in
 * r31-r27, sets up the initial 16MB 1:1 MMU mapping, then RFIs
 * into start_here with translation enabled (SRR0/SRR1 loaded
 * with the target PC and MSR_KERNEL).
 */
56 .text
57_GLOBAL(_stext)
58_GLOBAL(_start)
59
60 /* Save parameters we are passed.
61 */
62 mr r31,r3
63 mr r30,r4
64 mr r29,r5
65 mr r28,r6
66 mr r27,r7
67
68 /* We have to turn on the MMU right away so we get cache modes
69 * set correctly.
70 */
71 bl initial_mmu
72
73/* We now have the lower 16 Meg mapped into TLB entries, and the caches
74 * ready to work.
75 */
76turn_on_mmu:
77 lis r0,MSR_KERNEL@h
78 ori r0,r0,MSR_KERNEL@l
79 mtspr SPRN_SRR1,r0
80 lis r0,start_here@h
81 ori r0,r0,start_here@l
82 mtspr SPRN_SRR0,r0
83 SYNC
84 rfi /* enables MMU */
85 b . /* prevent prefetch past rfi */
86
87/*
88 * This area is used for temporarily saving registers during the
89 * critical exception prolog.
90 */
/*
 * Two fixed low-memory words (at physical 0xc0) used by
 * CRITICAL_EXCEPTION_PROLOG to save r10/r11, since the SPRGs may
 * already be in use by an interrupted normal-exception prolog.
 * Safe only because these CPUs are not SMP.
 */
91 . = 0xc0
92crit_save:
93_GLOBAL(crit_r10)
94 .space 4
95_GLOBAL(crit_r11)
96 .space 4
97
98/*
99 * Exception vector entry code. This code runs with address translation
100 * turned off (i.e. using physical addresses). We assume SPRG3 has the
101 * physical address of the current task thread_struct.
102 * Note that we have to have decremented r1 before we write to any fields
103 * of the exception frame, since a critical interrupt could occur at any
104 * time, and it will write to the area immediately below the current r1.
105 */
/*
 * Saves r10/r11/r1 in SPRG0-2, switches to the thread's kernel
 * stack when the fault came from user mode (MSR_PR in SRR1),
 * allocates an INT_FRAME and fills in CR, r9-r12, LR, the old r1
 * (at both GPR1 and the frame back-chain), r0 and r3-r8.  Leaves
 * r11 = phys frame pointer, r12 = SRR0, r9 = SRR1 & ~MSR_WE.
 */
106#define NORMAL_EXCEPTION_PROLOG \
107 mtspr SPRN_SPRG0,r10; /* save two registers to work with */\
108 mtspr SPRN_SPRG1,r11; \
109 mtspr SPRN_SPRG2,r1; \
110 mfcr r10; /* save CR in r10 for now */\
111 mfspr r11,SPRN_SRR1; /* check whether user or kernel */\
112 andi. r11,r11,MSR_PR; \
113 beq 1f; \
114 mfspr r1,SPRN_SPRG3; /* if from user, start at top of */\
115 lwz r1,THREAD_INFO-THREAD(r1); /* this thread's kernel stack */\
116 addi r1,r1,THREAD_SIZE; \
1171: subi r1,r1,INT_FRAME_SIZE; /* Allocate an exception frame */\
118 tophys(r11,r1); \
119 stw r10,_CCR(r11); /* save various registers */\
120 stw r12,GPR12(r11); \
121 stw r9,GPR9(r11); \
122 mfspr r10,SPRN_SPRG0; \
123 stw r10,GPR10(r11); \
124 mfspr r12,SPRN_SPRG1; \
125 stw r12,GPR11(r11); \
126 mflr r10; \
127 stw r10,_LINK(r11); \
128 mfspr r10,SPRN_SPRG2; \
129 mfspr r12,SPRN_SRR0; \
130 stw r10,GPR1(r11); \
131 mfspr r9,SPRN_SRR1; \
132 stw r10,0(r11); \
133 rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
134 stw r0,GPR0(r11); \
135 SAVE_4GPRS(3, r11); \
136 SAVE_2GPRS(7, r11)
137
138/*
139 * Exception prolog for critical exceptions. This is a little different
140 * from the normal exception prolog above since a critical exception
141 * can potentially occur at any point during normal exception processing.
142 * Thus we cannot use the same SPRG registers as the normal prolog above.
143 * Instead we use a couple of words of memory at low physical addresses.
144 * This is OK since we don't support SMP on these processors.
145 */
/*
 * Like NORMAL_EXCEPTION_PROLOG but for critical interrupts, which
 * can preempt a normal prolog: r10/r11 go to the crit_r10/crit_r11
 * low-memory words instead of SPRGs, the frame is built on the
 * dedicated critical stack (or the thread stack if from user),
 * DEAR/ESR are captured into the frame, and SRR2/SRR3 play the
 * role of SRR0/SRR1.  Leaves r11 phys / r1 virt frame pointers.
 */
146#define CRITICAL_EXCEPTION_PROLOG \
147 stw r10,crit_r10@l(0); /* save two registers to work with */\
148 stw r11,crit_r11@l(0); \
149 mfcr r10; /* save CR in r10 for now */\
150 mfspr r11,SPRN_SRR3; /* check whether user or kernel */\
151 andi. r11,r11,MSR_PR; \
152 lis r11,critical_stack_top@h; \
153 ori r11,r11,critical_stack_top@l; \
154 beq 1f; \
155 /* COMING FROM USER MODE */ \
156 mfspr r11,SPRN_SPRG3; /* if from user, start at top of */\
157 lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
158 addi r11,r11,THREAD_SIZE; \
1591: subi r11,r11,INT_FRAME_SIZE; /* Allocate an exception frame */\
160 tophys(r11,r11); \
161 stw r10,_CCR(r11); /* save various registers */\
162 stw r12,GPR12(r11); \
163 stw r9,GPR9(r11); \
164 mflr r10; \
165 stw r10,_LINK(r11); \
166 mfspr r12,SPRN_DEAR; /* save DEAR and ESR in the frame */\
167 stw r12,_DEAR(r11); /* since they may have had stuff */\
168 mfspr r9,SPRN_ESR; /* in them at the point where the */\
169 stw r9,_ESR(r11); /* exception was taken */\
170 mfspr r12,SPRN_SRR2; \
171 stw r1,GPR1(r11); \
172 mfspr r9,SPRN_SRR3; \
173 stw r1,0(r11); \
174 tovirt(r1,r11); \
175 rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
176 stw r0,GPR0(r11); \
177 SAVE_4GPRS(3, r11); \
178 SAVE_2GPRS(7, r11)
179
180 /*
181 * State at this point:
182 * r9 saved in stack frame, now saved SRR3 & ~MSR_WE
183 * r10 saved in crit_r10 and in stack frame, trashed
184 * r11 saved in crit_r11 and in stack frame,
185 * now phys stack/exception frame pointer
186 * r12 saved in stack frame, now saved SRR2
187 * CR saved in stack frame, CR0.EQ = !SRR3.PR
188 * LR, DEAR, ESR in stack frame
189 * r1 saved in stack frame, now virt stack/excframe pointer
190 * r0, r3-r8 saved in stack frame
191 */
192
193/*
194 * Exception vectors.
195 */
/* Places a handler label at fixed vector offset n. */
196#define START_EXCEPTION(n, label) \
197 . = n; \
198label:
199
/* Full vector body: prolog, arg1 = exception frame, then transfer. */
200#define EXCEPTION(n, label, hdlr, xfer) \
201 START_EXCEPTION(n, label); \
202 NORMAL_EXCEPTION_PROLOG; \
203 addi r3,r1,STACK_FRAME_OVERHEAD; \
204 xfer(n, hdlr)
205
/* Critical vector body: trap number is n+2, MSR has ME/DE/CE masked. */
206#define CRITICAL_EXCEPTION(n, label, hdlr) \
207 START_EXCEPTION(n, label); \
208 CRITICAL_EXCEPTION_PROLOG; \
209 addi r3,r1,STACK_FRAME_OVERHEAD; \
210 EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
211 NOCOPY, crit_transfer_to_handler, \
212 ret_from_crit_exc)
213
/* Stores the trap number in the frame, builds the handler MSR
 * (optionally copying EE from the interrupted MSR in r9), and calls
 * the transfer routine, which reads the hdlr/ret words after the bl. */
214#define EXC_XFER_TEMPLATE(hdlr, trap, msr, copyee, tfer, ret) \
215 li r10,trap; \
216 stw r10,TRAP(r11); \
217 lis r10,msr@h; \
218 ori r10,r10,msr@l; \
219 copyee(r10, r9); \
220 bl tfer; \
221 .long hdlr; \
222 .long ret
223
/* COPY_EE propagates the interrupted MSR_EE bit; NOCOPY leaves it. */
224#define COPY_EE(d, s) rlwimi d,s,0,16,16
225#define NOCOPY(d, s)
226
227#define EXC_XFER_STD(n, hdlr) \
228 EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, NOCOPY, transfer_to_handler_full, \
229 ret_from_except_full)
230
231#define EXC_XFER_LITE(n, hdlr) \
232 EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, NOCOPY, transfer_to_handler, \
233 ret_from_except)
234
235#define EXC_XFER_EE(n, hdlr) \
236 EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, COPY_EE, transfer_to_handler_full, \
237 ret_from_except_full)
238
239#define EXC_XFER_EE_LITE(n, hdlr) \
240 EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, COPY_EE, transfer_to_handler, \
241 ret_from_except)
242
243
/* First two fixed vectors; both take the critical-exception path
 * (separate save area and SRR2/SRR3, see CRITICAL_EXCEPTION above). */
244/*
245 * 0x0100 - Critical Interrupt Exception
246 */
247 CRITICAL_EXCEPTION(0x0100, CriticalInterrupt, unknown_exception)
248
249/*
250 * 0x0200 - Machine Check Exception
251 */
252 CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
253
254/*
255 * 0x0300 - Data Storage Exception
256 * This happens for just a few reasons. U0 set (but we don't do that),
257 * or zone protection fault (user violation, write to protected page).
258 * If this is just an update of modified status, we do that quickly
259 * and exit. Otherwise, we call heavyweight functions to do the work.
260 */
/*
 * 4xx Data Storage exception (0x0300): fast path for the "dirty
 * bit" case.  If the fault is not a zone-protection violation
 * (ESR_DIZ clear) and the Linux PTE is writeable, it sets
 * DIRTY/ACCESSED/HWWRITE, rewrites the existing TLB entry found
 * via tlbsx, and RFIs.  Anything else restores state and bails to
 * the heavyweight DataAccess path.  403GCX lacks enough SPRGs, so
 * it saves scratch state in low memory through r0 instead.
 */
261 START_EXCEPTION(0x0300, DataStorage)
262 mtspr SPRN_SPRG0, r10 /* Save some working registers */
263 mtspr SPRN_SPRG1, r11
264#ifdef CONFIG_403GCX
265 stw r12, 0(r0)
266 stw r9, 4(r0)
267 mfcr r11
268 mfspr r12, SPRN_PID
269 stw r11, 8(r0)
270 stw r12, 12(r0)
271#else
272 mtspr SPRN_SPRG4, r12
273 mtspr SPRN_SPRG5, r9
274 mfcr r11
275 mfspr r12, SPRN_PID
276 mtspr SPRN_SPRG7, r11
277 mtspr SPRN_SPRG6, r12
278#endif
279
280 /* First, check if it was a zone fault (which means a user
281 * tried to access a kernel or read-protected page - always
282 * a SEGV). All other faults here must be stores, so no
283 * need to check ESR_DST as well. */
284 mfspr r10, SPRN_ESR
285 andis. r10, r10, ESR_DIZ@h
286 bne 2f
287
288 mfspr r10, SPRN_DEAR /* Get faulting address */
289
290 /* If we are faulting a kernel address, we have to use the
291 * kernel page tables.
292 */
293 lis r11, TASK_SIZE@h
294 cmplw r10, r11
295 blt+ 3f
296 lis r11, swapper_pg_dir@h
297 ori r11, r11, swapper_pg_dir@l
298 li r9, 0
299 mtspr SPRN_PID, r9 /* TLB will have 0 TID */
300 b 4f
301
302 /* Get the PGD for the current thread.
303 */
3043:
305 mfspr r11,SPRN_SPRG3
306 lwz r11,PGDIR(r11)
3074:
308 tophys(r11, r11)
309 rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */
310 lwz r11, 0(r11) /* Get L1 entry */
311 rlwinm. r12, r11, 0, 0, 19 /* Extract L2 (pte) base address */
312 beq 2f /* Bail if no table */
313
314 rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */
315 lwz r11, 0(r12) /* Get Linux PTE */
316
317 andi. r9, r11, _PAGE_RW /* Is it writeable? */
318 beq 2f /* Bail if not */
319
320 /* Update 'changed'.
321 */
322 ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
323 stw r11, 0(r12) /* Update Linux page table */
324
325 /* Most of the Linux PTE is ready to load into the TLB LO.
326 * We set ZSEL, where only the LS-bit determines user access.
327 * We set execute, because we don't have the granularity to
328 * properly set this at the page level (Linux problem).
329 * If shared is set, we cause a zero PID->TID load.
330 * Many of these bits are software only. Bits we don't set
331 * here we (properly should) assume have the appropriate value.
332 */
333 li r12, 0x0ce2
334 andc r11, r11, r12 /* Make sure 20, 21 are zero */
335
336 /* find the TLB index that caused the fault. It has to be here.
337 */
338 tlbsx r9, 0, r10
339
340 tlbwe r11, r9, TLB_DATA /* Load TLB LO */
341
342 /* Done...restore registers and get out of here.
343 */
344#ifdef CONFIG_403GCX
345 lwz r12, 12(r0)
346 lwz r11, 8(r0)
347 mtspr SPRN_PID, r12
348 mtcr r11
349 lwz r9, 4(r0)
350 lwz r12, 0(r0)
351#else
352 mfspr r12, SPRN_SPRG6
353 mfspr r11, SPRN_SPRG7
354 mtspr SPRN_PID, r12
355 mtcr r11
356 mfspr r9, SPRN_SPRG5
357 mfspr r12, SPRN_SPRG4
358#endif
359 mfspr r11, SPRN_SPRG1
360 mfspr r10, SPRN_SPRG0
361 PPC405_ERR77_SYNC
362 rfi /* Should sync shadow TLBs */
363 b . /* prevent prefetch past rfi */
364
3652:
366 /* The bailout. Restore registers to pre-exception conditions
367 * and call the heavyweights to help us out.
368 */
369#ifdef CONFIG_403GCX
370 lwz r12, 12(r0)
371 lwz r11, 8(r0)
372 mtspr SPRN_PID, r12
373 mtcr r11
374 lwz r9, 4(r0)
375 lwz r12, 0(r0)
376#else
377 mfspr r12, SPRN_SPRG6
378 mfspr r11, SPRN_SPRG7
379 mtspr SPRN_PID, r12
380 mtcr r11
381 mfspr r9, SPRN_SPRG5
382 mfspr r12, SPRN_SPRG4
383#endif
384 mfspr r11, SPRN_SPRG1
385 mfspr r10, SPRN_SPRG0
386 b DataAccess
387
388/*
389 * 0x0400 - Instruction Storage Exception
390 * This is caused by a fetch from non-execute or guarded pages.
391 */
/* Fixed vectors 0x0400-0x1020: instruction storage, external IRQ,
 * alignment, program check, syscall, the PIT timer, and the
 * (disabled) FIT/WDT handlers.  Unused slots trap to
 * unknown_exception. */
392 START_EXCEPTION(0x0400, InstructionAccess)
393 NORMAL_EXCEPTION_PROLOG
394 mr r4,r12 /* Pass SRR0 as arg2 */
395 li r5,0 /* Pass zero as arg3 */
396 EXC_XFER_EE_LITE(0x400, handle_page_fault)
397
398/* 0x0500 - External Interrupt Exception */
399 EXCEPTION(0x0500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
400
401/* 0x0600 - Alignment Exception */
402 START_EXCEPTION(0x0600, Alignment)
403 NORMAL_EXCEPTION_PROLOG
404 mfspr r4,SPRN_DEAR /* Grab the DEAR and save it */
405 stw r4,_DEAR(r11)
406 addi r3,r1,STACK_FRAME_OVERHEAD
407 EXC_XFER_EE(0x600, alignment_exception)
408
409/* 0x0700 - Program Exception */
410 START_EXCEPTION(0x0700, ProgramCheck)
411 NORMAL_EXCEPTION_PROLOG
412 mfspr r4,SPRN_ESR /* Grab the ESR and save it */
413 stw r4,_ESR(r11)
414 addi r3,r1,STACK_FRAME_OVERHEAD
415 EXC_XFER_STD(0x700, program_check_exception)
416
417 EXCEPTION(0x0800, Trap_08, unknown_exception, EXC_XFER_EE)
418 EXCEPTION(0x0900, Trap_09, unknown_exception, EXC_XFER_EE)
419 EXCEPTION(0x0A00, Trap_0A, unknown_exception, EXC_XFER_EE)
420 EXCEPTION(0x0B00, Trap_0B, unknown_exception, EXC_XFER_EE)
421
422/* 0x0C00 - System Call Exception */
423 START_EXCEPTION(0x0C00, SystemCall)
424 NORMAL_EXCEPTION_PROLOG
425 EXC_XFER_EE_LITE(0xc00, DoSyscall)
426
427 EXCEPTION(0x0D00, Trap_0D, unknown_exception, EXC_XFER_EE)
428 EXCEPTION(0x0E00, Trap_0E, unknown_exception, EXC_XFER_EE)
429 EXCEPTION(0x0F00, Trap_0F, unknown_exception, EXC_XFER_EE)
430
431/* 0x1000 - Programmable Interval Timer (PIT) Exception */
432 START_EXCEPTION(0x1000, Decrementer)
433 NORMAL_EXCEPTION_PROLOG
434 lis r0,TSR_PIS@h
435 mtspr SPRN_TSR,r0 /* Clear the PIT exception */
436 addi r3,r1,STACK_FRAME_OVERHEAD
437 EXC_XFER_LITE(0x1000, timer_interrupt)
438
439#if 0
440/* NOTE:
441 * FIT and WDT handlers are not implemented yet.
442 */
443
444/* 0x1010 - Fixed Interval Timer (FIT) Exception
445*/
446 STND_EXCEPTION(0x1010, FITException, unknown_exception)
447
448/* 0x1020 - Watchdog Timer (WDT) Exception
449*/
450#ifdef CONFIG_BOOKE_WDT
451 CRITICAL_EXCEPTION(0x1020, WDTException, WatchdogException)
452#else
453 CRITICAL_EXCEPTION(0x1020, WDTException, unknown_exception)
454#endif
455#endif
456
457/* 0x1100 - Data TLB Miss Exception
458 * As the name implies, translation is not in the MMU, so search the
459 * page tables and fix it. The only purpose of this function is to
460 * load TLB entries from the page table if they exist.
461 */
/*
 * 4xx Data TLB miss (0x1100): software reload.  Walks the
 * two-level table for the faulting DEAR (kernel tables with PID 0
 * for addresses >= TASK_SIZE), handles both normal PTE pages and
 * large-page pmd entries, marks the PTE accessed, builds the
 * TLB_TAG in r10 and jumps to finish_tlb_load.  Misses bail to the
 * heavyweight DataAccess path after restoring all scratch state
 * (low memory on 403GCX, SPRG4-7 elsewhere).
 */
462 START_EXCEPTION(0x1100, DTLBMiss)
463 mtspr SPRN_SPRG0, r10 /* Save some working registers */
464 mtspr SPRN_SPRG1, r11
465#ifdef CONFIG_403GCX
466 stw r12, 0(r0)
467 stw r9, 4(r0)
468 mfcr r11
469 mfspr r12, SPRN_PID
470 stw r11, 8(r0)
471 stw r12, 12(r0)
472#else
473 mtspr SPRN_SPRG4, r12
474 mtspr SPRN_SPRG5, r9
475 mfcr r11
476 mfspr r12, SPRN_PID
477 mtspr SPRN_SPRG7, r11
478 mtspr SPRN_SPRG6, r12
479#endif
480 mfspr r10, SPRN_DEAR /* Get faulting address */
481
482 /* If we are faulting a kernel address, we have to use the
483 * kernel page tables.
484 */
485 lis r11, TASK_SIZE@h
486 cmplw r10, r11
487 blt+ 3f
488 lis r11, swapper_pg_dir@h
489 ori r11, r11, swapper_pg_dir@l
490 li r9, 0
491 mtspr SPRN_PID, r9 /* TLB will have 0 TID */
492 b 4f
493
494 /* Get the PGD for the current thread.
495 */
4963:
497 mfspr r11,SPRN_SPRG3
498 lwz r11,PGDIR(r11)
4994:
500 tophys(r11, r11)
501 rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */
502 lwz r12, 0(r11) /* Get L1 entry */
503 andi. r9, r12, _PMD_PRESENT /* Check if it points to a PTE page */
504 beq 2f /* Bail if no table */
505
506 rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */
507 lwz r11, 0(r12) /* Get Linux PTE */
508 andi. r9, r11, _PAGE_PRESENT
509 beq 5f
510
511 ori r11, r11, _PAGE_ACCESSED
512 stw r11, 0(r12)
513
514 /* Create TLB tag. This is the faulting address plus a static
515 * set of bits. These are size, valid, E, U0.
516 */
517 li r12, 0x00c0
518 rlwimi r10, r12, 0, 20, 31
519
520 b finish_tlb_load
521
5222: /* Check for possible large-page pmd entry */
523 rlwinm. r9, r12, 2, 22, 24
524 beq 5f
525
526 /* Create TLB tag. This is the faulting address, plus a static
527 * set of bits (valid, E, U0) plus the size from the PMD.
528 */
529 ori r9, r9, 0x40
530 rlwimi r10, r9, 0, 20, 31
531 mr r11, r12
532
533 b finish_tlb_load
534
5355:
536 /* The bailout. Restore registers to pre-exception conditions
537 * and call the heavyweights to help us out.
538 */
539#ifdef CONFIG_403GCX
540 lwz r12, 12(r0)
541 lwz r11, 8(r0)
542 mtspr SPRN_PID, r12
543 mtcr r11
544 lwz r9, 4(r0)
545 lwz r12, 0(r0)
546#else
547 mfspr r12, SPRN_SPRG6
548 mfspr r11, SPRN_SPRG7
549 mtspr SPRN_PID, r12
550 mtcr r11
551 mfspr r9, SPRN_SPRG5
552 mfspr r12, SPRN_SPRG4
553#endif
554 mfspr r11, SPRN_SPRG1
555 mfspr r10, SPRN_SPRG0
556 b DataAccess
557
558/* 0x1200 - Instruction TLB Miss Exception
559 * Nearly the same as above, except we get our information from different
560 * registers and bailout to a different point.
561 */
/*
 * 4xx Instruction TLB miss (0x1200): same walk as DTLBMiss but the
 * faulting address comes from SRR0 and failures bail to
 * InstructionAccess instead of DataAccess.
 */
562 START_EXCEPTION(0x1200, ITLBMiss)
563 mtspr SPRN_SPRG0, r10 /* Save some working registers */
564 mtspr SPRN_SPRG1, r11
565#ifdef CONFIG_403GCX
566 stw r12, 0(r0)
567 stw r9, 4(r0)
568 mfcr r11
569 mfspr r12, SPRN_PID
570 stw r11, 8(r0)
571 stw r12, 12(r0)
572#else
573 mtspr SPRN_SPRG4, r12
574 mtspr SPRN_SPRG5, r9
575 mfcr r11
576 mfspr r12, SPRN_PID
577 mtspr SPRN_SPRG7, r11
578 mtspr SPRN_SPRG6, r12
579#endif
580 mfspr r10, SPRN_SRR0 /* Get faulting address */
581
582 /* If we are faulting a kernel address, we have to use the
583 * kernel page tables.
584 */
585 lis r11, TASK_SIZE@h
586 cmplw r10, r11
587 blt+ 3f
588 lis r11, swapper_pg_dir@h
589 ori r11, r11, swapper_pg_dir@l
590 li r9, 0
591 mtspr SPRN_PID, r9 /* TLB will have 0 TID */
592 b 4f
593
594 /* Get the PGD for the current thread.
595 */
5963:
597 mfspr r11,SPRN_SPRG3
598 lwz r11,PGDIR(r11)
5994:
600 tophys(r11, r11)
601 rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */
602 lwz r12, 0(r11) /* Get L1 entry */
603 andi. r9, r12, _PMD_PRESENT /* Check if it points to a PTE page */
604 beq 2f /* Bail if no table */
605
606 rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */
607 lwz r11, 0(r12) /* Get Linux PTE */
608 andi. r9, r11, _PAGE_PRESENT
609 beq 5f
610
611 ori r11, r11, _PAGE_ACCESSED
612 stw r11, 0(r12)
613
614 /* Create TLB tag. This is the faulting address plus a static
615 * set of bits. These are size, valid, E, U0.
616 */
617 li r12, 0x00c0
618 rlwimi r10, r12, 0, 20, 31
619
620 b finish_tlb_load
621
6222: /* Check for possible large-page pmd entry */
623 rlwinm. r9, r12, 2, 22, 24
624 beq 5f
625
626 /* Create TLB tag. This is the faulting address, plus a static
627 * set of bits (valid, E, U0) plus the size from the PMD.
628 */
629 ori r9, r9, 0x40
630 rlwimi r10, r9, 0, 20, 31
631 mr r11, r12
632
633 b finish_tlb_load
634
6355:
636 /* The bailout. Restore registers to pre-exception conditions
637 * and call the heavyweights to help us out.
638 */
639#ifdef CONFIG_403GCX
640 lwz r12, 12(r0)
641 lwz r11, 8(r0)
642 mtspr SPRN_PID, r12
643 mtcr r11
644 lwz r9, 4(r0)
645 lwz r12, 0(r0)
646#else
647 mfspr r12, SPRN_SPRG6
648 mfspr r11, SPRN_SPRG7
649 mtspr SPRN_PID, r12
650 mtcr r11
651 mfspr r9, SPRN_SPRG5
652 mfspr r12, SPRN_SPRG4
653#endif
654 mfspr r11, SPRN_SPRG1
655 mfspr r10, SPRN_SPRG0
656 b InstructionAccess
657
/* Vectors 0x1300-0x1F00: unused on 4xx, routed to unknown_exception.
 * 0x1700 is aliased to DTLBMiss when working around 405GP erratum 51. */
658 EXCEPTION(0x1300, Trap_13, unknown_exception, EXC_XFER_EE)
659 EXCEPTION(0x1400, Trap_14, unknown_exception, EXC_XFER_EE)
660 EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
661 EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
662#ifdef CONFIG_IBM405_ERR51
663 /* 405GP errata 51 */
664 START_EXCEPTION(0x1700, Trap_17)
665 b DTLBMiss
666#else
667 EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_EE)
668#endif
669 EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
670 EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
671 EXCEPTION(0x1A00, Trap_1A, unknown_exception, EXC_XFER_EE)
672 EXCEPTION(0x1B00, Trap_1B, unknown_exception, EXC_XFER_EE)
673 EXCEPTION(0x1C00, Trap_1C, unknown_exception, EXC_XFER_EE)
674 EXCEPTION(0x1D00, Trap_1D, unknown_exception, EXC_XFER_EE)
675 EXCEPTION(0x1E00, Trap_1E, unknown_exception, EXC_XFER_EE)
676 EXCEPTION(0x1F00, Trap_1F, unknown_exception, EXC_XFER_EE)
677
678/* Check for a single step debug exception while in an exception
679 * handler before state has been saved. This is to catch the case
680 * where an instruction that we are trying to single step causes
681 * an exception (eg ITLB/DTLB miss) and thus the first instruction of
682 * the exception handler generates a single step debug exception.
683 *
684 * If we get a debug trap on the first instruction of an exception handler,
685 * we reset the MSR_DE in the _exception handler's_ MSR (the debug trap is
686 * a critical exception, so we are using SPRN_CSRR1 to manipulate the MSR).
687 * The exception handler was handling a non-critical interrupt, so it will
688 * save (and later restore) the MSR via SPRN_SRR1, which will still have
689 * the MSR_DE bit set.
690 */
691 /* 0x2000 - Debug Exception */
/*
 * 0x2000 Debug exception (critical).  If a single-step/branch-taken
 * event (DBSR_IC) hit inside an exception entry sequence (kernel
 * mode, MMU off, or PC below the 0x2100 vector end), it clears
 * MSR_DE in the saved SRR3 and the DBSR event, unwinds the critical
 * frame and rfci's; otherwise it transfers to DebugException as a
 * normal critical interrupt (see block comment above the vector).
 */
692 START_EXCEPTION(0x2000, DebugTrap)
693 CRITICAL_EXCEPTION_PROLOG
694
695 /*
696 * If this is a single step or branch-taken exception in an
697 * exception entry sequence, it was probably meant to apply to
698 * the code where the exception occurred (since exception entry
699 * doesn't turn off DE automatically). We simulate the effect
700 * of turning off DE on entry to an exception handler by turning
701 * off DE in the SRR3 value and clearing the debug status.
702 */
703 mfspr r10,SPRN_DBSR /* check single-step/branch taken */
704 andis. r10,r10,DBSR_IC@h
705 beq+ 2f
706
707 andi. r10,r9,MSR_IR|MSR_PR /* check supervisor + MMU off */
708 beq 1f /* branch and fix it up */
709
710 mfspr r10,SPRN_SRR2 /* Faulting instruction address */
711 cmplwi r10,0x2100
712 bgt+ 2f /* address above exception vectors */
713
714 /* here it looks like we got an inappropriate debug exception. */
7151: rlwinm r9,r9,0,~MSR_DE /* clear DE in the SRR3 value */
716 lis r10,DBSR_IC@h /* clear the IC event */
717 mtspr SPRN_DBSR,r10
718 /* restore state and get out */
719 lwz r10,_CCR(r11)
720 lwz r0,GPR0(r11)
721 lwz r1,GPR1(r11)
722 mtcrf 0x80,r10
723 mtspr SPRN_SRR2,r12
724 mtspr SPRN_SRR3,r9
725 lwz r9,GPR9(r11)
726 lwz r12,GPR12(r11)
727 lwz r10,crit_r10@l(0)
728 lwz r11,crit_r11@l(0)
729 PPC405_ERR77_SYNC
730 rfci
731 b .
732
733 /* continue normal handling for a critical exception... */
7342: mfspr r4,SPRN_DBSR
735 addi r3,r1,STACK_FRAME_OVERHEAD
736 EXC_XFER_TEMPLATE(DebugException, 0x2002, \
737 (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
738 NOCOPY, crit_transfer_to_handler, ret_from_crit_exc)
739
740/*
741 * The other Data TLB exceptions bail out to this point
742 * if they can't resolve the lightweight TLB fault.
743 */
/*
 * Heavyweight data-fault path for the 4xx TLB handlers: builds a
 * normal exception frame and hands ESR (arg3) and DEAR (arg2) to
 * handle_page_fault with trap number 0x300.
 */
744DataAccess:
745 NORMAL_EXCEPTION_PROLOG
746 mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */
747 stw r5,_ESR(r11)
748 mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
749 EXC_XFER_EE_LITE(0x300, handle_page_fault)
750
751/* Other PowerPC processors, namely those derived from the 6xx-series
752 * have vectors from 0x2100 through 0x2F00 defined, but marked as reserved.
753 * However, for the 4xx-series processors these are neither defined nor
754 * reserved.
755 */
756
757 /* Damn, I came up one instruction too many to fit into the
758 * exception space :-). Both the instruction and data TLB
759 * miss get to this point to load the TLB.
760 * r10 - TLB_TAG value
761 * r11 - Linux PTE
762 * r12, r9 - available to use
763 * PID - loaded with proper value when we get here
764 * Upon exit, we reload everything and RFI.
765 * Actually, it will fit now, but oh well.....a common place
766 * to load the TLB.
767 */
/*
 * Common TLB loader for the 4xx ITLB/DTLB miss handlers (entry
 * conditions documented in the comment above): round-robins
 * through PPC4XX_TLB_SIZE entries, strips the software-only PTE
 * bits, writes TLB LO/HI, restores scratch state and RFIs.
 * NOTE(review): tlb_4xx_index lives in the exception text and is
 * read/written via zero-based @l addressing, like crit_save — this
 * relies on the vectors sitting at low physical addresses.
 */
768tlb_4xx_index:
769 .long 0
770finish_tlb_load:
771 /* load the next available TLB index.
772 */
773 lwz r9, tlb_4xx_index@l(0)
774 addi r9, r9, 1
775 andi. r9, r9, (PPC4XX_TLB_SIZE-1)
776 stw r9, tlb_4xx_index@l(0)
777
7786:
779 /*
780 * Clear out the software-only bits in the PTE to generate the
781 * TLB_DATA value. These are the bottom 2 bits of the RPM, the
782 * top 3 bits of the zone field, and M.
783 */
784 li r12, 0x0ce2
785 andc r11, r11, r12
786
787 tlbwe r11, r9, TLB_DATA /* Load TLB LO */
788 tlbwe r10, r9, TLB_TAG /* Load TLB HI */
789
790 /* Done...restore registers and get out of here.
791 */
792#ifdef CONFIG_403GCX
793 lwz r12, 12(r0)
794 lwz r11, 8(r0)
795 mtspr SPRN_PID, r12
796 mtcr r11
797 lwz r9, 4(r0)
798 lwz r12, 0(r0)
799#else
800 mfspr r12, SPRN_SPRG6
801 mfspr r11, SPRN_SPRG7
802 mtspr SPRN_PID, r12
803 mtcr r11
804 mfspr r9, SPRN_SPRG5
805 mfspr r12, SPRN_SPRG4
806#endif
807 mfspr r11, SPRN_SPRG1
808 mfspr r10, SPRN_SPRG0
809 PPC405_ERR77_SYNC
810 rfi /* Should sync shadow TLBs */
811 b . /* prevent prefetch past rfi */
812
813/* extern void giveup_fpu(struct task_struct *prev)
814 *
815 * The PowerPC 4xx family of processors do not have an FPU, so this just
816 * returns.
817 */
/* No-op stub: 4xx cores have no FPU, so there is no state to give up. */
818_GLOBAL(giveup_fpu)
819 blr
820
821/* This is where the main kernel code starts.
822 */
/*
 * First C-capable code, entered from turn_on_mmu with the MMU on.
 * Sets up current (r2) and SPRG3, the init stack, runs
 * early_init/machine_init/MMU_init, then RFIs back to real mode to
 * flush the boot-time 16MB TLB mappings, publishes the Abatron
 * pgdir pointer, and finally RFIs into start_kernel with the MMU
 * fully enabled.
 */
823start_here:
824
825 /* ptr to current */
826 lis r2,init_task@h
827 ori r2,r2,init_task@l
828
829 /* ptr to phys current thread */
830 tophys(r4,r2)
831 addi r4,r4,THREAD /* init task's THREAD */
832 mtspr SPRN_SPRG3,r4
833
834 /* stack */
835 lis r1,init_thread_union@ha
836 addi r1,r1,init_thread_union@l
837 li r0,0
838 stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
839
840 bl early_init /* We have to do this with MMU on */
841
842/*
843 * Decide what sort of machine this is and initialize the MMU.
844 */
845 mr r3,r31
846 mr r4,r30
847 mr r5,r29
848 mr r6,r28
849 mr r7,r27
850 bl machine_init
851 bl MMU_init
852
853/* Go back to running unmapped so we can load up new values
854 * and change to using our exception vectors.
855 * On the 4xx, all we have to do is invalidate the TLB to clear
856 * the old 16M byte TLB mappings.
857 */
858 lis r4,2f@h
859 ori r4,r4,2f@l
860 tophys(r4,r4)
861 lis r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@h
862 ori r3,r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@l
863 mtspr SPRN_SRR0,r4
864 mtspr SPRN_SRR1,r3
865 rfi
866 b . /* prevent prefetch past rfi */
867
868/* Load up the kernel context */
8692:
870 sync /* Flush to memory before changing TLB */
871 tlbia
872 isync /* Flush shadow TLBs */
873
874 /* set up the PTE pointers for the Abatron bdiGDB.
875 */
876 lis r6, swapper_pg_dir@h
877 ori r6, r6, swapper_pg_dir@l
878 lis r5, abatron_pteptrs@h
879 ori r5, r5, abatron_pteptrs@l
880 stw r5, 0xf0(r0) /* Must match your Abatron config file */
881 tophys(r5,r5)
882 stw r6, 0(r5)
883
884/* Now turn on the MMU for real! */
885 lis r4,MSR_KERNEL@h
886 ori r4,r4,MSR_KERNEL@l
887 lis r3,start_kernel@h
888 ori r3,r3,start_kernel@l
889 mtspr SPRN_SRR0,r3
890 mtspr SPRN_SRR1,r4
891 rfi /* enable MMU and jump to start_kernel */
892 b . /* prevent prefetch past rfi */
893
 894/* Set up the initial MMU state so we can do the first level of
 895 * kernel initialization. This maps the first 16 MBytes of memory 1:1
 896 * virtual to physical and more importantly sets the cache mode.
 897 */
/* Called with the MMU off; returns with translation still off but with a
 * 16M KERNELBASE->0 TLB entry installed in slot 63, PID 0 loaded, and the
 * exception vector base (EVPR) pointing at the physical kernel base. */
 898initial_mmu:
 899	tlbia			/* Invalidate all TLB entries */
 900	isync
 901
 902	/* We should still be executing code at physical address 0x0000xxxx
 903	 * at this point. However, start_here is at virtual address
 904	 * 0xC000xxxx. So, set up a TLB mapping to cover this once
 905	 * translation is enabled.
 906	 */
 907
 908	lis	r3,KERNELBASE@h		/* Load the kernel virtual address */
 909	ori	r3,r3,KERNELBASE@l
 910	tophys(r4,r3)			/* Load the kernel physical address */
 911
 912	iccci	r0,r3			/* Invalidate the i-cache before use */
 913
 914	/* Load the kernel PID.
 915	 */
 916	li	r0,0
 917	mtspr	SPRN_PID,r0
 918	sync
 919
 920	/* Configure and load two entries into TLB slots 62 and 63.
 921	 * In case we are pinning TLBs, these are reserved in by the
 922	 * other TLB functions. If not reserving, then it doesn't
 923	 * matter where they are loaded.
 924	 */
	/* NOTE(review): only slot 63 is actually written below (plus slot 0
	 * for the debug UART); the "two entries" comment looks stale. */
 925	clrrwi	r4,r4,10		/* Mask off the real page number */
 926	ori	r4,r4,(TLB_WR | TLB_EX)	/* Set the write and execute bits */
 927
 928	clrrwi	r3,r3,10		/* Mask off the effective page number */
 929	ori	r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_16M))
 930
 931	li	r0,63			/* TLB slot 63 */
 932
 933	tlbwe	r4,r0,TLB_DATA		/* Load the data portion of the entry */
 934	tlbwe	r3,r0,TLB_TAG		/* Load the tag portion of the entry */
 935
 936#if defined(CONFIG_SERIAL_TEXT_DEBUG) && defined(SERIAL_DEBUG_IO_BASE)
 937
 938	/* Load a TLB entry for the UART, so that ppc4xx_progress() can use
 939	 * the UARTs nice and early.  We use a 4k real==virtual mapping. */
 940
 941	lis	r3,SERIAL_DEBUG_IO_BASE@h
 942	ori	r3,r3,SERIAL_DEBUG_IO_BASE@l
 943	mr	r4,r3
 944	clrrwi	r4,r4,12
 945	ori	r4,r4,(TLB_WR|TLB_I|TLB_M|TLB_G)
 946
 947	clrrwi	r3,r3,12
 948	ori	r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K))
 949
 950	li	r0,0			/* TLB slot 0 */
 951	tlbwe	r4,r0,TLB_DATA
 952	tlbwe	r3,r0,TLB_TAG
 953#endif /* CONFIG_SERIAL_TEXT_DEBUG && SERIAL_DEBUG_IO_BASE */
 954
 955	isync
 956
 957	/* Establish the exception vector base
 958	 */
 959	lis	r4,KERNELBASE@h		/* EVPR only uses the high 16-bits */
 960	tophys(r0,r4)			/* Use the physical address */
 961	mtspr	SPRN_EVPR,r0
 962
 963	blr
964
/* abort: request a whole-system reset by setting DBCR0[RST_SYSTEM].
 * There is deliberately no blr/terminator: the mtspr resets the chip, so
 * the apparent fall-through into set_context is presumably never reached
 * — TODO confirm on hardware that the reset is immediate. */
 965_GLOBAL(abort)
 966        mfspr   r13,SPRN_DBCR0
 967        oris    r13,r13,DBCR0_RST_SYSTEM@h
 968        mtspr   SPRN_DBCR0,r13
 969
/* set_context(context, pgdir): switch to a new MMU context by loading the
 * context number (r3) into PID.  The pgdir (r4) is only used for the
 * optional Abatron BDI2000 debugger handshake. */
 970_GLOBAL(set_context)
 971
 972#ifdef CONFIG_BDI_SWITCH
 973	/* Context switch the PTE pointer for the Abatron BDI2000.
 974	 * The PGDIR is the second parameter.
 975	 */
 976	lis	r5, KERNELBASE@h
 977	lwz	r5, 0xf0(r5)
 978	stw	r4, 0x4(r5)
 979#endif
 980	sync
 981	mtspr	SPRN_PID,r3
 982	isync				/* Need an isync to flush shadow */
 983					/* TLBs after changing PID */
 984	blr
985
 986/* We put a few things here that have to be page-aligned. This stuff
 987 * goes at the beginning of the data segment, which is page-aligned.
 988 */
 989	.data
 990	.align	12
 991	.globl	sdata
 992sdata:
 993	.globl	empty_zero_page
 994empty_zero_page:
 995	.space	4096
 996	.globl	swapper_pg_dir
 997swapper_pg_dir:
 998	.space	4096
 999
 1000
 1001/* Stack for handling critical exceptions from kernel mode */
 1002	.section .bss
 1003        .align 12
/* NOTE(review): the bottom label says "exception_" but the top-of-stack
 * label below says "critical_" — naming is inconsistent, though only
 * exception_stack_top is exported. */
 1004exception_stack_bottom:
 1005	.space	4096
 1006critical_stack_top:
 1007	.globl	exception_stack_top
 1008exception_stack_top:
 1009
 1010/* This space gets a copy of optional info passed to us by the bootstrap
 1011 * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
 1012 */
 1013	.globl	cmd_line
 1014cmd_line:
 1015	.space	512
 1016
 1017/* Room for two PTE pointers, usually the kernel and current user pointers
 1018 * to their respective root page table.
 1019 */
 1020abatron_pteptrs:
 1021	.space	8
diff --git a/arch/ppc/kernel/head_8xx.S b/arch/ppc/kernel/head_8xx.S
deleted file mode 100644
index 321bda2de2cb..000000000000
--- a/arch/ppc/kernel/head_8xx.S
+++ /dev/null
@@ -1,959 +0,0 @@
1/*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
5 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
6 * Low-level exception handlers and MMU support
7 * rewritten by Paul Mackerras.
8 * Copyright (C) 1996 Paul Mackerras.
9 * MPC8xx modifications by Dan Malek
10 * Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 * This file contains low-level support and setup for PowerPC 8xx
13 * embedded processors, including trap and interrupt dispatch.
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 *
20 */
21
22#include <asm/processor.h>
23#include <asm/page.h>
24#include <asm/mmu.h>
25#include <asm/cache.h>
26#include <asm/pgtable.h>
27#include <asm/cputable.h>
28#include <asm/thread_info.h>
29#include <asm/ppc_asm.h>
30#include <asm/asm-offsets.h>
31
 32/* Macro to make the code more readable. */
/* DO_8xx_CPU6(val, reg): workaround sequence used before certain mtspr
 * instructions when CONFIG_8xx_CPU6 is set — writes the magic command
 * value through memory (12(r0), the fixed low-memory scratch slot) and
 * reads it back.  Presumably required by the MPC860 "CPU6" erratum; see
 * cpu6_errata_word below.  Expands to nothing on unaffected parts. */
 33#ifdef CONFIG_8xx_CPU6
 34#define DO_8xx_CPU6(val, reg)	\
 35	li	reg, val;	\
 36	stw	reg, 12(r0);	\
 37	lwz	reg, 12(r0);
 38#else
 39#define DO_8xx_CPU6(val, reg)
 40#endif
 41	.text
 42	.globl	_stext
 43_stext:
 44	.text
 45	.globl	_start
 46_start:
 47
 48/* MPC8xx
 49 * This port was done on an MBX board with an 860.  Right now I only
 50 * support an ELF compressed (zImage) boot from EPPC-Bug because the
 51 * code there loads up some registers before calling us:
 52 *   r3: ptr to board info data
 53 *   r4: initrd_start or if no initrd then 0
 54 *   r5: initrd_end - unused if r4 is 0
 55 *   r6: Start of command line string
 56 *   r7: End of command line string
 57 *
 58 * I decided to use conditional compilation instead of checking PVR and
 59 * adding more processor specific branches around code I don't need.
 60 * Since this is an embedded processor, I also appreciate any memory
 61 * savings I can get.
 62 *
 63 * The MPC8xx does not have any BATs, but it supports large page sizes.
 64 * We first initialize the MMU to support 8M byte pages, then load one
 65 * entry into each of the instruction and data TLBs to map the first
 66 * 8M 1:1.  I also mapped an additional I/O space 1:1 so we can get to
 67 * the "internal" processor registers before MMU_init is called.
 68 *
 69 * The TLB code currently contains a major hack.  Since I use the condition
 70 * code register, I have to save and restore it.  I am out of registers, so
 71 * I just store it in memory location 0 (the TLB handlers are not reentrant).
 72 * To avoid making any decisions, I need to use the "segment" valid bit
 73 * in the first level table, but that would require many changes to the
 74 * Linux page directory/table functions that I don't want to do right now.
 75 *
 76 * I used to use SPRG2 for a temporary register in the TLB handler, but it
 77 * has since been put to other uses.  I now use a hack to save a register
 78 * and the CCR at memory location 0.....Someday I'll fix this.....
 79 *	-- Dan
 80 */
/* Kernel entry point: stash the firmware-supplied boot parameters
 * (r3..r7, see comment above) into callee-saved r31..r27, program the
 * initial MMU state, then rfi into start_here with translation enabled. */
 81	.globl	__start
 82__start:
 83	mr	r31,r3			/* save parameters */
 84	mr	r30,r4
 85	mr	r29,r5
 86	mr	r28,r6
 87	mr	r27,r7
 88
 89	/* We have to turn on the MMU right away so we get cache modes
 90	 * set correctly.
 91	 */
 92	bl	initial_mmu
 93
 94/* We now have the lower 8 Meg mapped into TLB entries, and the caches
 95 * ready to work.
 96 */
 97
 98turn_on_mmu:
 99	mfmsr	r0
 100	ori	r0,r0,MSR_DR|MSR_IR
 101	mtspr	SPRN_SRR1,r0
 102	lis	r0,start_here@h
 103	ori	r0,r0,start_here@l
 104	mtspr	SPRN_SRR0,r0
 105	SYNC
 106	rfi				/* enables MMU */
107
 108/*
 109 * Exception entry code.  This code runs with address translation
 110 * turned off, i.e. using physical addresses.
 111 * We assume sprg3 has the physical address of the current
 112 * task's thread_struct.
 113 */
/* On entry SPRG0/SPRG1 hold the scratched r10/r11; the prolog picks a
 * kernel stack (tophys(r1) if the fault came from kernel mode, else the
 * top of the task's kernel stack via SPRG3), carves an INT_FRAME, and
 * saves enough state that C handlers can run.  See Note below for what
 * survives in which register. */
 114#define EXCEPTION_PROLOG	\
 115	mtspr	SPRN_SPRG0,r10;	\
 116	mtspr	SPRN_SPRG1,r11;	\
 117	mfcr	r10;		\
 118	EXCEPTION_PROLOG_1;	\
 119	EXCEPTION_PROLOG_2
 120
 121#define EXCEPTION_PROLOG_1	\
 122	mfspr	r11,SPRN_SRR1;		/* check whether user or kernel */ \
 123	andi.	r11,r11,MSR_PR;	\
 124	tophys(r11,r1);			/* use tophys(r1) if kernel */ \
 125	beq	1f;		\
 126	mfspr	r11,SPRN_SPRG3;	\
 127	lwz	r11,THREAD_INFO-THREAD(r11);	\
 128	addi	r11,r11,THREAD_SIZE;	\
 129	tophys(r11,r11);	\
 1301:	subi	r11,r11,INT_FRAME_SIZE	/* alloc exc. frame */
 131
 132
 133#define EXCEPTION_PROLOG_2	\
 134	CLR_TOP32(r11);		\
 135	stw	r10,_CCR(r11);		/* save registers */ \
 136	stw	r12,GPR12(r11);	\
 137	stw	r9,GPR9(r11);	\
 138	mfspr	r10,SPRN_SPRG0;	\
 139	stw	r10,GPR10(r11);	\
 140	mfspr	r12,SPRN_SPRG1;	\
 141	stw	r12,GPR11(r11);	\
 142	mflr	r10;		\
 143	stw	r10,_LINK(r11);	\
 144	mfspr	r12,SPRN_SRR0;	\
 145	mfspr	r9,SPRN_SRR1;	\
 146	stw	r1,GPR1(r11);	\
 147	stw	r1,0(r11);	\
 148	tovirt(r1,r11);			/* set new kernel sp */	\
 149	li	r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \
 150	MTMSRD(r10);			/* (except for mach check in rtas) */ \
 151	stw	r0,GPR0(r11);	\
 152	SAVE_4GPRS(3, r11);	\
 153	SAVE_2GPRS(7, r11)
 154
 155/*
 156 * Note: code which follows this uses cr0.eq (set if from kernel),
 157 * r11, r12 (SRR0), and r9 (SRR1).
 158 *
 159 * Note2: once we have set r1 we are in a position to take exceptions
 160 * again, and we could thus set MSR:RI at that point.
 161 */
162
 163/*
 164 * Exception vectors.
 165 */
/* EXCEPTION(n, label, hdlr, xfer): place a vector at offset n that runs
 * the prolog, points r3 at the pt_regs, and transfers to hdlr via one of
 * the EXC_XFER_* flavours below.
 *
 * EXC_XFER_* variants:
 *   _STD  — full register save, trap number n, returns via
 *           ret_from_except_full.
 *   _LITE — lighter save path, trap number n+1 (odd trap numbers mark the
 *           "lite" save — TODO confirm against transfer_to_handler).
 *   _EE / _EE_LITE — same, but COPY_EE copies the faulting context's
 *           MSR_EE bit into the handler MSR (rlwimi of bit 16). */
 166#define EXCEPTION(n, label, hdlr, xfer)		\
 167	. = n;					\
 168label:						\
 169	EXCEPTION_PROLOG;			\
 170	addi	r3,r1,STACK_FRAME_OVERHEAD;	\
 171	xfer(n, hdlr)
 172
 173#define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret)	\
 174	li	r10,trap;					\
 175	stw	r10,TRAP(r11);					\
 176	li	r10,MSR_KERNEL;					\
 177	copyee(r10, r9);					\
 178	bl	tfer;						\
 179i##n:								\
 180	.long	hdlr;						\
 181	.long	ret
 182
 183#define COPY_EE(d, s)		rlwimi d,s,0,16,16
 184#define NOCOPY(d, s)
 185
 186#define EXC_XFER_STD(n, hdlr)		\
 187	EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full,	\
 188			  ret_from_except_full)
 189
 190#define EXC_XFER_LITE(n, hdlr)		\
 191	EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \
 192			  ret_from_except)
 193
 194#define EXC_XFER_EE(n, hdlr)		\
 195	EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \
 196			  ret_from_except_full)
 197
 198#define EXC_XFER_EE_LITE(n, hdlr)	\
 199	EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \
 200			  ret_from_except)
201
/* Architected exception vectors, 0x100..0x1000.  Each EXCEPTION() places
 * its code at the fixed offset via ". = n"; vectors with extra state to
 * capture (machine check, data/insn access, alignment) are spelled out. */
 202/* System reset */
 203	EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)
 204
 205/* Machine check */
 206	. = 0x200
 207MachineCheck:
 208	EXCEPTION_PROLOG
 209	mfspr r4,SPRN_DAR
 210	stw r4,_DAR(r11)
 211	mfspr r5,SPRN_DSISR
 212	stw r5,_DSISR(r11)
 213	addi r3,r1,STACK_FRAME_OVERHEAD
 214	EXC_XFER_STD(0x200, machine_check_exception)
 215
 216/* Data access exception.
 217 * This is "never generated" by the MPC8xx.  We jump to it for other
 218 * translation errors.
 219 */
 220	. = 0x300
 221DataAccess:
 222	EXCEPTION_PROLOG
 223	mfspr	r10,SPRN_DSISR
 224	stw	r10,_DSISR(r11)
 225	mr	r5,r10
 226	mfspr	r4,SPRN_DAR
 227	EXC_XFER_EE_LITE(0x300, handle_page_fault)
 228
 229/* Instruction access exception.
 230 * This is "never generated" by the MPC8xx.  We jump to it for other
 231 * translation errors.
 232 */
 233	. = 0x400
 234InstructionAccess:
 235	EXCEPTION_PROLOG
 236	mr	r4,r12
 237	mr	r5,r9
 238	EXC_XFER_EE_LITE(0x400, handle_page_fault)
 239
 240/* External interrupt */
 241	EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
 242
 243/* Alignment exception */
 244	. = 0x600
 245Alignment:
 246	EXCEPTION_PROLOG
 247	mfspr	r4,SPRN_DAR
 248	stw	r4,_DAR(r11)
 249	mfspr	r5,SPRN_DSISR
 250	stw	r5,_DSISR(r11)
 251	addi	r3,r1,STACK_FRAME_OVERHEAD
 252	EXC_XFER_EE(0x600, alignment_exception)
 253
 254/* Program check exception */
 255	EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
 256
 257/* No FPU on MPC8xx.  This exception is not supposed to happen.
 258*/
 259	EXCEPTION(0x800, FPUnavailable, unknown_exception, EXC_XFER_STD)
 260
 261/* Decrementer */
 262	EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
 263
 264	EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_EE)
 265	EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_EE)
 266
 267/* System call */
 268	. = 0xc00
 269SystemCall:
 270	EXCEPTION_PROLOG
 271	EXC_XFER_EE_LITE(0xc00, DoSyscall)
 272
 273/* Single step - not used on 601 */
 274	EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
 275	EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_EE)
 276	EXCEPTION(0xf00, Trap_0f, unknown_exception, EXC_XFER_EE)
 277
 278/* On the MPC8xx, this is a software emulation interrupt.  It occurs
 279 * for all unimplemented and illegal instructions.
 280 */
 281	EXCEPTION(0x1000, SoftEmu, SoftwareEmulation, EXC_XFER_STD)
 282
 283	. = 0x1100
 284/*
 285 * For the MPC8xx, this is a software tablewalk to load the instruction
 286 * TLB.  It is modelled after the example in the Motorola manual.  The task
 287 * switch loads the M_TWB register with the pointer to the first level table.
 288 * If we discover there is no second level table (value is zero) or if there
 289 * is an invalid pte, we load that into the TLB, which causes another fault
 290 * into the TLB Error interrupt where we can handle such problems.
 291 * We have to use the MD_xxx registers for the tablewalk because the
 292 * equivalent MI_xxx registers only perform the attribute functions.
 293 */
/* Runs translation-off; scratch state lives in low memory: CR at 0(r0),
 * r11 at 4(r0), r3 (CPU6 only) at 8(r0), r10 in SPRN_M_TW.  Not
 * reentrant — see the file-header comment. */
 294InstructionTLBMiss:
 295#ifdef CONFIG_8xx_CPU6
 296	stw	r3, 8(r0)
 297#endif
 298	DO_8xx_CPU6(0x3f80, r3)
 299	mtspr	SPRN_M_TW, r10	/* Save a couple of working registers */
 300	mfcr	r10
 301	stw	r10, 0(r0)
 302	stw	r11, 4(r0)
 303	mfspr	r10, SPRN_SRR0	/* Get effective address of fault */
 304	DO_8xx_CPU6(0x3780, r3)
 305	mtspr	SPRN_MD_EPN, r10	/* Have to use MD_EPN for walk, MI_EPN can't */
 306	mfspr	r10, SPRN_M_TWB	/* Get level 1 table entry address */
 307
 308	/* If we are faulting a kernel address, we have to use the
 309	 * kernel page tables.
 310	 */
 311	andi.	r11, r10, 0x0800	/* Address >= 0x80000000 */
 312	beq	3f
 313	lis	r11, swapper_pg_dir@h
 314	ori	r11, r11, swapper_pg_dir@l
 315	rlwimi	r10, r11, 0, 2, 19
 3163:
 317	lwz	r11, 0(r10)	/* Get the level 1 entry */
 318	rlwinm.	r10, r11,0,0,19	/* Extract page descriptor page address */
 319	beq	2f		/* If zero, don't try to find a pte */
 320
 321	/* We have a pte table, so load the MI_TWC with the attributes
 322	 * for this "segment."
 323	 */
 324	ori	r11,r11,1		/* Set valid bit */
 325	DO_8xx_CPU6(0x2b80, r3)
 326	mtspr	SPRN_MI_TWC, r11	/* Set segment attributes */
 327	DO_8xx_CPU6(0x3b80, r3)
 328	mtspr	SPRN_MD_TWC, r11	/* Load pte table base address */
 329	mfspr	r11, SPRN_MD_TWC	/* ....and get the pte address */
 330	lwz	r10, 0(r11)	/* Get the pte */
 331
 332#ifdef CONFIG_SWAP
 333	/* do not set the _PAGE_ACCESSED bit of a non-present page */
 334	andi.	r11, r10, _PAGE_PRESENT
 335	beq	4f
 336	ori	r10, r10, _PAGE_ACCESSED
 337	mfspr	r11, SPRN_MD_TWC	/* get the pte address again */
 338	stw	r10, 0(r11)
 3394:
 340#else
 341	ori	r10, r10, _PAGE_ACCESSED
 342	stw	r10, 0(r11)
 343#endif
 344
 345	/* The Linux PTE won't go exactly into the MMU TLB.
 346	 * Software indicator bits 21, 22 and 28 must be clear.
 347	 * Software indicator bits 24, 25, 26, and 27 must be
 348	 * set.  All other Linux PTE bits control the behavior
 349	 * of the MMU.
 350	 */
 3512:	li	r11, 0x00f0
 352	rlwimi	r10, r11, 0, 24, 28	/* Set 24-27, clear 28 */
 353	DO_8xx_CPU6(0x2d80, r3)
 354	mtspr	SPRN_MI_RPN, r10	/* Update TLB entry */
 355
 356	mfspr	r10, SPRN_M_TW	/* Restore registers */
 357	lwz	r11, 0(r0)
 358	mtcr	r11
 359	lwz	r11, 4(r0)
 360#ifdef CONFIG_8xx_CPU6
 361	lwz	r3, 8(r0)
 362#endif
 363	rfi
364
 365	. = 0x1200
/* Software tablewalk for the data TLB, same scratch-memory convention as
 * InstructionTLBMiss.  r3 is saved unconditionally here (not just for
 * CPU6) because the LoadLargeDTLB path below clobbers r3/r12 and
 * restores both from 8(r0)/16(r0). */
 366DataStoreTLBMiss:
 367	stw	r3, 8(r0)
 368	DO_8xx_CPU6(0x3f80, r3)
 369	mtspr	SPRN_M_TW, r10	/* Save a couple of working registers */
 370	mfcr	r10
 371	stw	r10, 0(r0)
 372	stw	r11, 4(r0)
 373	mfspr	r10, SPRN_M_TWB	/* Get level 1 table entry address */
 374
 375	/* If we are faulting a kernel address, we have to use the
 376	 * kernel page tables.
 377	 */
 378	andi.	r11, r10, 0x0800
 379	beq	3f
 380	lis	r11, swapper_pg_dir@h
 381	ori	r11, r11, swapper_pg_dir@l
 382	rlwimi	r10, r11, 0, 2, 19
 383	stw	r12, 16(r0)
 384	b	LoadLargeDTLB
 3853:
 386	lwz	r11, 0(r10)	/* Get the level 1 entry */
 387	rlwinm.	r10, r11,0,0,19	/* Extract page descriptor page address */
 388	beq	2f		/* If zero, don't try to find a pte */
 389
 390	/* We have a pte table, so load fetch the pte from the table.
 391	 */
 392	ori	r11, r11, 1	/* Set valid bit in physical L2 page */
 393	DO_8xx_CPU6(0x3b80, r3)
 394	mtspr	SPRN_MD_TWC, r11	/* Load pte table base address */
 395	mfspr	r10, SPRN_MD_TWC	/* ....and get the pte address */
 396	lwz	r10, 0(r10)	/* Get the pte */
 397
 398	/* Insert the Guarded flag into the TWC from the Linux PTE.
 399	 * It is bit 27 of both the Linux PTE and the TWC (at least
 400	 * I got that right :-).  It will be better when we can put
 401	 * this into the Linux pgd/pmd and load it in the operation
 402	 * above.
 403	 */
 404	rlwimi	r11, r10, 0, 27, 27
 405	DO_8xx_CPU6(0x3b80, r3)
 406	mtspr	SPRN_MD_TWC, r11
 407
 408#ifdef CONFIG_SWAP
 409	/* do not set the _PAGE_ACCESSED bit of a non-present page */
 410	andi.	r11, r10, _PAGE_PRESENT
 411	beq	4f
 412	ori	r10, r10, _PAGE_ACCESSED
 4134:
 414	/* and update pte in table */
 415#else
 416	ori	r10, r10, _PAGE_ACCESSED
 417#endif
 418	mfspr	r11, SPRN_MD_TWC	/* get the pte address again */
 419	stw	r10, 0(r11)
 420
 421	/* The Linux PTE won't go exactly into the MMU TLB.
 422	 * Software indicator bits 21, 22 and 28 must be clear.
 423	 * Software indicator bits 24, 25, 26, and 27 must be
 424	 * set.  All other Linux PTE bits control the behavior
 425	 * of the MMU.
 426	 */
 4272:	li	r11, 0x00f0
 428	rlwimi	r10, r11, 0, 24, 28	/* Set 24-27, clear 28 */
 429	DO_8xx_CPU6(0x3d80, r3)
 430	mtspr	SPRN_MD_RPN, r10	/* Update TLB entry */
 431
 432	mfspr	r10, SPRN_M_TW	/* Restore registers */
 433	lwz	r11, 0(r0)
 434	mtcr	r11
 435	lwz	r11, 4(r0)
 436	lwz	r3, 8(r0)
 437	rfi
438
 439/* This is an instruction TLB error on the MPC8xx.  This could be due
 440 * to many reasons, such as executing guarded memory or illegal instruction
 441 * addresses.  There is nothing to do but handle a big time error fault.
 442 */
 443	. = 0x1300
/* Delegates straight to the 0x400 InstructionAccess path (no state saved
 * yet, so a plain branch is safe here). */
 444InstructionTLBError:
 445	b	InstructionAccess
446
/* Kernel-address continuation of DataStoreTLBMiss: try to satisfy the
 * miss with an 8M large page when the PTE's physical address equals
 * tophys() of the faulting virtual address (i.e. a direct kernel
 * mapping); otherwise fall back to a normal 4k entry.  r12 is a flag
 * (1 = using 8M page); entered with CR/r11/r3/r12 saved at
 * 0/4/8/16(r0) by the caller. */
 447LoadLargeDTLB:
 448	li	r12, 0
 449	lwz	r11, 0(r10)	/* Get the level 1 entry */
 450	rlwinm.	r10, r11,0,0,19	/* Extract page descriptor page address */
 451	beq	3f		/* If zero, don't try to find a pte */
 452
 453	/* We have a pte table, so load fetch the pte from the table.
 454	 */
 455	ori	r11, r11, 1	/* Set valid bit in physical L2 page */
 456	DO_8xx_CPU6(0x3b80, r3)
 457	mtspr	SPRN_MD_TWC, r11	/* Load pte table base address */
 458	mfspr	r10, SPRN_MD_TWC	/* ....and get the pte address */
 459	lwz	r10, 0(r10)	/* Get the pte */
 460
 461	/* Insert the Guarded flag into the TWC from the Linux PTE.
 462	 * It is bit 27 of both the Linux PTE and the TWC (at least
 463	 * I got that right :-).  It will be better when we can put
 464	 * this into the Linux pgd/pmd and load it in the operation
 465	 * above.
 466	 */
 467	rlwimi	r11, r10, 0, 27, 27
 468
 469	rlwimi	r12, r10, 0, 0, 9	/* extract phys. addr */
 470	mfspr	r3, SPRN_MD_EPN
 471	rlwinm	r3, r3, 0, 0, 9		/* extract virtual address */
 472	tophys(r3, r3)
 473	cmpw	r3, r12			/* only use 8M page if it is a direct
 474					   kernel mapping */
 475	bne	1f
 476	ori	r11, r11, MD_PS8MEG
 477	li	r12, 1
 478	b	2f
 4791:
 480	li	r12, 0		/* can't use 8MB TLB, so zero r12. */
 4812:
 482	DO_8xx_CPU6(0x3b80, r3)
 483	mtspr	SPRN_MD_TWC, r11
 484
 485	/* The Linux PTE won't go exactly into the MMU TLB.
 486	 * Software indicator bits 21, 22 and 28 must be clear.
 487	 * Software indicator bits 24, 25, 26, and 27 must be
 488	 * set.  All other Linux PTE bits control the behavior
 489	 * of the MMU.
 490	 */
 4913:	li	r11, 0x00f0
 492	rlwimi	r10, r11, 0, 24, 28	/* Set 24-27, clear 28 */
 493	cmpwi	r12, 1
 494	bne	4f
 495	ori	r10, r10, 0x8
	/* 8M page: mask EPN/RPN bits 10-19 so the entry covers the whole
	 * 8M naturally-aligned region. */
 496
 497	mfspr	r12, SPRN_MD_EPN
 498	lis	r3, 0xff80		/* 10-19 must be clear for 8MB TLB */
 499	ori	r3, r3, 0x0fff
 500	and	r12, r3, r12
 501	DO_8xx_CPU6(0x3780, r3)
 502	mtspr	SPRN_MD_EPN, r12
 503
 504	lis	r3, 0xff80		/* 10-19 must be clear for 8MB TLB */
 505	ori	r3, r3, 0x0fff
 506	and	r10, r3, r10
 5074:
 508	DO_8xx_CPU6(0x3d80, r3)
 509	mtspr	SPRN_MD_RPN, r10	/* Update TLB entry */
 510
 511	mfspr	r10, SPRN_M_TW	/* Restore registers */
 512	lwz	r11, 0(r0)
 513	mtcr	r11
 514	lwz	r11, 4(r0)
 515
 516	lwz	r12, 16(r0)
 517	lwz	r3, 8(r0)
 518	rfi
519
 520/* This is the data TLB error on the MPC8xx.  This could be due to
 521 * many reasons, including a dirty update to a pte.  We can catch that
 522 * one here, but anything else is an error.  First, we track down the
 523 * Linux pte.  If it is valid, write access is allowed, but the
 524 * page dirty bit is not set, we will set it and reload the TLB.  For
 525 * any other case, we bail out to a higher level function that can
 526 * handle it.
 527 */
/* Fast path: dirty-bit update for a writable PTE.  Anything else takes
 * the 2: exit, which restores state and branches to DataAccess for the
 * full page-fault path. */
 528	. = 0x1400
 529DataTLBError:
 530#ifdef CONFIG_8xx_CPU6
 531	stw	r3, 8(r0)
 532#endif
 533	DO_8xx_CPU6(0x3f80, r3)
 534	mtspr	SPRN_M_TW, r10	/* Save a couple of working registers */
 535	mfcr	r10
 536	stw	r10, 0(r0)
 537	stw	r11, 4(r0)
 538
 539	/* First, make sure this was a store operation.
 540	*/
 541	mfspr	r10, SPRN_DSISR
 542	andis.	r11, r10, 0x0200	/* If set, indicates store op */
 543	beq	2f
 544
 545	/* The EA of a data TLB miss is automatically stored in the MD_EPN
 546	 * register.  The EA of a data TLB error is automatically stored in
 547	 * the DAR, but not the MD_EPN register.  We must copy the 20 most
 548	 * significant bits of the EA from the DAR to MD_EPN before we
 549	 * start walking the page tables.  We also need to copy the CASID
 550	 * value from the M_CASID register.
 551	 * Addendum:  The EA of a data TLB error is _supposed_ to be stored
 552	 * in DAR, but it seems that this doesn't happen in some cases, such
 553	 * as when the error is due to a dcbi instruction to a page with a
 554	 * TLB that doesn't have the changed bit set.  In such cases, there
 555	 * does not appear to be any way to recover the EA of the error
 556	 * since it is neither in DAR nor MD_EPN.  As a workaround, the
 557	 * _PAGE_HWWRITE bit is set for all kernel data pages when the PTEs
 558	 * are initialized in mapin_ram().  This will avoid the problem,
 559	 * assuming we only use the dcbi instruction on kernel addresses.
 560	 */
 561	mfspr	r10, SPRN_DAR
 562	rlwinm	r11, r10, 0, 0, 19
 563	ori	r11, r11, MD_EVALID
 564	mfspr	r10, SPRN_M_CASID
 565	rlwimi	r11, r10, 0, 28, 31
 566	DO_8xx_CPU6(0x3780, r3)
 567	mtspr	SPRN_MD_EPN, r11
 568
 569	mfspr	r10, SPRN_M_TWB	/* Get level 1 table entry address */
 570
 571	/* If we are faulting a kernel address, we have to use the
 572	 * kernel page tables.
 573	 */
 574	andi.	r11, r10, 0x0800
 575	beq	3f
 576	lis	r11, swapper_pg_dir@h
 577	ori	r11, r11, swapper_pg_dir@l
 578	rlwimi	r10, r11, 0, 2, 19
 5793:
 580	lwz	r11, 0(r10)	/* Get the level 1 entry */
 581	rlwinm.	r10, r11,0,0,19	/* Extract page descriptor page address */
 582	beq	2f		/* If zero, bail */
 583
 584	/* We have a pte table, so fetch the pte from the table.
 585	 */
 586	ori	r11, r11, 1	/* Set valid bit in physical L2 page */
 587	DO_8xx_CPU6(0x3b80, r3)
 588	mtspr	SPRN_MD_TWC, r11	/* Load pte table base address */
 589	mfspr	r11, SPRN_MD_TWC	/* ....and get the pte address */
 590	lwz	r10, 0(r11)	/* Get the pte */
 591
 592	andi.	r11, r10, _PAGE_RW	/* Is it writeable? */
 593	beq	2f		/* Bail out if not */
 594
 595	/* Update 'changed', among others.
 596	*/
 597#ifdef CONFIG_SWAP
 598	ori	r10, r10, _PAGE_DIRTY|_PAGE_HWWRITE
 599	/* do not set the _PAGE_ACCESSED bit of a non-present page */
 600	andi.	r11, r10, _PAGE_PRESENT
 601	beq	4f
 602	ori	r10, r10, _PAGE_ACCESSED
 6034:
 604#else
 605	ori	r10, r10, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
 606#endif
 607	mfspr	r11, SPRN_MD_TWC	/* Get pte address again */
 608	stw	r10, 0(r11)		/* and update pte in table */
 609
 610	/* The Linux PTE won't go exactly into the MMU TLB.
 611	 * Software indicator bits 21, 22 and 28 must be clear.
 612	 * Software indicator bits 24, 25, 26, and 27 must be
 613	 * set.  All other Linux PTE bits control the behavior
 614	 * of the MMU.
 615	 */
 616	li	r11, 0x00f0
 617	rlwimi	r10, r11, 0, 24, 28	/* Set 24-27, clear 28 */
 618	DO_8xx_CPU6(0x3d80, r3)
 619	mtspr	SPRN_MD_RPN, r10	/* Update TLB entry */
 620
 621	mfspr	r10, SPRN_M_TW	/* Restore registers */
 622	lwz	r11, 0(r0)
 623	mtcr	r11
 624	lwz	r11, 4(r0)
 625#ifdef CONFIG_8xx_CPU6
 626	lwz	r3, 8(r0)
 627#endif
 628	rfi
 6292:
 630	mfspr	r10, SPRN_M_TW	/* Restore registers */
 631	lwz	r11, 0(r0)
 632	mtcr	r11
 633	lwz	r11, 4(r0)
 634#ifdef CONFIG_8xx_CPU6
 635	lwz	r3, 8(r0)
 636#endif
 637	b	DataAccess
638
/* Remaining (unused) vectors up to the end of the architected vector
 * space; ". = 0x2000" pins the end so ordinary code starts after it. */
 639	EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
 640	EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
 641	EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_EE)
 642	EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
 643	EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
 644	EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE)
 645	EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE)
 646
 647/* On the MPC8xx, these next four traps are used for development
 648 * support of breakpoints and such.  Someday I will get around to
 649 * using them.
 650 */
 651	EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE)
 652	EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE)
 653	EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE)
 654	EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE)
 655
 656	. = 0x2000
 657
/* No FPU on the 8xx either — giveup_fpu is a no-op stub (see 4xx note). */
 658	.globl	giveup_fpu
 659giveup_fpu:
 660	blr
661
 662/*
 663 * This is where the main kernel code starts.
 664 */
/* Mirrors the 4xx start_here: set up init task/stack, run early_init /
 * machine_init / MMU_init, then drop translation, wipe the boot TLB
 * entries, point M_TWB at swapper_pg_dir and rfi into start_kernel. */
 665start_here:
 666	/* ptr to current */
 667	lis	r2,init_task@h
 668	ori	r2,r2,init_task@l
 669
 670	/* ptr to phys current thread */
 671	tophys(r4,r2)
 672	addi	r4,r4,THREAD	/* init task's THREAD */
 673	mtspr	SPRN_SPRG3,r4
 674	li	r3,0
 675	mtspr	SPRN_SPRG2,r3	/* 0 => r1 has kernel sp */
 676
 677	/* stack */
 678	lis	r1,init_thread_union@ha
 679	addi	r1,r1,init_thread_union@l
 680	li	r0,0
 681	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
 682
 683	bl	early_init	/* We have to do this with MMU on */
 684
 685/*
 686 * Decide what sort of machine this is and initialize the MMU.
 687 */
 688	mr	r3,r31
 689	mr	r4,r30
 690	mr	r5,r29
 691	mr	r6,r28
 692	mr	r7,r27
 693	bl	machine_init
 694	bl	MMU_init
 695
 696/*
 697 * Go back to running unmapped so we can load up new values
 698 * and change to using our exception vectors.
 699 * On the 8xx, all we have to do is invalidate the TLB to clear
 700 * the old 8M byte TLB mappings and load the page table base register.
 701 */
 702	/* The right way to do this would be to track it down through
 703	 * init's THREAD like the context switch code does, but this is
 704	 * easier......until someone changes init's static structures.
 705	 */
 706	lis	r6, swapper_pg_dir@h
 707	ori	r6, r6, swapper_pg_dir@l
 708	tophys(r6,r6)
 709#ifdef CONFIG_8xx_CPU6
 710	lis	r4, cpu6_errata_word@h
 711	ori	r4, r4, cpu6_errata_word@l
 712	li	r3, 0x3980
 713	stw	r3, 12(r4)
 714	lwz	r3, 12(r4)
 715#endif
 716	mtspr	SPRN_M_TWB, r6
 717	lis	r4,2f@h
 718	ori	r4,r4,2f@l
 719	tophys(r4,r4)
 720	li	r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
 721	mtspr	SPRN_SRR0,r4
 722	mtspr	SPRN_SRR1,r3
 723	rfi
 724/* Load up the kernel context */
 7252:
 726	SYNC			/* Force all PTE updates to finish */
 727	tlbia			/* Clear all TLB entries */
 728	sync			/* wait for tlbia/tlbie to finish */
 729	TLBSYNC			/* ... on all CPUs */
 730
 731	/* set up the PTE pointers for the Abatron bdiGDB.
 732	 */
 733	tovirt(r6,r6)
 734	lis	r5, abatron_pteptrs@h
 735	ori	r5, r5, abatron_pteptrs@l
 736	stw	r5, 0xf0(r0)	/* Must match your Abatron config file */
 737	tophys(r5,r5)
 738	stw	r6, 0(r5)
 739
 740/* Now turn on the MMU for real! */
 741	li	r4,MSR_KERNEL
 742	lis	r3,start_kernel@h
 743	ori	r3,r3,start_kernel@l
 744	mtspr	SPRN_SRR0,r3
 745	mtspr	SPRN_SRR1,r4
 746	rfi			/* enable MMU and jump to start_kernel */
747
 748/* Set up the initial MMU state so we can do the first level of
 749 * kernel initialization.  This maps the first 8 MBytes of memory 1:1
 750 * virtual to physical.  Also, set the cache mode since that is defined
 751 * by TLB entries and perform any additional mapping (like of the IMMR).
 752 * If configured to pin some TLBs, we pin the first 8 Mbytes of kernel,
 753 * 24 Mbytes of data, and the 8M IMMR space.  Anything not covered by
 754 * these mappings is mapped by page tables.
 755 */
 756initial_mmu:
 757	tlbia			/* Invalidate all TLB entries */
 758#ifdef CONFIG_PIN_TLB
 759	lis	r8, MI_RSV4I@h
 760	ori	r8, r8, 0x1c00
 761#else
 762	li	r8, 0
 763#endif
 764	mtspr	SPRN_MI_CTR, r8	/* Set instruction MMU control */
 765
 766#ifdef CONFIG_PIN_TLB
 767	lis	r10, (MD_RSV4I | MD_RESETVAL)@h
 768	ori	r10, r10, 0x1c00
 769	mr	r8, r10
 770#else
 771	lis	r10, MD_RESETVAL@h
 772#endif
 773#ifndef CONFIG_8xx_COPYBACK
 774	oris	r10, r10, MD_WTDEF@h
 775#endif
 776	mtspr	SPRN_MD_CTR, r10	/* Set data TLB control */
 777
 778	/* Now map the lower 8 Meg into the TLBs.  For this quick hack,
 779	 * we can load the instruction and data TLB registers with the
 780	 * same values.
 781	 */
 782	lis	r8, KERNELBASE@h	/* Create vaddr for TLB */
 783	ori	r8, r8, MI_EVALID	/* Mark it valid */
 784	mtspr	SPRN_MI_EPN, r8
 785	mtspr	SPRN_MD_EPN, r8
 786	li	r8, MI_PS8MEG		/* Set 8M byte page */
 787	ori	r8, r8, MI_SVALID	/* Make it valid */
 788	mtspr	SPRN_MI_TWC, r8
 789	mtspr	SPRN_MD_TWC, r8
 790	li	r8, MI_BOOTINIT		/* Create RPN for address 0 */
 791	mtspr	SPRN_MI_RPN, r8		/* Store TLB entry */
 792	mtspr	SPRN_MD_RPN, r8
 793	lis	r8, MI_Kp@h		/* Set the protection mode */
 794	mtspr	SPRN_MI_AP, r8
 795	mtspr	SPRN_MD_AP, r8
 796
 797	/* Map another 8 MByte at the IMMR to get the processor
 798	 * internal registers (among other things).
 799	 */
 800#ifdef CONFIG_PIN_TLB
 801	addi	r10, r10, 0x0100
 802	mtspr	SPRN_MD_CTR, r10
 803#endif
	/* 638 is the raw SPR number of the IMMR register — no SPRN_
	 * constant was used here. */
 804	mfspr	r9, 638			/* Get current IMMR */
 805	andis.	r9, r9, 0xff80		/* Get 8Mbyte boundary */
 806
 807	mr	r8, r9			/* Create vaddr for TLB */
 808	ori	r8, r8, MD_EVALID	/* Mark it valid */
 809	mtspr	SPRN_MD_EPN, r8
 810	li	r8, MD_PS8MEG		/* Set 8M byte page */
 811	ori	r8, r8, MD_SVALID	/* Make it valid */
 812	mtspr	SPRN_MD_TWC, r8
 813	mr	r8, r9			/* Create paddr for TLB */
 814	ori	r8, r8, MI_BOOTINIT|0x2 /* Inhibit cache -- Cort */
 815	mtspr	SPRN_MD_RPN, r8
 816
 817#ifdef CONFIG_PIN_TLB
 818	/* Map two more 8M kernel data pages.
 819	 */
 820	addi	r10, r10, 0x0100
 821	mtspr	SPRN_MD_CTR, r10
 822
 823	lis	r8, KERNELBASE@h	/* Create vaddr for TLB */
 824	addis	r8, r8, 0x0080		/* Add 8M */
 825	ori	r8, r8, MI_EVALID	/* Mark it valid */
 826	mtspr	SPRN_MD_EPN, r8
 827	li	r9, MI_PS8MEG		/* Set 8M byte page */
 828	ori	r9, r9, MI_SVALID	/* Make it valid */
 829	mtspr	SPRN_MD_TWC, r9
 830	li	r11, MI_BOOTINIT	/* Create RPN for address 0 */
 831	addis	r11, r11, 0x0080	/* Add 8M */
 832	mtspr	SPRN_MD_RPN, r11
 833
 834	addi	r10, r10, 0x0100
 835	mtspr	SPRN_MD_CTR, r10
 836
 837	addis	r8, r8, 0x0080		/* Add 8M */
 838	mtspr	SPRN_MD_EPN, r8
 839	mtspr	SPRN_MD_TWC, r9
 840	addis	r11, r11, 0x0080	/* Add 8M */
 841	mtspr	SPRN_MD_RPN, r11
 842#endif
 843
 844	/* Since the cache is enabled according to the information we
 845	 * just loaded into the TLB, invalidate and enable the caches here.
 846	 * We should probably check/set other modes....later.
 847	 */
 848	lis	r8, IDC_INVALL@h
 849	mtspr	SPRN_IC_CST, r8
 850	mtspr	SPRN_DC_CST, r8
 851	lis	r8, IDC_ENABLE@h
 852	mtspr	SPRN_IC_CST, r8
 853#ifdef CONFIG_8xx_COPYBACK
 854	mtspr	SPRN_DC_CST, r8
 855#else
 856	/* For a debug option, I left this here to easily enable
 857	 * the write through cache mode
 858	 */
 859	lis	r8, DC_SFWT@h
 860	mtspr	SPRN_DC_CST, r8
 861	lis	r8, IDC_ENABLE@h
 862	mtspr	SPRN_DC_CST, r8
 863#endif
 864	blr
865
866
 867/*
 868 * Set up to use a given MMU context.
 869 * r3 is context number, r4 is PGD pointer.
 870 *
 871 * We place the physical address of the new task page directory loaded
 872 * into the MMU base register, and set the ASID compare register with
 873 * the new "context."
 874 */
 875_GLOBAL(set_context)
 876
 877#ifdef CONFIG_BDI_SWITCH
 878	/* Context switch the PTE pointer for the Abatron BDI2000.
 879	 * The PGDIR is passed as second argument.
 880	 */
 881	lis	r5, KERNELBASE@h
 882	lwz	r5, 0xf0(r5)
 883	stw	r4, 0x4(r5)
 884#endif
 885
	/* NOTE(review): the CPU6 path writes M_TWB before M_CASID, while
	 * the non-CPU6 path does the opposite order — presumably benign,
	 * but confirm before relying on the ordering. */
 886#ifdef CONFIG_8xx_CPU6
 887	lis	r6, cpu6_errata_word@h
 888	ori	r6, r6, cpu6_errata_word@l
 889	tophys	(r4, r4)
 890	li	r7, 0x3980
 891	stw	r7, 12(r6)
 892	lwz	r7, 12(r6)
 893	mtspr	SPRN_M_TWB, r4		/* Update MMU base address */
 894	li	r7, 0x3380
 895	stw	r7, 12(r6)
 896	lwz	r7, 12(r6)
 897	mtspr	SPRN_M_CASID, r3	/* Update context */
 898#else
 899	mtspr	SPRN_M_CASID,r3		/* Update context */
 900	tophys	(r4, r4)
 901	mtspr	SPRN_M_TWB, r4		/* and pgd */
 902#endif
 903	SYNC
 904	blr
905
 906#ifdef CONFIG_8xx_CPU6
 907/* It's here because it is unique to the 8xx.
 908 * It is important we get called with interrupts disabled.  I used to
 909 * do that, but it appears that all code that calls this already had
 910 * interrupt disabled.
 911 */
/* set_dec_cpu6(r3 = new decrementer value): CPU6-errata-safe write of
 * the DEC SPR (raw SPR 22), using the same scratch-word dance as
 * DO_8xx_CPU6 but via cpu6_errata_word+8. */
 912	.globl	set_dec_cpu6
set_dec_cpu6:
 914	lis	r7, cpu6_errata_word@h
 915	ori	r7, r7, cpu6_errata_word@l
 916	li	r4, 0x2c00
 917	stw	r4, 8(r7)
 918	lwz	r4, 8(r7)
 919	mtspr	22, r3		/* Update Decrementer */
 920	SYNC
 921	blr
 922#endif
923
 924/*
 925 * We put a few things here that have to be page-aligned.
 926 * This stuff goes at the beginning of the data segment,
 927 * which is page-aligned.
 928 */
 929	.data
 930	.globl	sdata
sdata:
 932	.globl	empty_zero_page
empty_zero_page:
 934	.space	4096
 935
 936	.globl	swapper_pg_dir
swapper_pg_dir:
 938	.space	4096
 939
 940/*
 941 * This space gets a copy of optional info passed to us by the bootstrap
 942 * Used to pass parameters into the kernel like root=/dev/sda1, etc.
 943 */
 944	.globl	cmd_line
cmd_line:
 946	.space	512
 947
 948/* Room for two PTE table pointers, usually the kernel and current user
 949 * pointer to their respective root page table (pgdir).
 950 */
abatron_pteptrs:
 952	.space	8
 953
 954#ifdef CONFIG_8xx_CPU6
 955	.globl	cpu6_errata_word
/* Scratch words for the CPU6 errata sequences: +8 used by set_dec_cpu6,
 * +12 by DO_8xx_CPU6 / set_context / start_here. */
cpu6_errata_word:
 957	.space	16
 958#endif
959
diff --git a/arch/ppc/kernel/head_booke.h b/arch/ppc/kernel/head_booke.h
deleted file mode 100644
index 166d597b6db2..000000000000
--- a/arch/ppc/kernel/head_booke.h
+++ /dev/null
@@ -1,308 +0,0 @@
1#ifndef __HEAD_BOOKE_H__
2#define __HEAD_BOOKE_H__
3
4/*
5 * Macros used for common Book-e exception handling
6 */
7
8#define SET_IVOR(vector_number, vector_label) \
9 li r26,vector_label@l; \
10 mtspr SPRN_IVOR##vector_number,r26; \
11 sync
12
13#define NORMAL_EXCEPTION_PROLOG \
14 mtspr SPRN_SPRG0,r10; /* save two registers to work with */\
15 mtspr SPRN_SPRG1,r11; \
16 mtspr SPRN_SPRG4W,r1; \
17 mfcr r10; /* save CR in r10 for now */\
18 mfspr r11,SPRN_SRR1; /* check whether user or kernel */\
19 andi. r11,r11,MSR_PR; \
20 beq 1f; \
21 mfspr r1,SPRN_SPRG3; /* if from user, start at top of */\
22 lwz r1,THREAD_INFO-THREAD(r1); /* this thread's kernel stack */\
23 addi r1,r1,THREAD_SIZE; \
241: subi r1,r1,INT_FRAME_SIZE; /* Allocate an exception frame */\
25 mr r11,r1; \
26 stw r10,_CCR(r11); /* save various registers */\
27 stw r12,GPR12(r11); \
28 stw r9,GPR9(r11); \
29 mfspr r10,SPRN_SPRG0; \
30 stw r10,GPR10(r11); \
31 mfspr r12,SPRN_SPRG1; \
32 stw r12,GPR11(r11); \
33 mflr r10; \
34 stw r10,_LINK(r11); \
35 mfspr r10,SPRN_SPRG4R; \
36 mfspr r12,SPRN_SRR0; \
37 stw r10,GPR1(r11); \
38 mfspr r9,SPRN_SRR1; \
39 stw r10,0(r11); \
40 rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
41 stw r0,GPR0(r11); \
42 SAVE_4GPRS(3, r11); \
43 SAVE_2GPRS(7, r11)
44
45/* To handle the additional exception priority levels on 40x and Book-E
46 * processors we allocate a 4k stack per additional priority level. The various
47 * head_xxx.S files allocate space (exception_stack_top) for each priority's
48 * stack times the number of CPUs
49 *
50 * On 40x critical is the only additional level
51 * On 44x/e500 we have critical and machine check
52 * On e200 we have critical and debug (machine check occurs via critical)
53 *
54 * Additionally we reserve a SPRG for each priority level so we can free up a
55 * GPR to use as the base for indirect access to the exception stacks. This
56 * is necessary since the MMU is always on, for Book-E parts, and the stacks
57 * are offset from KERNELBASE.
58 *
59 */
60#define BOOKE_EXCEPTION_STACK_SIZE (8192)
61
62/* CRIT_SPRG only used in critical exception handling */
63#define CRIT_SPRG SPRN_SPRG2
64/* MCHECK_SPRG only used in machine check exception handling */
65#define MCHECK_SPRG SPRN_SPRG6W
66
67#define MCHECK_STACK_TOP (exception_stack_top - 4096)
68#define CRIT_STACK_TOP (exception_stack_top)
69
70/* only on e200 for now */
71#define DEBUG_STACK_TOP (exception_stack_top - 4096)
72#define DEBUG_SPRG SPRN_SPRG6W
73
74#ifdef CONFIG_SMP
75#define BOOKE_LOAD_EXC_LEVEL_STACK(level) \
76 mfspr r8,SPRN_PIR; \
77 mulli r8,r8,BOOKE_EXCEPTION_STACK_SIZE; \
78 neg r8,r8; \
79 addis r8,r8,level##_STACK_TOP@ha; \
80 addi r8,r8,level##_STACK_TOP@l
81#else
82#define BOOKE_LOAD_EXC_LEVEL_STACK(level) \
83 lis r8,level##_STACK_TOP@h; \
84 ori r8,r8,level##_STACK_TOP@l
85#endif
86
87/*
88 * Exception prolog for critical/machine check exceptions. This is a
89 * little different from the normal exception prolog above since a
90 * critical/machine check exception can potentially occur at any point
91 * during normal exception processing. Thus we cannot use the same SPRG
92 * registers as the normal prolog above. Instead we use a portion of the
93 * critical/machine check exception stack at low physical addresses.
94 */
95#define EXC_LEVEL_EXCEPTION_PROLOG(exc_level, exc_level_srr0, exc_level_srr1) \
96 mtspr exc_level##_SPRG,r8; \
97 BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);/* r8 points to the exc_level stack*/ \
98 stw r10,GPR10-INT_FRAME_SIZE(r8); \
99 stw r11,GPR11-INT_FRAME_SIZE(r8); \
100 mfcr r10; /* save CR in r10 for now */\
101 mfspr r11,exc_level_srr1; /* check whether user or kernel */\
102 andi. r11,r11,MSR_PR; \
103 mr r11,r8; \
104 mfspr r8,exc_level##_SPRG; \
105 beq 1f; \
106 /* COMING FROM USER MODE */ \
107 mfspr r11,SPRN_SPRG3; /* if from user, start at top of */\
108 lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
109 addi r11,r11,THREAD_SIZE; \
1101: subi r11,r11,INT_FRAME_SIZE; /* Allocate an exception frame */\
111 stw r10,_CCR(r11); /* save various registers */\
112 stw r12,GPR12(r11); \
113 stw r9,GPR9(r11); \
114 mflr r10; \
115 stw r10,_LINK(r11); \
116 mfspr r12,SPRN_DEAR; /* save DEAR and ESR in the frame */\
117 stw r12,_DEAR(r11); /* since they may have had stuff */\
118 mfspr r9,SPRN_ESR; /* in them at the point where the */\
119 stw r9,_ESR(r11); /* exception was taken */\
120 mfspr r12,exc_level_srr0; \
121 stw r1,GPR1(r11); \
122 mfspr r9,exc_level_srr1; \
123 stw r1,0(r11); \
124 mr r1,r11; \
125 rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
126 stw r0,GPR0(r11); \
127 SAVE_4GPRS(3, r11); \
128 SAVE_2GPRS(7, r11)
129
130#define CRITICAL_EXCEPTION_PROLOG \
131 EXC_LEVEL_EXCEPTION_PROLOG(CRIT, SPRN_CSRR0, SPRN_CSRR1)
132#define DEBUG_EXCEPTION_PROLOG \
133 EXC_LEVEL_EXCEPTION_PROLOG(DEBUG, SPRN_DSRR0, SPRN_DSRR1)
134#define MCHECK_EXCEPTION_PROLOG \
135 EXC_LEVEL_EXCEPTION_PROLOG(MCHECK, SPRN_MCSRR0, SPRN_MCSRR1)
136
137/*
138 * Exception vectors.
139 */
140#define START_EXCEPTION(label) \
141 .align 5; \
142label:
143
144#define FINISH_EXCEPTION(func) \
145 bl transfer_to_handler_full; \
146 .long func; \
147 .long ret_from_except_full
148
149#define EXCEPTION(n, label, hdlr, xfer) \
150 START_EXCEPTION(label); \
151 NORMAL_EXCEPTION_PROLOG; \
152 addi r3,r1,STACK_FRAME_OVERHEAD; \
153 xfer(n, hdlr)
154
155#define CRITICAL_EXCEPTION(n, label, hdlr) \
156 START_EXCEPTION(label); \
157 CRITICAL_EXCEPTION_PROLOG; \
158 addi r3,r1,STACK_FRAME_OVERHEAD; \
159 EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
160 NOCOPY, crit_transfer_to_handler, \
161 ret_from_crit_exc)
162
163#define MCHECK_EXCEPTION(n, label, hdlr) \
164 START_EXCEPTION(label); \
165 MCHECK_EXCEPTION_PROLOG; \
166 mfspr r5,SPRN_ESR; \
167 stw r5,_ESR(r11); \
168 addi r3,r1,STACK_FRAME_OVERHEAD; \
169 EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
170 NOCOPY, mcheck_transfer_to_handler, \
171 ret_from_mcheck_exc)
172
173#define EXC_XFER_TEMPLATE(hdlr, trap, msr, copyee, tfer, ret) \
174 li r10,trap; \
175 stw r10,TRAP(r11); \
176 lis r10,msr@h; \
177 ori r10,r10,msr@l; \
178 copyee(r10, r9); \
179 bl tfer; \
180 .long hdlr; \
181 .long ret
182
183#define COPY_EE(d, s) rlwimi d,s,0,16,16
184#define NOCOPY(d, s)
185
186#define EXC_XFER_STD(n, hdlr) \
187 EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, NOCOPY, transfer_to_handler_full, \
188 ret_from_except_full)
189
190#define EXC_XFER_LITE(n, hdlr) \
191 EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, NOCOPY, transfer_to_handler, \
192 ret_from_except)
193
194#define EXC_XFER_EE(n, hdlr) \
195 EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, COPY_EE, transfer_to_handler_full, \
196 ret_from_except_full)
197
198#define EXC_XFER_EE_LITE(n, hdlr) \
199 EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, COPY_EE, transfer_to_handler, \
200 ret_from_except)
201
202/* Check for a single step debug exception while in an exception
203 * handler before state has been saved. This is to catch the case
204 * where an instruction that we are trying to single step causes
205 * an exception (eg ITLB/DTLB miss) and thus the first instruction of
206 * the exception handler generates a single step debug exception.
207 *
208 * If we get a debug trap on the first instruction of an exception handler,
209 * we reset the MSR_DE in the _exception handler's_ MSR (the debug trap is
210 * a critical exception, so we are using SPRN_CSRR1 to manipulate the MSR).
211 * The exception handler was handling a non-critical interrupt, so it will
212 * save (and later restore) the MSR via SPRN_CSRR1, which will still have
213 * the MSR_DE bit set.
214 */
215#define DEBUG_EXCEPTION \
216 START_EXCEPTION(Debug); \
217 CRITICAL_EXCEPTION_PROLOG; \
218 \
219 /* \
220 * If there is a single step or branch-taken exception in an \
221 * exception entry sequence, it was probably meant to apply to \
222 * the code where the exception occurred (since exception entry \
223 * doesn't turn off DE automatically). We simulate the effect \
224 * of turning off DE on entry to an exception handler by turning \
225 * off DE in the CSRR1 value and clearing the debug status. \
226 */ \
227 mfspr r10,SPRN_DBSR; /* check single-step/branch taken */ \
228 andis. r10,r10,DBSR_IC@h; \
229 beq+ 2f; \
230 \
231 lis r10,KERNELBASE@h; /* check if exception in vectors */ \
232 ori r10,r10,KERNELBASE@l; \
233 cmplw r12,r10; \
234 blt+ 2f; /* addr below exception vectors */ \
235 \
236 lis r10,Debug@h; \
237 ori r10,r10,Debug@l; \
238 cmplw r12,r10; \
239 bgt+ 2f; /* addr above exception vectors */ \
240 \
241 /* here it looks like we got an inappropriate debug exception. */ \
2421: rlwinm r9,r9,0,~MSR_DE; /* clear DE in the CSRR1 value */ \
243 lis r10,DBSR_IC@h; /* clear the IC event */ \
244 mtspr SPRN_DBSR,r10; \
245 /* restore state and get out */ \
246 lwz r10,_CCR(r11); \
247 lwz r0,GPR0(r11); \
248 lwz r1,GPR1(r11); \
249 mtcrf 0x80,r10; \
250 mtspr SPRN_CSRR0,r12; \
251 mtspr SPRN_CSRR1,r9; \
252 lwz r9,GPR9(r11); \
253 lwz r12,GPR12(r11); \
254 mtspr CRIT_SPRG,r8; \
255 BOOKE_LOAD_EXC_LEVEL_STACK(CRIT); /* r8 points to the debug stack */ \
256 lwz r10,GPR10-INT_FRAME_SIZE(r8); \
257 lwz r11,GPR11-INT_FRAME_SIZE(r8); \
258 mfspr r8,CRIT_SPRG; \
259 \
260 rfci; \
261 b .; \
262 \
263 /* continue normal handling for a critical exception... */ \
2642: mfspr r4,SPRN_DBSR; \
265 addi r3,r1,STACK_FRAME_OVERHEAD; \
266 EXC_XFER_TEMPLATE(DebugException, 0x2002, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), NOCOPY, crit_transfer_to_handler, ret_from_crit_exc)
267
268#define INSTRUCTION_STORAGE_EXCEPTION \
269 START_EXCEPTION(InstructionStorage) \
270 NORMAL_EXCEPTION_PROLOG; \
271 mfspr r5,SPRN_ESR; /* Grab the ESR and save it */ \
272 stw r5,_ESR(r11); \
273 mr r4,r12; /* Pass SRR0 as arg2 */ \
274 li r5,0; /* Pass zero as arg3 */ \
275 EXC_XFER_EE_LITE(0x0400, handle_page_fault)
276
277#define ALIGNMENT_EXCEPTION \
278 START_EXCEPTION(Alignment) \
279 NORMAL_EXCEPTION_PROLOG; \
280 mfspr r4,SPRN_DEAR; /* Grab the DEAR and save it */ \
281 stw r4,_DEAR(r11); \
282 addi r3,r1,STACK_FRAME_OVERHEAD; \
283 EXC_XFER_EE(0x0600, alignment_exception)
284
285#define PROGRAM_EXCEPTION \
286 START_EXCEPTION(Program) \
287 NORMAL_EXCEPTION_PROLOG; \
288 mfspr r4,SPRN_ESR; /* Grab the ESR and save it */ \
289 stw r4,_ESR(r11); \
290 addi r3,r1,STACK_FRAME_OVERHEAD; \
291 EXC_XFER_STD(0x0700, program_check_exception)
292
293#define DECREMENTER_EXCEPTION \
294 START_EXCEPTION(Decrementer) \
295 NORMAL_EXCEPTION_PROLOG; \
296 lis r0,TSR_DIS@h; /* Setup the DEC interrupt mask */ \
297 mtspr SPRN_TSR,r0; /* Clear the DEC interrupt */ \
298 addi r3,r1,STACK_FRAME_OVERHEAD; \
299 EXC_XFER_LITE(0x0900, timer_interrupt)
300
301#define FP_UNAVAILABLE_EXCEPTION \
302 START_EXCEPTION(FloatingPointUnavailable) \
303 NORMAL_EXCEPTION_PROLOG; \
304 bne load_up_fpu; /* if from user, just load it up */ \
305 addi r3,r1,STACK_FRAME_OVERHEAD; \
306 EXC_XFER_EE_LITE(0x800, kernel_fp_unavailable_exception)
307
308#endif /* __HEAD_BOOKE_H__ */
diff --git a/arch/ppc/kernel/machine_kexec.c b/arch/ppc/kernel/machine_kexec.c
deleted file mode 100644
index a469ba438cbe..000000000000
--- a/arch/ppc/kernel/machine_kexec.c
+++ /dev/null
@@ -1,118 +0,0 @@
1/*
2 * machine_kexec.c - handle transition of Linux booting another kernel
3 * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com>
4 *
5 * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
6 *
7 * This source code is licensed under the GNU General Public License,
8 * Version 2. See the file COPYING for more details.
9 */
10
11#include <linux/mm.h>
12#include <linux/kexec.h>
13#include <linux/delay.h>
14#include <linux/reboot.h>
15#include <asm/pgtable.h>
16#include <asm/pgalloc.h>
17#include <asm/mmu_context.h>
18#include <asm/io.h>
19#include <asm/hw_irq.h>
20#include <asm/cacheflush.h>
21#include <asm/machdep.h>
22
23typedef NORET_TYPE void (*relocate_new_kernel_t)(
24 unsigned long indirection_page,
25 unsigned long reboot_code_buffer,
26 unsigned long start_address) ATTRIB_NORET;
27
28extern const unsigned char relocate_new_kernel[];
29extern const unsigned int relocate_new_kernel_size;
30
31void machine_shutdown(void)
32{
33 if (ppc_md.machine_shutdown)
34 ppc_md.machine_shutdown();
35}
36
37void machine_crash_shutdown(struct pt_regs *regs)
38{
39 if (ppc_md.machine_crash_shutdown)
40 ppc_md.machine_crash_shutdown();
41}
42
43/*
44 * Do what every setup is needed on image and the
45 * reboot code buffer to allow us to avoid allocations
46 * later.
47 */
48int machine_kexec_prepare(struct kimage *image)
49{
50 if (ppc_md.machine_kexec_prepare)
51 return ppc_md.machine_kexec_prepare(image);
52 /*
53 * Fail if platform doesn't provide its own machine_kexec_prepare
54 * implementation.
55 */
56 return -ENOSYS;
57}
58
59void machine_kexec_cleanup(struct kimage *image)
60{
61 if (ppc_md.machine_kexec_cleanup)
62 ppc_md.machine_kexec_cleanup(image);
63}
64
65/*
66 * Do not allocate memory (or fail in any way) in machine_kexec().
67 * We are past the point of no return, committed to rebooting now.
68 */
69NORET_TYPE void machine_kexec(struct kimage *image)
70{
71 if (ppc_md.machine_kexec)
72 ppc_md.machine_kexec(image);
73 else {
74 /*
75 * Fall back to normal restart if platform doesn't provide
76 * its own kexec function, and user insist to kexec...
77 */
78 machine_restart(NULL);
79 }
80 for(;;);
81}
82
83/*
84 * This is a generic machine_kexec function suitable at least for
85 * non-OpenFirmware embedded platforms.
86 * It merely copies the image relocation code to the control page and
87 * jumps to it.
88 * A platform specific function may just call this one.
89 */
90void machine_kexec_simple(struct kimage *image)
91{
92 unsigned long page_list;
93 unsigned long reboot_code_buffer, reboot_code_buffer_phys;
94 relocate_new_kernel_t rnk;
95
96 /* Interrupts aren't acceptable while we reboot */
97 local_irq_disable();
98
99 page_list = image->head;
100
101 /* we need both effective and real address here */
102 reboot_code_buffer =
103 (unsigned long)page_address(image->control_code_page);
104 reboot_code_buffer_phys = virt_to_phys((void *)reboot_code_buffer);
105
106 /* copy our kernel relocation code to the control code page */
107 memcpy((void *)reboot_code_buffer, relocate_new_kernel,
108 relocate_new_kernel_size);
109
110 flush_icache_range(reboot_code_buffer,
111 reboot_code_buffer + KEXEC_CONTROL_CODE_SIZE);
112 printk(KERN_INFO "Bye!\n");
113
114 /* now call it */
115 rnk = (relocate_new_kernel_t) reboot_code_buffer;
116 (*rnk)(page_list, reboot_code_buffer_phys, image->start);
117}
118
diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S
deleted file mode 100644
index d5e0dfc9ffec..000000000000
--- a/arch/ppc/kernel/misc.S
+++ /dev/null
@@ -1,868 +0,0 @@
1/*
2 * This file contains miscellaneous low-level functions.
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
6 * and Paul Mackerras.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 */
14
15#include <linux/sys.h>
16#include <asm/unistd.h>
17#include <asm/errno.h>
18#include <asm/processor.h>
19#include <asm/page.h>
20#include <asm/cache.h>
21#include <asm/cputable.h>
22#include <asm/mmu.h>
23#include <asm/ppc_asm.h>
24#include <asm/thread_info.h>
25#include <asm/asm-offsets.h>
26
27#ifdef CONFIG_8xx
28#define ISYNC_8xx isync
29#else
30#define ISYNC_8xx
31#endif
32 .text
33
34 .align 5
35_GLOBAL(__delay)
36 cmpwi 0,r3,0
37 mtctr r3
38 beqlr
391: bdnz 1b
40 blr
41
42/*
43 * Returns (address we're running at) - (address we were linked at)
44 * for use before the text and data are mapped to KERNELBASE.
45 */
46_GLOBAL(reloc_offset)
47 mflr r0
48 bl 1f
491: mflr r3
50 lis r4,1b@ha
51 addi r4,r4,1b@l
52 subf r3,r4,r3
53 mtlr r0
54 blr
55
56/*
57 * add_reloc_offset(x) returns x + reloc_offset().
58 */
59_GLOBAL(add_reloc_offset)
60 mflr r0
61 bl 1f
621: mflr r5
63 lis r4,1b@ha
64 addi r4,r4,1b@l
65 subf r5,r4,r5
66 add r3,r3,r5
67 mtlr r0
68 blr
69
70/*
71 * sub_reloc_offset(x) returns x - reloc_offset().
72 */
73_GLOBAL(sub_reloc_offset)
74 mflr r0
75 bl 1f
761: mflr r5
77 lis r4,1b@ha
78 addi r4,r4,1b@l
79 subf r5,r4,r5
80 subf r3,r5,r3
81 mtlr r0
82 blr
83
84/*
85 * reloc_got2 runs through the .got2 section adding an offset
86 * to each entry.
87 */
88_GLOBAL(reloc_got2)
89 mflr r11
90 lis r7,__got2_start@ha
91 addi r7,r7,__got2_start@l
92 lis r8,__got2_end@ha
93 addi r8,r8,__got2_end@l
94 subf r8,r7,r8
95 srwi. r8,r8,2
96 beqlr
97 mtctr r8
98 bl 1f
991: mflr r0
100 lis r4,1b@ha
101 addi r4,r4,1b@l
102 subf r0,r4,r0
103 add r7,r0,r7
1042: lwz r0,0(r7)
105 add r0,r0,r3
106 stw r0,0(r7)
107 addi r7,r7,4
108 bdnz 2b
109 mtlr r11
110 blr
111
112/*
113 * call_setup_cpu - call the setup_cpu function for this cpu
114 * r3 = data offset, r24 = cpu number
115 *
116 * Setup function is called with:
117 * r3 = data offset
118 * r4 = ptr to CPU spec (relocated)
119 */
120_GLOBAL(call_setup_cpu)
121 addis r4,r3,cur_cpu_spec@ha
122 addi r4,r4,cur_cpu_spec@l
123 lwz r4,0(r4)
124 add r4,r4,r3
125 lwz r5,CPU_SPEC_SETUP(r4)
126 cmpi 0,r5,0
127 add r5,r5,r3
128 beqlr
129 mtctr r5
130 bctr
131
132/*
133 * complement mask on the msr then "or" some values on.
134 * _nmask_and_or_msr(nmask, value_to_or)
135 */
136_GLOBAL(_nmask_and_or_msr)
137 mfmsr r0 /* Get current msr */
138 andc r0,r0,r3 /* And off the bits set in r3 (first parm) */
139 or r0,r0,r4 /* Or on the bits in r4 (second parm) */
140 SYNC /* Some chip revs have problems here... */
141 mtmsr r0 /* Update machine state */
142 isync
143 blr /* Done */
144
145
146/*
147 * Flush MMU TLB
148 */
149_GLOBAL(_tlbia)
150#if defined(CONFIG_40x)
151 sync /* Flush to memory before changing mapping */
152 tlbia
153 isync /* Flush shadow TLB */
154#elif defined(CONFIG_44x)
155 li r3,0
156 sync
157
158 /* Load high watermark */
159 lis r4,tlb_44x_hwater@ha
160 lwz r5,tlb_44x_hwater@l(r4)
161
1621: tlbwe r3,r3,PPC44x_TLB_PAGEID
163 addi r3,r3,1
164 cmpw 0,r3,r5
165 ble 1b
166
167 isync
168#else /* !(CONFIG_40x || CONFIG_44x) */
169#if defined(CONFIG_SMP)
170 rlwinm r8,r1,0,0,18
171 lwz r8,TI_CPU(r8)
172 oris r8,r8,10
173 mfmsr r10
174 SYNC
175 rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
176 rlwinm r0,r0,0,28,26 /* clear DR */
177 mtmsr r0
178 SYNC_601
179 isync
180 lis r9,mmu_hash_lock@h
181 ori r9,r9,mmu_hash_lock@l
182 tophys(r9,r9)
18310: lwarx r7,0,r9
184 cmpwi 0,r7,0
185 bne- 10b
186 stwcx. r8,0,r9
187 bne- 10b
188 sync
189 tlbia
190 sync
191 TLBSYNC
192 li r0,0
193 stw r0,0(r9) /* clear mmu_hash_lock */
194 mtmsr r10
195 SYNC_601
196 isync
197#else /* CONFIG_SMP */
198 sync
199 tlbia
200 sync
201#endif /* CONFIG_SMP */
202#endif /* ! defined(CONFIG_40x) */
203 blr
204
205/*
206 * Flush MMU TLB for a particular address
207 */
208_GLOBAL(_tlbie)
209#if defined(CONFIG_40x)
210 /* We run the search with interrupts disabled because we have to change
211 * the PID and I don't want to preempt when that happens.
212 */
213 mfmsr r5
214 mfspr r6,SPRN_PID
215 wrteei 0
216 mtspr SPRN_PID,r4
217 tlbsx. r3, 0, r3
218 mtspr SPRN_PID,r6
219 wrtee r5
220 bne 10f
221 sync
222 /* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
223 * Since 25 is the V bit in the TLB_TAG, loading this value will invalidate
224 * the TLB entry. */
225 tlbwe r3, r3, TLB_TAG
226 isync
22710:
228#elif defined(CONFIG_44x)
229 mfspr r5,SPRN_MMUCR
230 rlwimi r5,r4,0,24,31 /* Set TID */
231
232 /* We have to run the search with interrupts disabled, even critical
233 * and debug interrupts (in fact the only critical exceptions we have
234 * are debug and machine check). Otherwise an interrupt which causes
235 * a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */
236 mfmsr r4
237 lis r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha
238 addi r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
239 andc r6,r4,r6
240 mtmsr r6
241 mtspr SPRN_MMUCR,r5
242 tlbsx. r3, 0, r3
243 mtmsr r4
244 bne 10f
245 sync
246 /* There are only 64 TLB entries, so r3 < 64,
247 * which means bit 22, is clear. Since 22 is
248 * the V bit in the TLB_PAGEID, loading this
249 * value will invalidate the TLB entry.
250 */
251 tlbwe r3, r3, PPC44x_TLB_PAGEID
252 isync
25310:
254#else /* !(CONFIG_40x || CONFIG_44x) */
255#if defined(CONFIG_SMP)
256 rlwinm r8,r1,0,0,18
257 lwz r8,TI_CPU(r8)
258 oris r8,r8,11
259 mfmsr r10
260 SYNC
261 rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
262 rlwinm r0,r0,0,28,26 /* clear DR */
263 mtmsr r0
264 SYNC_601
265 isync
266 lis r9,mmu_hash_lock@h
267 ori r9,r9,mmu_hash_lock@l
268 tophys(r9,r9)
26910: lwarx r7,0,r9
270 cmpwi 0,r7,0
271 bne- 10b
272 stwcx. r8,0,r9
273 bne- 10b
274 eieio
275 tlbie r3
276 sync
277 TLBSYNC
278 li r0,0
279 stw r0,0(r9) /* clear mmu_hash_lock */
280 mtmsr r10
281 SYNC_601
282 isync
283#else /* CONFIG_SMP */
284 tlbie r3
285 sync
286#endif /* CONFIG_SMP */
287#endif /* ! CONFIG_40x */
288 blr
289
290/*
291 * Flush instruction cache.
292 * This is a no-op on the 601.
293 */
294_GLOBAL(flush_instruction_cache)
295#if defined(CONFIG_8xx)
296 isync
297 lis r5, IDC_INVALL@h
298 mtspr SPRN_IC_CST, r5
299#elif defined(CONFIG_4xx)
300#ifdef CONFIG_403GCX
301 li r3, 512
302 mtctr r3
303 lis r4, KERNELBASE@h
3041: iccci 0, r4
305 addi r4, r4, 16
306 bdnz 1b
307#else
308 lis r3, KERNELBASE@h
309 iccci 0,r3
310#endif
311#else
312 mfspr r3,SPRN_PVR
313 rlwinm r3,r3,16,16,31
314 cmpwi 0,r3,1
315 beqlr /* for 601, do nothing */
316 /* 603/604 processor - use invalidate-all bit in HID0 */
317 mfspr r3,SPRN_HID0
318 ori r3,r3,HID0_ICFI
319 mtspr SPRN_HID0,r3
320#endif /* CONFIG_8xx/4xx */
321 isync
322 blr
323
324/*
325 * Write any modified data cache blocks out to memory
326 * and invalidate the corresponding instruction cache blocks.
327 * This is a no-op on the 601.
328 *
329 * __flush_icache_range(unsigned long start, unsigned long stop)
330 */
331_GLOBAL(__flush_icache_range)
332BEGIN_FTR_SECTION
333 blr /* for 601, do nothing */
334END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
335 li r5,L1_CACHE_BYTES-1
336 andc r3,r3,r5
337 subf r4,r3,r4
338 add r4,r4,r5
339 srwi. r4,r4,L1_CACHE_SHIFT
340 beqlr
341 mtctr r4
342 mr r6,r3
3431: dcbst 0,r3
344 addi r3,r3,L1_CACHE_BYTES
345 bdnz 1b
346 sync /* wait for dcbst's to get to ram */
347 mtctr r4
3482: icbi 0,r6
349 addi r6,r6,L1_CACHE_BYTES
350 bdnz 2b
351 sync /* additional sync needed on g4 */
352 isync
353 blr
354/*
355 * Write any modified data cache blocks out to memory.
356 * Does not invalidate the corresponding cache lines (especially for
357 * any corresponding instruction cache).
358 *
359 * clean_dcache_range(unsigned long start, unsigned long stop)
360 */
361_GLOBAL(clean_dcache_range)
362 li r5,L1_CACHE_BYTES-1
363 andc r3,r3,r5
364 subf r4,r3,r4
365 add r4,r4,r5
366 srwi. r4,r4,L1_CACHE_SHIFT
367 beqlr
368 mtctr r4
369
3701: dcbst 0,r3
371 addi r3,r3,L1_CACHE_BYTES
372 bdnz 1b
373 sync /* wait for dcbst's to get to ram */
374 blr
375
376/*
377 * Write any modified data cache blocks out to memory and invalidate them.
378 * Does not invalidate the corresponding instruction cache blocks.
379 *
380 * flush_dcache_range(unsigned long start, unsigned long stop)
381 */
382_GLOBAL(flush_dcache_range)
383 li r5,L1_CACHE_BYTES-1
384 andc r3,r3,r5
385 subf r4,r3,r4
386 add r4,r4,r5
387 srwi. r4,r4,L1_CACHE_SHIFT
388 beqlr
389 mtctr r4
390
3911: dcbf 0,r3
392 addi r3,r3,L1_CACHE_BYTES
393 bdnz 1b
394 sync /* wait for dcbst's to get to ram */
395 blr
396
397/*
398 * Like above, but invalidate the D-cache. This is used by the 8xx
399 * to invalidate the cache so the PPC core doesn't get stale data
400 * from the CPM (no cache snooping here :-).
401 *
402 * invalidate_dcache_range(unsigned long start, unsigned long stop)
403 */
404_GLOBAL(invalidate_dcache_range)
405 li r5,L1_CACHE_BYTES-1
406 andc r3,r3,r5
407 subf r4,r3,r4
408 add r4,r4,r5
409 srwi. r4,r4,L1_CACHE_SHIFT
410 beqlr
411 mtctr r4
412
4131: dcbi 0,r3
414 addi r3,r3,L1_CACHE_BYTES
415 bdnz 1b
416 sync /* wait for dcbi's to get to ram */
417 blr
418
419#ifdef CONFIG_NOT_COHERENT_CACHE
420/*
421 * 40x cores have 8K or 16K dcache and 32 byte line size.
422 * 44x has a 32K dcache and 32 byte line size.
423 * 8xx has 1, 2, 4, 8K variants.
424 * For now, cover the worst case of the 44x.
425 * Must be called with external interrupts disabled.
426 */
427#define CACHE_NWAYS 64
428#define CACHE_NLINES 16
429
430_GLOBAL(flush_dcache_all)
431 li r4, (2 * CACHE_NWAYS * CACHE_NLINES)
432 mtctr r4
433 lis r5, KERNELBASE@h
4341: lwz r3, 0(r5) /* Load one word from every line */
435 addi r5, r5, L1_CACHE_BYTES
436 bdnz 1b
437 blr
438#endif /* CONFIG_NOT_COHERENT_CACHE */
439
440/*
441 * Flush a particular page from the data cache to RAM.
442 * Note: this is necessary because the instruction cache does *not*
443 * snoop from the data cache.
444 * This is a no-op on the 601 which has a unified cache.
445 *
446 * void __flush_dcache_icache(void *page)
447 */
448_GLOBAL(__flush_dcache_icache)
449BEGIN_FTR_SECTION
450 blr /* for 601, do nothing */
451END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
452 rlwinm r3,r3,0,0,19 /* Get page base address */
453 li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */
454 mtctr r4
455 mr r6,r3
4560: dcbst 0,r3 /* Write line to ram */
457 addi r3,r3,L1_CACHE_BYTES
458 bdnz 0b
459 sync
460#ifndef CONFIG_44x
461 /* We don't flush the icache on 44x. Those have a virtual icache
462 * and we don't have access to the virtual address here (it's
463 * not the page vaddr but where it's mapped in user space). The
464 * flushing of the icache on these is handled elsewhere, when
465 * a change in the address space occurs, before returning to
466 * user space
467 */
468 mtctr r4
4691: icbi 0,r6
470 addi r6,r6,L1_CACHE_BYTES
471 bdnz 1b
472 sync
473 isync
474#endif /* CONFIG_44x */
475 blr
476
477/*
478 * Flush a particular page from the data cache to RAM, identified
479 * by its physical address. We turn off the MMU so we can just use
480 * the physical address (this may be a highmem page without a kernel
481 * mapping).
482 *
483 * void __flush_dcache_icache_phys(unsigned long physaddr)
484 */
485_GLOBAL(__flush_dcache_icache_phys)
486BEGIN_FTR_SECTION
487 blr /* for 601, do nothing */
488END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
489 mfmsr r10
490 rlwinm r0,r10,0,28,26 /* clear DR */
491 mtmsr r0
492 isync
493 rlwinm r3,r3,0,0,19 /* Get page base address */
494 li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */
495 mtctr r4
496 mr r6,r3
4970: dcbst 0,r3 /* Write line to ram */
498 addi r3,r3,L1_CACHE_BYTES
499 bdnz 0b
500 sync
501 mtctr r4
5021: icbi 0,r6
503 addi r6,r6,L1_CACHE_BYTES
504 bdnz 1b
505 sync
506 mtmsr r10 /* restore DR */
507 isync
508 blr
509
510/*
511 * Clear pages using the dcbz instruction, which doesn't cause any
512 * memory traffic (except to write out any cache lines which get
513 * displaced). This only works on cacheable memory.
514 *
515 * void clear_pages(void *page, int order) ;
516 */
517_GLOBAL(clear_pages)
518 li r0,4096/L1_CACHE_BYTES
519 slw r0,r0,r4
520 mtctr r0
521#ifdef CONFIG_8xx
522 li r4, 0
5231: stw r4, 0(r3)
524 stw r4, 4(r3)
525 stw r4, 8(r3)
526 stw r4, 12(r3)
527#else
5281: dcbz 0,r3
529#endif
530 addi r3,r3,L1_CACHE_BYTES
531 bdnz 1b
532 blr
533
534/*
535 * Copy a whole page. We use the dcbz instruction on the destination
536 * to reduce memory traffic (it eliminates the unnecessary reads of
537 * the destination into cache). This requires that the destination
538 * is cacheable.
539 */
540#define COPY_16_BYTES \
541 lwz r6,4(r4); \
542 lwz r7,8(r4); \
543 lwz r8,12(r4); \
544 lwzu r9,16(r4); \
545 stw r6,4(r3); \
546 stw r7,8(r3); \
547 stw r8,12(r3); \
548 stwu r9,16(r3)
549
550_GLOBAL(copy_page)
551 addi r3,r3,-4
552 addi r4,r4,-4
553
554#ifdef CONFIG_8xx
555 /* don't use prefetch on 8xx */
556 li r0,4096/L1_CACHE_BYTES
557 mtctr r0
5581: COPY_16_BYTES
559 bdnz 1b
560 blr
561
562#else /* not 8xx, we can prefetch */
563 li r5,4
564
565#if MAX_COPY_PREFETCH > 1
566 li r0,MAX_COPY_PREFETCH
567 li r11,4
568 mtctr r0
56911: dcbt r11,r4
570 addi r11,r11,L1_CACHE_BYTES
571 bdnz 11b
572#else /* MAX_COPY_PREFETCH == 1 */
573 dcbt r5,r4
574 li r11,L1_CACHE_BYTES+4
575#endif /* MAX_COPY_PREFETCH */
576 li r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH
577 crclr 4*cr0+eq
5782:
579 mtctr r0
5801:
581 dcbt r11,r4
582 dcbz r5,r3
583 COPY_16_BYTES
584#if L1_CACHE_BYTES >= 32
585 COPY_16_BYTES
586#if L1_CACHE_BYTES >= 64
587 COPY_16_BYTES
588 COPY_16_BYTES
589#if L1_CACHE_BYTES >= 128
590 COPY_16_BYTES
591 COPY_16_BYTES
592 COPY_16_BYTES
593 COPY_16_BYTES
594#endif
595#endif
596#endif
597 bdnz 1b
598 beqlr
599 crnot 4*cr0+eq,4*cr0+eq
600 li r0,MAX_COPY_PREFETCH
601 li r11,4
602 b 2b
603#endif /* CONFIG_8xx */
604
605/*
606 * void atomic_clear_mask(atomic_t mask, atomic_t *addr)
607 * void atomic_set_mask(atomic_t mask, atomic_t *addr);
608 */
609_GLOBAL(atomic_clear_mask)
61010: lwarx r5,0,r4
611 andc r5,r5,r3
612 PPC405_ERR77(0,r4)
613 stwcx. r5,0,r4
614 bne- 10b
615 blr
616_GLOBAL(atomic_set_mask)
61710: lwarx r5,0,r4
618 or r5,r5,r3
619 PPC405_ERR77(0,r4)
620 stwcx. r5,0,r4
621 bne- 10b
622 blr
623
624/*
625 * I/O string operations
626 *
627 * insb(port, buf, len)
628 * outsb(port, buf, len)
629 * insw(port, buf, len)
630 * outsw(port, buf, len)
631 * insl(port, buf, len)
632 * outsl(port, buf, len)
633 * insw_ns(port, buf, len)
634 * outsw_ns(port, buf, len)
635 * insl_ns(port, buf, len)
636 * outsl_ns(port, buf, len)
637 *
638 * The *_ns versions don't do byte-swapping.
639 */
640_GLOBAL(_insb)
641 cmpwi 0,r5,0
642 mtctr r5
643 subi r4,r4,1
644 blelr-
64500: lbz r5,0(r3)
64601: eieio
64702: stbu r5,1(r4)
648 ISYNC_8xx
649 .section .fixup,"ax"
65003: blr
651 .text
652 .section __ex_table, "a"
653 .align 2
654 .long 00b, 03b
655 .long 01b, 03b
656 .long 02b, 03b
657 .text
658 bdnz 00b
659 blr
660
661_GLOBAL(_outsb)
662 cmpwi 0,r5,0
663 mtctr r5
664 subi r4,r4,1
665 blelr-
66600: lbzu r5,1(r4)
66701: stb r5,0(r3)
66802: eieio
669 ISYNC_8xx
670 .section .fixup,"ax"
67103: blr
672 .text
673 .section __ex_table, "a"
674 .align 2
675 .long 00b, 03b
676 .long 01b, 03b
677 .long 02b, 03b
678 .text
679 bdnz 00b
680 blr
681
682_GLOBAL(_insw_ns)
683 cmpwi 0,r5,0
684 mtctr r5
685 subi r4,r4,2
686 blelr-
68700: lhz r5,0(r3)
68801: eieio
68902: sthu r5,2(r4)
690 ISYNC_8xx
691 .section .fixup,"ax"
69203: blr
693 .text
694 .section __ex_table, "a"
695 .align 2
696 .long 00b, 03b
697 .long 01b, 03b
698 .long 02b, 03b
699 .text
700 bdnz 00b
701 blr
702
703_GLOBAL(_outsw_ns)
704 cmpwi 0,r5,0
705 mtctr r5
706 subi r4,r4,2
707 blelr-
70800: lhzu r5,2(r4)
70901: sth r5,0(r3)
71002: eieio
711 ISYNC_8xx
712 .section .fixup,"ax"
71303: blr
714 .text
715 .section __ex_table, "a"
716 .align 2
717 .long 00b, 03b
718 .long 01b, 03b
719 .long 02b, 03b
720 .text
721 bdnz 00b
722 blr
723
724_GLOBAL(_insl_ns)
725 cmpwi 0,r5,0
726 mtctr r5
727 subi r4,r4,4
728 blelr-
72900: lwz r5,0(r3)
73001: eieio
73102: stwu r5,4(r4)
732 ISYNC_8xx
733 .section .fixup,"ax"
73403: blr
735 .text
736 .section __ex_table, "a"
737 .align 2
738 .long 00b, 03b
739 .long 01b, 03b
740 .long 02b, 03b
741 .text
742 bdnz 00b
743 blr
744
745_GLOBAL(_outsl_ns)
746 cmpwi 0,r5,0
747 mtctr r5
748 subi r4,r4,4
749 blelr-
75000: lwzu r5,4(r4)
75101: stw r5,0(r3)
75202: eieio
753 ISYNC_8xx
754 .section .fixup,"ax"
75503: blr
756 .text
757 .section __ex_table, "a"
758 .align 2
759 .long 00b, 03b
760 .long 01b, 03b
761 .long 02b, 03b
762 .text
763 bdnz 00b
764 blr
765
766/*
767 * Extended precision shifts.
768 *
769 * Updated to be valid for shift counts from 0 to 63 inclusive.
770 * -- Gabriel
771 *
772 * R3/R4 has 64 bit value
773 * R5 has shift count
774 * result in R3/R4
775 *
776 * ashrdi3: arithmetic right shift (sign propagation)
777 * lshrdi3: logical right shift
778 * ashldi3: left shift
779 */
780_GLOBAL(__ashrdi3)
781 subfic r6,r5,32
782 srw r4,r4,r5 # LSW = count > 31 ? 0 : LSW >> count
783 addi r7,r5,32 # could be xori, or addi with -32
784 slw r6,r3,r6 # t1 = count > 31 ? 0 : MSW << (32-count)
785 rlwinm r8,r7,0,32 # t3 = (count < 32) ? 32 : 0
786 sraw r7,r3,r7 # t2 = MSW >> (count-32)
787 or r4,r4,r6 # LSW |= t1
788 slw r7,r7,r8 # t2 = (count < 32) ? 0 : t2
789 sraw r3,r3,r5 # MSW = MSW >> count
790 or r4,r4,r7 # LSW |= t2
791 blr
792
793_GLOBAL(__ashldi3)
794 subfic r6,r5,32
795 slw r3,r3,r5 # MSW = count > 31 ? 0 : MSW << count
796 addi r7,r5,32 # could be xori, or addi with -32
797 srw r6,r4,r6 # t1 = count > 31 ? 0 : LSW >> (32-count)
798 slw r7,r4,r7 # t2 = count < 32 ? 0 : LSW << (count-32)
799 or r3,r3,r6 # MSW |= t1
800 slw r4,r4,r5 # LSW = LSW << count
801 or r3,r3,r7 # MSW |= t2
802 blr
803
804_GLOBAL(__lshrdi3)
805 subfic r6,r5,32
806 srw r4,r4,r5 # LSW = count > 31 ? 0 : LSW >> count
807 addi r7,r5,32 # could be xori, or addi with -32
808 slw r6,r3,r6 # t1 = count > 31 ? 0 : MSW << (32-count)
809 srw r7,r3,r7 # t2 = count < 32 ? 0 : MSW >> (count-32)
810 or r4,r4,r6 # LSW |= t1
811 srw r3,r3,r5 # MSW = MSW >> count
812 or r4,r4,r7 # LSW |= t2
813 blr
814
815_GLOBAL(abs)
816 srawi r4,r3,31
817 xor r3,r3,r4
818 sub r3,r3,r4
819 blr
820
821_GLOBAL(_get_SP)
822 mr r3,r1 /* Close enough */
823 blr
824
825/*
826 * Create a kernel thread
827 * kernel_thread(fn, arg, flags)
828 */
829_GLOBAL(kernel_thread)
830 stwu r1,-16(r1)
831 stw r30,8(r1)
832 stw r31,12(r1)
833 mr r30,r3 /* function */
834 mr r31,r4 /* argument */
835 ori r3,r5,CLONE_VM /* flags */
836 oris r3,r3,CLONE_UNTRACED>>16
837 li r4,0 /* new sp (unused) */
838 li r0,__NR_clone
839 sc
840 cmpwi 0,r3,0 /* parent or child? */
841 bne 1f /* return if parent */
842 li r0,0 /* make top-level stack frame */
843 stwu r0,-16(r1)
844 mtlr r30 /* fn addr in lr */
845 mr r3,r31 /* load arg and call fn */
846 PPC440EP_ERR42
847 blrl
848 li r0,__NR_exit /* exit if function returns */
849 li r3,0
850 sc
8511: lwz r30,8(r1)
852 lwz r31,12(r1)
853 addi r1,r1,16
854 blr
855
856_GLOBAL(kernel_execve)
857 li r0,__NR_execve
858 sc
859 bnslr
860 neg r3,r3
861 blr
862
863/*
864 * This routine is just here to keep GCC happy - sigh...
865 */
866_GLOBAL(__main)
867 blr
868
diff --git a/arch/ppc/kernel/pci.c b/arch/ppc/kernel/pci.c
deleted file mode 100644
index df3ef6db072c..000000000000
--- a/arch/ppc/kernel/pci.c
+++ /dev/null
@@ -1,1233 +0,0 @@
1/*
2 * Common prep/chrp pci routines. -- Cort
3 */
4
5#include <linux/kernel.h>
6#include <linux/pci.h>
7#include <linux/delay.h>
8#include <linux/string.h>
9#include <linux/init.h>
10#include <linux/capability.h>
11#include <linux/sched.h>
12#include <linux/errno.h>
13#include <linux/bootmem.h>
14
15#include <asm/processor.h>
16#include <asm/io.h>
17#include <asm/prom.h>
18#include <asm/sections.h>
19#include <asm/pci-bridge.h>
20#include <asm/byteorder.h>
21#include <asm/irq.h>
22#include <asm/uaccess.h>
23#include <asm/machdep.h>
24
25#undef DEBUG
26
27#ifdef DEBUG
28#define DBG(x...) printk(x)
29#else
30#define DBG(x...)
31#endif
32
33unsigned long isa_io_base = 0;
34unsigned long isa_mem_base = 0;
35unsigned long pci_dram_offset = 0;
36int pcibios_assign_bus_offset = 1;
37
38void pcibios_make_OF_bus_map(void);
39
40static int pci_relocate_bridge_resource(struct pci_bus *bus, int i);
41static int probe_resource(struct pci_bus *parent, struct resource *pr,
42 struct resource *res, struct resource **conflict);
43static void update_bridge_base(struct pci_bus *bus, int i);
44static void pcibios_fixup_resources(struct pci_dev* dev);
45static void fixup_broken_pcnet32(struct pci_dev* dev);
46static int reparent_resources(struct resource *parent, struct resource *res);
47static void fixup_cpc710_pci64(struct pci_dev* dev);
48
49/* By default, we don't re-assign bus numbers.
50 */
51int pci_assign_all_buses;
52
53struct pci_controller* hose_head;
54struct pci_controller** hose_tail = &hose_head;
55
56static int pci_bus_count;
57
58static void
59fixup_broken_pcnet32(struct pci_dev* dev)
60{
61 if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) {
62 dev->vendor = PCI_VENDOR_ID_AMD;
63 pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
64 }
65}
66DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32);
67
68static void
69fixup_cpc710_pci64(struct pci_dev* dev)
70{
71 /* Hide the PCI64 BARs from the kernel as their content doesn't
72 * fit well in the resource management
73 */
74 dev->resource[0].start = dev->resource[0].end = 0;
75 dev->resource[0].flags = 0;
76 dev->resource[1].start = dev->resource[1].end = 0;
77 dev->resource[1].flags = 0;
78}
79DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CPC710_PCI64, fixup_cpc710_pci64);
80
/*
 * Per-device resource fixup, hooked below for every device
 * (PCI_ANY_ID/PCI_ANY_ID).  Two jobs:
 *  1. BARs whose end is 0xffffffff were never assigned by firmware:
 *     rebase them to 0 (keeping the length in ->end) and mark them
 *     IORESOURCE_UNSET so a fresh address is assigned later.
 *  2. Translate assigned BARs from bus addresses to CPU physical
 *     addresses by adding the hose's memory or I/O offset.
 * Finishes by calling the platform's own fixup hook, if any.
 */
static void
pcibios_fixup_resources(struct pci_dev *dev)
{
	struct pci_controller* hose = (struct pci_controller *)dev->sysdata;
	int i;
	unsigned long offset;

	if (!hose) {
		printk(KERN_ERR "No hose for PCI dev %s!\n", pci_name(dev));
		return;
	}
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		struct resource *res = dev->resource + i;
		if (!res->flags)
			continue;
		if (res->end == 0xffffffff) {
			DBG("PCI:%s Resource %d [%016llx-%016llx] is unassigned\n",
			    pci_name(dev), i,
			    (unsigned long long)res->start,
			    (unsigned long long)res->end);
			/* keep the size, rebase to zero */
			res->end -= res->start;
			res->start = 0;
			res->flags |= IORESOURCE_UNSET;
			continue;
		}
		offset = 0;
		if (res->flags & IORESOURCE_MEM) {
			offset = hose->pci_mem_offset;
		} else if (res->flags & IORESOURCE_IO) {
			/* I/O offset = this hose's virtual I/O base
			 * relative to the global ISA I/O base. */
			offset = (unsigned long) hose->io_base_virt
				- isa_io_base;
		}
		if (offset != 0) {
			res->start += offset;
			res->end += offset;
#ifdef DEBUG
			printk("Fixup res %d (%lx) of dev %s: %lx -> %lx\n",
			       i, res->flags, pci_name(dev),
			       res->start - offset, res->start);
#endif
		}
	}

	/* Call machine specific resource fixup */
	if (ppc_md.pcibios_fixup_resources)
		ppc_md.pcibios_fixup_resources(dev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
129
130void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
131 struct resource *res)
132{
133 unsigned long offset = 0;
134 struct pci_controller *hose = dev->sysdata;
135
136 if (hose && res->flags & IORESOURCE_IO)
137 offset = (unsigned long)hose->io_base_virt - isa_io_base;
138 else if (hose && res->flags & IORESOURCE_MEM)
139 offset = hose->pci_mem_offset;
140 region->start = res->start - offset;
141 region->end = res->end - offset;
142}
143EXPORT_SYMBOL(pcibios_resource_to_bus);
144
145void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
146 struct pci_bus_region *region)
147{
148 unsigned long offset = 0;
149 struct pci_controller *hose = dev->sysdata;
150
151 if (hose && res->flags & IORESOURCE_IO)
152 offset = (unsigned long)hose->io_base_virt - isa_io_base;
153 else if (hose && res->flags & IORESOURCE_MEM)
154 offset = hose->pci_mem_offset;
155 res->start = region->start + offset;
156 res->end = region->end + offset;
157}
158EXPORT_SYMBOL(pcibios_bus_to_resource);
159
160/*
161 * We need to avoid collisions with `mirrored' VGA ports
162 * and other strange ISA hardware, so we always want the
163 * addresses to be allocated in the 0x000-0x0ff region
164 * modulo 0x400.
165 *
166 * Why? Because some silly external IO cards only decode
167 * the low 10 bits of the IO address. The 0x00-0xff region
168 * is reserved for motherboard devices that decode all 16
169 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
170 * but we want to try to avoid allocating at 0x2900-0x2bff
171 * which might have be mirrored at 0x0100-0x03ff..
172 */
173void pcibios_align_resource(void *data, struct resource *res,
174 resource_size_t size, resource_size_t align)
175{
176 struct pci_dev *dev = data;
177
178 if (res->flags & IORESOURCE_IO) {
179 resource_size_t start = res->start;
180
181 if (size > 0x100) {
182 printk(KERN_ERR "PCI: I/O Region %s/%d too large"
183 " (%lld bytes)\n", pci_name(dev),
184 dev->resource - res, (unsigned long long)size);
185 }
186
187 if (start & 0x300) {
188 start = (start + 0x3ff) & ~0x3ff;
189 res->start = start;
190 }
191 }
192}
193EXPORT_SYMBOL(pcibios_align_resource);
194
195/*
196 * Handle resources of PCI devices. If the world were perfect, we could
197 * just allocate all the resource regions and do nothing more. It isn't.
198 * On the other hand, we cannot just re-allocate all devices, as it would
199 * require us to know lots of host bridge internals. So we attempt to
200 * keep as much of the original configuration as possible, but tweak it
201 * when it's found to be wrong.
202 *
203 * Known BIOS problems we have to work around:
204 * - I/O or memory regions not configured
205 * - regions configured, but not enabled in the command register
206 * - bogus I/O addresses above 64K used
207 * - expansion ROMs left enabled (this may sound harmless, but given
208 * the fact the PCI specs explicitly allow address decoders to be
209 * shared between expansion ROMs and other resource regions, it's
210 * at least dangerous)
211 *
212 * Our solution:
213 * (1) Allocate resources for all buses behind PCI-to-PCI bridges.
214 * This gives us fixed barriers on where we can allocate.
215 * (2) Allocate resources for all enabled devices. If there is
216 * a collision, just mark the resource as unallocated. Also
217 * disable expansion ROMs during this step.
218 * (3) Try to allocate resources for disabled devices. If the
219 * resources were assigned correctly, everything goes well,
220 * if they weren't, they won't disturb allocation of other
221 * resources.
222 * (4) Assign new addresses to resources which were either
223 * not configured at all or misconfigured. If explicitly
224 * requested by the user, configure expansion ROM address
225 * as well.
226 */
227
/*
 * Walk the bus tree depth-first and claim each bridge window
 * (bus->resource[0..3]) from its parent resource.  Per window, in
 * order: claim directly; if that conflicts, re-parent the conflicting
 * siblings under it (reparent_resources); as a last resort relocate
 * the window (pci_relocate_bridge_resource), dropping it entirely if
 * even that fails.
 */
static void __init
pcibios_allocate_bus_resources(struct list_head *bus_list)
{
	struct pci_bus *bus;
	int i;
	struct resource *res, *pr;

	/* Depth-First Search on bus tree */
	list_for_each_entry(bus, bus_list, node) {
		for (i = 0; i < 4; ++i) {
			if ((res = bus->resource[i]) == NULL || !res->flags
			    || res->start > res->end)
				continue;
			if (bus->parent == NULL)
				/* root bus: claim from the global pools */
				pr = (res->flags & IORESOURCE_IO)?
					&ioport_resource: &iomem_resource;
			else {
				pr = pci_find_parent_resource(bus->self, res);
				if (pr == res) {
					/* this happens when the generic PCI
					 * code (wrongly) decides that this
					 * bridge is transparent -- paulus
					 */
					continue;
				}
			}

			DBG("PCI: bridge rsrc %llx..%llx (%lx), parent %p\n",
			    (unsigned long long)res->start,
			    (unsigned long long)res->end, res->flags, pr);
			if (pr) {
				if (request_resource(pr, res) == 0)
					continue;
				/*
				 * Must be a conflict with an existing entry.
				 * Move that entry (or entries) under the
				 * bridge resource and try again.
				 */
				if (reparent_resources(pr, res) == 0)
					continue;
			}
			printk(KERN_ERR "PCI: Cannot allocate resource region "
			       "%d of PCI bridge %d\n", i, bus->number);
			if (pci_relocate_bridge_resource(bus, i))
				bus->resource[i] = NULL;
		}
		pcibios_allocate_bus_resources(&bus->children);
	}
}
277
/*
 * Reparent resource children of pr that conflict with res
 * under res, and make res replace those children.
 *
 * Returns 0 on success; -1 if a conflicting child is only partially
 * contained in res (cannot be reparented), or if no child conflicts
 * at all (the caller expected a conflict).
 */
static int __init
reparent_resources(struct resource *parent, struct resource *res)
{
	struct resource *p, **pp;
	struct resource **firstpp = NULL;

	/* Scan parent's children (kept sorted by address) for the run
	 * of entries that overlap res. */
	for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
		if (p->end < res->start)
			continue;
		if (res->end < p->start)
			break;
		if (p->start < res->start || p->end > res->end)
			return -1;	/* not completely contained */
		if (firstpp == NULL)
			firstpp = pp;	/* remember first conflict */
	}
	if (firstpp == NULL)
		return -1;	/* didn't find any conflicting entries? */
	/* Splice res into parent's child list in place of the run
	 * [*firstpp .. p), which becomes res's own child list. */
	res->parent = parent;
	res->child = *firstpp;
	res->sibling = *pp;
	*firstpp = res;
	*pp = NULL;
	for (p = res->child; p != NULL; p = p->sibling) {
		p->parent = res;
		DBG(KERN_INFO "PCI: reparented %s [%llx..%llx] under %s\n",
		    p->name, (unsigned long long)p->start,
		    (unsigned long long)p->end, res->name);
	}
	return 0;
}
313
/*
 * A bridge has been allocated a range which is outside the range
 * of its parent bridge, so it needs to be moved.
 *
 * Strategy: pick a parent window of matching type (exact
 * prefetch-flag match preferred; a prefetchable request may fall
 * back to a non-prefetchable window), then search downward from the
 * window's top for a gap big enough, skipping below each conflict
 * reported by probe_resource().  On success the bridge base/limit
 * registers are rewritten via update_bridge_base().
 * Returns 0 on success, -1 on failure.
 */
static int __init
pci_relocate_bridge_resource(struct pci_bus *bus, int i)
{
	struct resource *res, *pr, *conflict;
	unsigned long try, size;
	int j;
	struct pci_bus *parent = bus->parent;

	if (parent == NULL) {
		/* shouldn't ever happen */
		printk(KERN_ERR "PCI: can't move host bridge resource\n");
		return -1;
	}
	res = bus->resource[i];
	if (res == NULL)
		return -1;
	pr = NULL;
	for (j = 0; j < 4; j++) {
		struct resource *r = parent->resource[j];
		if (!r)
			continue;
		/* I/O vs MEM type must match exactly */
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH)) {
			pr = r;		/* prefetch flag matches too: ideal */
			break;
		}
		if (res->flags & IORESOURCE_PREFETCH)
			pr = r;		/* fallback: pref req in non-pref window */
	}
	if (pr == NULL)
		return -1;
	/* NOTE: "size" here is length-1 (end - start); the arithmetic
	 * below is consistent with that convention. */
	size = res->end - res->start;
	if (pr->start > pr->end || size > pr->end - pr->start)
		return -1;
	try = pr->end;
	for (;;) {
		res->start = try - size;
		res->end = try;
		if (probe_resource(bus->parent, pr, res, &conflict) == 0)
			break;
		if (conflict->start <= pr->start + size)
			return -1;	/* no room left below the conflict */
		try = conflict->start - 1;
	}
	if (request_resource(pr, res)) {
		DBG(KERN_ERR "PCI: huh? couldn't move to %llx..%llx\n",
		    (unsigned long long)res->start,
		    (unsigned long long)res->end);
		return -1; /* "can't happen" */
	}
	update_bridge_base(bus, i);
	printk(KERN_INFO "PCI: bridge %d resource %d moved to %llx..%llx\n",
	       bus->number, i, (unsigned long long)res->start,
	       (unsigned long long)res->end);
	return 0;
}
375
/*
 * Check whether the candidate range @res would overlap anything
 * already routed through parent window @pr: (1) resources already
 * inserted under pr, (2) windows of bridges below @parent that map
 * into pr, (3) assigned BARs of devices directly on @parent.
 * On overlap, store the conflicting resource in *conflict and
 * return 1; otherwise return 0.
 */
static int __init
probe_resource(struct pci_bus *parent, struct resource *pr,
	       struct resource *res, struct resource **conflict)
{
	struct pci_bus *bus;
	struct pci_dev *dev;
	struct resource *r;
	int i;

	/* 1: existing children of the parent window */
	for (r = pr->child; r != NULL; r = r->sibling) {
		if (r->end >= res->start && res->end >= r->start) {
			*conflict = r;
			return 1;
		}
	}
	/* 2: sibling bridge windows routed into pr */
	list_for_each_entry(bus, &parent->children, node) {
		for (i = 0; i < 4; ++i) {
			if ((r = bus->resource[i]) == NULL)
				continue;
			if (!r->flags || r->start > r->end || r == res)
				continue;
			if (pci_find_parent_resource(bus->self, r) != pr)
				continue;
			if (r->end >= res->start && res->end >= r->start) {
				*conflict = r;
				return 1;
			}
		}
	}
	/* 3: BARs of devices sitting directly on the parent bus */
	list_for_each_entry(dev, &parent->devices, bus_list) {
		for (i = 0; i < 6; ++i) {
			r = &dev->resource[i];
			if (!r->flags || (r->flags & IORESOURCE_UNSET))
				continue;
			if (pci_find_parent_resource(dev, r) != pr)
				continue;
			if (r->end >= res->start && res->end >= r->start) {
				*conflict = r;
				return 1;
			}
		}
	}
	return 0;
}
420
/*
 * Write bus->resource[i] back into the bridge's base/limit config
 * registers: the I/O window, the non-prefetchable memory window, or
 * the prefetchable memory window, depending on the resource flags.
 * I/O and memory decoding are disabled in PCI_COMMAND around the
 * update and then restored.
 */
static void __init
update_bridge_base(struct pci_bus *bus, int i)
{
	struct resource *res = bus->resource[i];
	u8 io_base_lo, io_limit_lo;
	u16 mem_base, mem_limit;
	u16 cmd;
	unsigned long start, end, off;
	struct pci_dev *dev = bus->self;
	struct pci_controller *hose = dev->sysdata;

	if (!hose) {
		printk("update_bridge_base: no hose?\n");
		return;
	}
	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	pci_write_config_word(dev, PCI_COMMAND,
			      cmd & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY));
	if (res->flags & IORESOURCE_IO) {
		/* Config registers hold bus addresses: convert from
		 * the CPU view by subtracting the hose I/O offset. */
		off = (unsigned long) hose->io_base_virt - isa_io_base;
		start = res->start - off;
		end = res->end - off;
		io_base_lo = (start >> 8) & PCI_IO_RANGE_MASK;
		io_limit_lo = (end >> 8) & PCI_IO_RANGE_MASK;
		if (end > 0xffff) {
			/* 32-bit I/O window: upper halves go in the
			 * UPPER16 registers. */
			pci_write_config_word(dev, PCI_IO_BASE_UPPER16,
					      start >> 16);
			pci_write_config_word(dev, PCI_IO_LIMIT_UPPER16,
					      end >> 16);
			io_base_lo |= PCI_IO_RANGE_TYPE_32;
		} else
			io_base_lo |= PCI_IO_RANGE_TYPE_16;
		pci_write_config_byte(dev, PCI_IO_BASE, io_base_lo);
		pci_write_config_byte(dev, PCI_IO_LIMIT, io_limit_lo);

	} else if ((res->flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH))
		   == IORESOURCE_MEM) {
		/* non-prefetchable memory window */
		off = hose->pci_mem_offset;
		mem_base = ((res->start - off) >> 16) & PCI_MEMORY_RANGE_MASK;
		mem_limit = ((res->end - off) >> 16) & PCI_MEMORY_RANGE_MASK;
		pci_write_config_word(dev, PCI_MEMORY_BASE, mem_base);
		pci_write_config_word(dev, PCI_MEMORY_LIMIT, mem_limit);

	} else if ((res->flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH))
		   == (IORESOURCE_MEM | IORESOURCE_PREFETCH)) {
		/* prefetchable memory window */
		off = hose->pci_mem_offset;
		mem_base = ((res->start - off) >> 16) & PCI_PREF_RANGE_MASK;
		mem_limit = ((res->end - off) >> 16) & PCI_PREF_RANGE_MASK;
		pci_write_config_word(dev, PCI_PREF_MEMORY_BASE, mem_base);
		pci_write_config_word(dev, PCI_PREF_MEMORY_LIMIT, mem_limit);

	} else {
		DBG(KERN_ERR "PCI: ugh, bridge %s res %d has flags=%lx\n",
		    pci_name(dev), i, res->flags);
	}
	pci_write_config_word(dev, PCI_COMMAND, cmd);
}
478
479static inline void alloc_resource(struct pci_dev *dev, int idx)
480{
481 struct resource *pr, *r = &dev->resource[idx];
482
483 DBG("PCI:%s: Resource %d: %016llx-%016llx (f=%lx)\n",
484 pci_name(dev), idx, (unsigned long long)r->start,
485 (unsigned long long)r->end, r->flags);
486 pr = pci_find_parent_resource(dev, r);
487 if (!pr || request_resource(pr, r) < 0) {
488 printk(KERN_ERR "PCI: Cannot allocate resource region %d"
489 " of device %s\n", idx, pci_name(dev));
490 if (pr)
491 DBG("PCI: parent is %p: %016llx-%016llx (f=%lx)\n",
492 pr, (unsigned long long)pr->start,
493 (unsigned long long)pr->end, pr->flags);
494 /* We'll assign a new address later */
495 r->flags |= IORESOURCE_UNSET;
496 r->end -= r->start;
497 r->start = 0;
498 }
499}
500
/*
 * Claim device BARs in two passes (see the strategy comment above):
 * pass 0 claims BARs of devices whose decoding is enabled in
 * PCI_COMMAND, pass 1 those of disabled devices, so known-good
 * assignments win any conflicts.  Pass 0 also switches off any
 * expansion ROM left enabled by the firmware.
 */
static void __init
pcibios_allocate_resources(int pass)
{
	struct pci_dev *dev = NULL;
	int idx, disabled;
	u16 command;
	struct resource *r;

	for_each_pci_dev(dev) {
		pci_read_config_word(dev, PCI_COMMAND, &command);
		for (idx = 0; idx < 6; idx++) {
			r = &dev->resource[idx];
			if (r->parent)		/* Already allocated */
				continue;
			if (!r->flags || (r->flags & IORESOURCE_UNSET))
				continue;	/* Not assigned at all */
			if (r->flags & IORESOURCE_IO)
				disabled = !(command & PCI_COMMAND_IO);
			else
				disabled = !(command & PCI_COMMAND_MEMORY);
			/* pass 0 handles enabled, pass 1 disabled */
			if (pass == disabled)
				alloc_resource(dev, idx);
		}
		if (pass)
			continue;
		r = &dev->resource[PCI_ROM_RESOURCE];
		if (r->flags & IORESOURCE_ROM_ENABLE) {
			/* Turn the ROM off, leave the resource region, but keep it unregistered. */
			u32 reg;
			DBG("PCI: Switching off ROM of %s\n", pci_name(dev));
			r->flags &= ~IORESOURCE_ROM_ENABLE;
			pci_read_config_dword(dev, dev->rom_base_reg, &reg);
			pci_write_config_dword(dev, dev->rom_base_reg,
					       reg & ~PCI_ROM_ADDRESS_ENABLE);
		}
	}
}
538
/*
 * Final pass: assign fresh addresses to every BAR still marked
 * IORESOURCE_UNSET (never configured, or rejected in earlier
 * passes).  Classless devices and host bridges are skipped, and the
 * platform hook may veto assignment per device.  ROM BARs are
 * deliberately not assigned (see the #if 0 block).
 */
static void __init
pcibios_assign_resources(void)
{
	struct pci_dev *dev = NULL;
	int idx;
	struct resource *r;

	for_each_pci_dev(dev) {
		int class = dev->class >> 8;

		/* Don't touch classless devices and host bridges */
		if (!class || class == PCI_CLASS_BRIDGE_HOST)
			continue;

		for (idx = 0; idx < 6; idx++) {
			r = &dev->resource[idx];

			/*
			 * We shall assign a new address to this resource,
			 * either because the BIOS (sic) forgot to do so
			 * or because we have decided the old address was
			 * unusable for some reason.
			 */
			if ((r->flags & IORESOURCE_UNSET) && r->end &&
			    (!ppc_md.pcibios_enable_device_hook ||
			     !ppc_md.pcibios_enable_device_hook(dev, 1))) {
				r->flags &= ~IORESOURCE_UNSET;
				pci_assign_resource(dev, idx);
			}
		}

#if 0 /* don't assign ROMs */
		r = &dev->resource[PCI_ROM_RESOURCE];
		r->end -= r->start;
		r->start = 0;
		if (r->end)
			pci_assign_resource(dev, PCI_ROM_RESOURCE);
#endif
	}
}
579
580
581static int next_controller_index;
582
583struct pci_controller * __init
584pcibios_alloc_controller(void)
585{
586 struct pci_controller *hose;
587
588 hose = (struct pci_controller *)alloc_bootmem(sizeof(*hose));
589 memset(hose, 0, sizeof(struct pci_controller));
590
591 *hose_tail = hose;
592 hose_tail = &hose->next;
593
594 hose->index = next_controller_index++;
595
596 return hose;
597}
598
/*
 * Intentionally empty stub: this build maintains no OpenFirmware
 * bus-number map (declared above; only called when
 * pci_assign_all_buses && have_of in pcibios_init()).
 */
void pcibios_make_OF_bus_map(void)
{
}
602
/*
 * Main PCI initialization, run at subsys_initcall time: scan every
 * recorded hose (optionally renumbering buses), build the OF bus
 * map if needed, route interrupts, run platform fixups, then
 * allocate and assign all resources.
 */
static int __init
pcibios_init(void)
{
	struct pci_controller *hose;
	struct pci_bus *bus;
	int next_busno;

	printk(KERN_INFO "PCI: Probing PCI hardware\n");

	/* Scan all of the recorded PCI controllers. */
	for (next_busno = 0, hose = hose_head; hose; hose = hose->next) {
		if (pci_assign_all_buses)
			hose->first_busno = next_busno;
		hose->last_busno = 0xff;
		bus = pci_scan_bus(hose->first_busno, hose->ops, hose);
		hose->last_busno = bus->subordinate;
		if (pci_assign_all_buses || next_busno <= hose->last_busno)
			next_busno = hose->last_busno + pcibios_assign_bus_offset;
	}
	pci_bus_count = next_busno;

	/* OpenFirmware based machines need a map of OF bus
	 * numbers vs. kernel bus numbers since we may have to
	 * remap them.
	 */
	if (pci_assign_all_buses && have_of)
		pcibios_make_OF_bus_map();

	/* Do machine dependent PCI interrupt routing */
	if (ppc_md.pci_swizzle && ppc_md.pci_map_irq)
		pci_fixup_irqs(ppc_md.pci_swizzle, ppc_md.pci_map_irq);

	/* Call machine dependent fixup */
	if (ppc_md.pcibios_fixup)
		ppc_md.pcibios_fixup();

	/* Allocate and assign resources */
	pcibios_allocate_bus_resources(&pci_root_buses);
	pcibios_allocate_resources(0);	/* enabled devices first */
	pcibios_allocate_resources(1);	/* then disabled devices */
	pcibios_assign_resources();

	/* Call machine dependent post-init code */
	if (ppc_md.pcibios_after_init)
		ppc_md.pcibios_after_init();

	return 0;
}

subsys_initcall(pcibios_init);
653
/*
 * Default interrupt-pin swizzle (used for ppc_md.pci_swizzle): walk
 * up from @dev to the hose's root bus, applying the standard bridge
 * swizzle at each hop, leaving the effective pin in *pinp and
 * returning the slot (IDSEL) of the last bridge before the root.
 */
unsigned char __init
common_swizzle(struct pci_dev *dev, unsigned char *pinp)
{
	struct pci_controller *hose = dev->sysdata;

	if (dev->bus->number != hose->first_busno) {
		u8 pin = *pinp;
		do {
			pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
			/* Move up the chain of bridges. */
			dev = dev->bus->self;
		} while (dev->bus->self);
		*pinp = pin;

		/* The slot is the idsel of the last bridge. */
	}
	return PCI_SLOT(dev->devfn);
}
672
/*
 * Identity hook: no ppc-specific adjustment is needed, so the
 * proposed start address is returned unchanged.
 */
unsigned long resource_fixup(struct pci_dev * dev, struct resource * res,
			unsigned long start, unsigned long size)
{
	return start;
}
678
/*
 * Per-bus fixup called by the generic scan code.  For a host bridge,
 * populate bus->resource[] from the hose's I/O and memory apertures
 * (inventing full-range defaults if the platform left them unset);
 * for a subordinate bridge, read the windows from config space and
 * shift them from bus to CPU addresses.  Ends with the platform hook.
 */
void __init pcibios_fixup_bus(struct pci_bus *bus)
{
	struct pci_controller *hose = (struct pci_controller *) bus->sysdata;
	unsigned long io_offset;
	struct resource *res;
	int i;

	io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
	if (bus->parent == NULL) {
		/* This is a host bridge - fill in its resources */
		hose->bus = bus;

		bus->resource[0] = res = &hose->io_resource;
		if (!res->flags) {
			if (io_offset)
				printk(KERN_ERR "I/O resource not set for host"
				       " bridge %d\n", hose->index);
			/* default: the whole I/O space */
			res->start = 0;
			res->end = IO_SPACE_LIMIT;
			res->flags = IORESOURCE_IO;
		}
		res->start += io_offset;
		res->end += io_offset;

		for (i = 0; i < 3; ++i) {
			res = &hose->mem_resources[i];
			if (!res->flags) {
				if (i > 0)
					continue;
				printk(KERN_ERR "Memory resource not set for "
				       "host bridge %d\n", hose->index);
				/* default: everything above the offset */
				res->start = hose->pci_mem_offset;
				res->end = ~0U;
				res->flags = IORESOURCE_MEM;
			}
			bus->resource[i+1] = res;
		}
	} else {
		/* This is a subordinate bridge */
		pci_read_bridge_bases(bus);

		for (i = 0; i < 4; ++i) {
			if ((res = bus->resource[i]) == NULL)
				continue;
			if (!res->flags)
				continue;
			/* translate bus addresses to CPU addresses */
			if (io_offset && (res->flags & IORESOURCE_IO)) {
				res->start += io_offset;
				res->end += io_offset;
			} else if (hose->pci_mem_offset
				   && (res->flags & IORESOURCE_MEM)) {
				res->start += hose->pci_mem_offset;
				res->end += hose->pci_mem_offset;
			}
		}
	}

	if (ppc_md.pcibios_fixup_bus)
		ppc_md.pcibios_fixup_bus(bus);
}
739
/*
 * Handle "pci=" command-line options: ppc consumes none, so the
 * whole string is handed back for the generic code to parse.
 */
char __init *pcibios_setup(char *str)
{
	return str;
}
744
/* the next one is stolen from the alpha port... */
/*
 * Record the routed @irq in the device's PCI_INTERRUPT_LINE config
 * register.
 */
void __init
pcibios_update_irq(struct pci_dev *dev, int irq)
{
	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
	/* XXX FIXME - update OF device tree node interrupt property */
}
752
753int pcibios_enable_device(struct pci_dev *dev, int mask)
754{
755 if (ppc_md.pcibios_enable_device_hook)
756 if (ppc_md.pcibios_enable_device_hook(dev, 0))
757 return -EINVAL;
758
759 return pci_enable_resources(dev, mask);
760}
761
762struct pci_controller*
763pci_bus_to_hose(int bus)
764{
765 struct pci_controller* hose = hose_head;
766
767 for (; hose; hose = hose->next)
768 if (bus >= hose->first_busno && bus <= hose->last_busno)
769 return hose;
770 return NULL;
771}
772
773void __iomem *
774pci_bus_io_base(unsigned int bus)
775{
776 struct pci_controller *hose;
777
778 hose = pci_bus_to_hose(bus);
779 if (!hose)
780 return NULL;
781 return hose->io_base_virt;
782}
783
784unsigned long
785pci_bus_io_base_phys(unsigned int bus)
786{
787 struct pci_controller *hose;
788
789 hose = pci_bus_to_hose(bus);
790 if (!hose)
791 return 0;
792 return hose->io_base_phys;
793}
794
795unsigned long
796pci_bus_mem_base_phys(unsigned int bus)
797{
798 struct pci_controller *hose;
799
800 hose = pci_bus_to_hose(bus);
801 if (!hose)
802 return 0;
803 return hose->pci_mem_offset;
804}
805
806unsigned long
807pci_resource_to_bus(struct pci_dev *pdev, struct resource *res)
808{
809 /* Hack alert again ! See comments in chrp_pci.c
810 */
811 struct pci_controller* hose =
812 (struct pci_controller *)pdev->sysdata;
813 if (hose && res->flags & IORESOURCE_MEM)
814 return res->start - hose->pci_mem_offset;
815 /* We may want to do something with IOs here... */
816 return res->start;
817}
818
819
/*
 * Validate a user mmap offset against @dev's BARs and turn it into a
 * CPU physical address.  On entry *offset is the offset derived from
 * vm_pgoff; on success the matching resource is returned and, for
 * I/O space, *offset is rebased onto the hose's physical I/O
 * aperture.  Returns NULL if the offset falls in no suitable BAR.
 */
static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
					       resource_size_t *offset,
					       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
	unsigned long io_offset = 0;
	int i, res_bit;

	if (hose == 0)
		return NULL;		/* should never happen */

	/* If memory, add on the PCI bridge address offset */
	if (mmap_state == pci_mmap_mem) {
#if 0	/* See comment in pci_resource_to_user() for why this is disabled */
		*offset += hose->pci_mem_offset;
#endif
		res_bit = IORESOURCE_MEM;
	} else {
		io_offset = hose->io_base_virt - ___IO_BASE;
		*offset += io_offset;
		res_bit = IORESOURCE_IO;
	}

	/*
	 * Check that the offset requested corresponds to one of the
	 * resources of the device.
	 */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *rp = &dev->resource[i];
		int flags = rp->flags;

		/* treat ROM as memory (should be already) */
		if (i == PCI_ROM_RESOURCE)
			flags |= IORESOURCE_MEM;

		/* Active and same type? */
		if ((flags & res_bit) == 0)
			continue;

		/* In the range of this resource? */
		if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
			continue;

		/* found it! construct the final physical address */
		if (mmap_state == pci_mmap_io)
			*offset += hose->io_base_phys - io_offset;
		return rp;
	}

	return NULL;
}
871
/*
 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
 * device mapping.
 *
 * The mapping is always uncached; write-combining (i.e. dropping the
 * guarded bit) is enabled when requested, or implied by a
 * prefetchable BAR, and only ever for memory space.
 */
static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
				      pgprot_t protection,
				      enum pci_mmap_state mmap_state,
				      int write_combine)
{
	unsigned long prot = pgprot_val(protection);

	/* Write combine is always 0 on non-memory space mappings. On
	 * memory space, if the user didn't pass 1, we check for a
	 * "prefetchable" resource. This is a bit hackish, but we use
	 * this to workaround the inability of /sysfs to provide a write
	 * combine bit
	 */
	if (mmap_state != pci_mmap_mem)
		write_combine = 0;
	else if (write_combine == 0) {
		if (rp->flags & IORESOURCE_PREFETCH)
			write_combine = 1;
	}

	/* XXX would be nice to have a way to ask for write-through */
	prot |= _PAGE_NO_CACHE;
	if (write_combine)
		prot &= ~_PAGE_GUARDED;	/* allow combining/reordering */
	else
		prot |= _PAGE_GUARDED;

	/* NOTE(review): unconditional printk on every PCI mmap looks
	 * like leftover debug output -- consider demoting to DBG(). */
	printk("PCI map for %s:%llx, prot: %lx\n", pci_name(dev),
	       (unsigned long long)rp->start, prot);

	return __pgprot(prot);
}
908
/*
 * This one is used by /dev/mem and fbdev who have no clue about the
 * PCI device, it tries to find the PCI device first and calls the
 * above routine
 *
 * Policy: RAM keeps the caller's protection; all other physical
 * addresses become uncached+guarded, except pages that fall inside a
 * prefetchable PCI memory BAR, which drop the guarded bit.
 */
pgprot_t pci_phys_mem_access_prot(struct file *file,
				  unsigned long pfn,
				  unsigned long size,
				  pgprot_t protection)
{
	struct pci_dev *pdev = NULL;
	struct resource *found = NULL;
	unsigned long prot = pgprot_val(protection);
	unsigned long offset = pfn << PAGE_SHIFT;
	int i;

	if (page_is_ram(pfn))
		return prot;

	prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;

	/* search every device's BARs for one covering this address */
	for_each_pci_dev(pdev) {
		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
			struct resource *rp = &pdev->resource[i];
			int flags = rp->flags;

			/* Active and same type? */
			if ((flags & IORESOURCE_MEM) == 0)
				continue;
			/* In the range of this resource? */
			if (offset < (rp->start & PAGE_MASK) ||
			    offset > rp->end)
				continue;
			found = rp;
			break;
		}
		if (found)
			break;
	}
	if (found) {
		if (found->flags & IORESOURCE_PREFETCH)
			prot &= ~_PAGE_GUARDED;
		/* drop the ref for_each_pci_dev() held when we broke out */
		pci_dev_put(pdev);
	}

	DBG("non-PCI map for %lx, prot: %lx\n", offset, prot);

	return __pgprot(prot);
}
958
959
/*
 * Perform the actual remap of the pages for a PCI device mapping, as
 * appropriate for this architecture. The region in the process to map
 * is described by vm_start and vm_end members of VMA, the base physical
 * address is found in vm_pgoff.
 * The pci device structure is provided so that architectures may make mapping
 * decisions on a per-device or per-bus basis.
 *
 * Returns a negative error code on failure, zero on success.
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state,
			int write_combine)
{
	resource_size_t offset = vma->vm_pgoff << PAGE_SHIFT;
	struct resource *rp;
	int ret;

	/* reject offsets outside the device's BARs; for I/O this also
	 * rebases offset onto the hose's physical aperture */
	rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
	if (rp == NULL)
		return -EINVAL;

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
						  vma->vm_page_prot,
						  mmap_state, write_combine);

	ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			      vma->vm_end - vma->vm_start, vma->vm_page_prot);

	return ret;
}
992
/* Obsolete functions. Should be removed once the symbios driver
 * is fixed
 */
/*
 * Translate a CPU physical memory address to a PCI bus address by
 * locating the hose memory window containing it.  Returns 0 when no
 * window matches.
 */
unsigned long
phys_to_bus(unsigned long pa)
{
	struct pci_controller *hose;
	int i;

	for (hose = hose_head; hose; hose = hose->next) {
		for (i = 0; i < 3; ++i) {
			if (pa >= hose->mem_resources[i].start
			    && pa <= hose->mem_resources[i].end) {
				/*
				 * XXX the hose->pci_mem_offset really
				 * only applies to mem_resources[0].
				 * We need a way to store an offset for
				 * the others. -- paulus
				 */
				if (i == 0)
					pa -= hose->pci_mem_offset;
				return pa;
			}
		}
	}
	/* hmmm, didn't find it */
	return 0;
}
1021
1022unsigned long
1023pci_phys_to_bus(unsigned long pa, int busnr)
1024{
1025 struct pci_controller* hose = pci_bus_to_hose(busnr);
1026 if (!hose)
1027 return pa;
1028 return pa - hose->pci_mem_offset;
1029}
1030
1031unsigned long
1032pci_bus_to_phys(unsigned int ba, int busnr)
1033{
1034 struct pci_controller* hose = pci_bus_to_hose(busnr);
1035 if (!hose)
1036 return ba;
1037 return ba + hose->pci_mem_offset;
1038}
1039
1040/* Provide information on locations of various I/O regions in physical
1041 * memory. Do this on a per-card basis so that we choose the right
1042 * root bridge.
1043 * Note that the returned IO or memory base is a physical address
1044 */
1045
1046long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
1047{
1048 struct pci_controller* hose;
1049 long result = -EOPNOTSUPP;
1050
1051 hose = pci_bus_to_hose(bus);
1052 if (!hose)
1053 return -ENODEV;
1054
1055 switch (which) {
1056 case IOBASE_BRIDGE_NUMBER:
1057 return (long)hose->first_busno;
1058 case IOBASE_MEMORY:
1059 return (long)hose->pci_mem_offset;
1060 case IOBASE_IO:
1061 return (long)hose->io_base_phys;
1062 case IOBASE_ISA_IO:
1063 return (long)isa_io_base;
1064 case IOBASE_ISA_MEM:
1065 return (long)isa_mem_base;
1066 }
1067
1068 return result;
1069}
1070
/*
 * Report a BAR's address range to userspace (sysfs "resource"
 * files).  I/O resources are rebased onto the hose's I/O aperture;
 * memory resources are passed through unmodified -- see the long
 * comment below for why.
 */
void pci_resource_to_user(const struct pci_dev *dev, int bar,
			  const struct resource *rsrc,
			  resource_size_t *start, resource_size_t *end)
{
	struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
	resource_size_t offset = 0;

	if (hose == NULL)
		return;

	if (rsrc->flags & IORESOURCE_IO)
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;

	/* We pass a fully fixed up address to userland for MMIO instead of
	 * a BAR value because X is lame and expects to be able to use that
	 * to pass to /dev/mem !
	 *
	 * That means that we'll have potentially 64 bits values where some
	 * userland apps only expect 32 (like X itself since it thinks only
	 * Sparc has 64 bits MMIO) but if we don't do that, we break it on
	 * 32 bits CHRPs :-(
	 *
	 * Hopefully, the sysfs insterface is immune to that gunk. Once X
	 * has been fixed (and the fix spread enough), we can re-enable the
	 * 2 lines below and pass down a BAR value to userland. In that case
	 * we'll also have to re-enable the matching code in
	 * __pci_mmap_make_offset().
	 *
	 * BenH.
	 */
#if 0
	else if (rsrc->flags & IORESOURCE_MEM)
		offset = hose->pci_mem_offset;
#endif

	*start = rsrc->start - offset;
	*end = rsrc->end - offset;
}
1109
1110void __init pci_init_resource(struct resource *res, resource_size_t start,
1111 resource_size_t end, int flags, char *name)
1112{
1113 res->start = start;
1114 res->end = end;
1115 res->flags = flags;
1116 res->name = name;
1117 res->parent = NULL;
1118 res->sibling = NULL;
1119 res->child = NULL;
1120}
1121
1122void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
1123{
1124 resource_size_t start = pci_resource_start(dev, bar);
1125 resource_size_t len = pci_resource_len(dev, bar);
1126 unsigned long flags = pci_resource_flags(dev, bar);
1127
1128 if (!len)
1129 return NULL;
1130 if (max && len > max)
1131 len = max;
1132 if (flags & IORESOURCE_IO)
1133 return ioport_map(start, len);
1134 if (flags & IORESOURCE_MEM)
1135 /* Not checking IORESOURCE_CACHEABLE because PPC does
1136 * not currently distinguish between ioremap and
1137 * ioremap_nocache.
1138 */
1139 return ioremap(start, len);
1140 /* What? */
1141 return NULL;
1142}
1143
/*
 * Counterpart of pci_iomap(): a no-op on ppc32.  NOTE(review):
 * ioremap()ed memory BARs from pci_iomap() are never unmapped here
 * -- confirm this is intentional before reuse.
 */
void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
{
	/* Nothing to do */
}
EXPORT_SYMBOL(pci_iomap);
EXPORT_SYMBOL(pci_iounmap);
1150
/*
 * Translate a CPU physical I/O address into a logical port number
 * (usable with inb/outb) by locating the hose whose I/O aperture
 * contains it.  Returns (unsigned int)-1 when no hose matches --
 * same bit pattern as (unsigned long)-1 on 32-bit ppc.
 */
unsigned long pci_address_to_pio(phys_addr_t address)
{
	struct pci_controller* hose = hose_head;

	for (; hose; hose = hose->next) {
		unsigned int size = hose->io_resource.end -
			hose->io_resource.start + 1;
		if (address >= hose->io_base_phys &&
		    address < (hose->io_base_phys + size)) {
			unsigned long base =
				(unsigned long)hose->io_base_virt - _IO_BASE;
			return base + (address - hose->io_base_phys);
		}
	}
	return (unsigned int)-1;
}
EXPORT_SYMBOL(pci_address_to_pio);
1168
/*
 * Null PCI config access functions, for the case when we can't
 * find a hose.
 */
/* NOTE(review): NULL_PCI_OP is never instantiated in this file --
 * possibly dead; confirm no other user before removing. */
#define NULL_PCI_OP(rw, size, type) \
static int \
null_##rw##_config_##size(struct pci_dev *dev, int offset, type val) \
{ \
	return PCIBIOS_DEVICE_NOT_FOUND; \
}
1179
/* Config-read stub for hose-less buses: always "no device". */
static int
null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
		 int len, u32 *val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}
1186
/* Config-write stub for hose-less buses: always "no device". */
static int
null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
		  int len, u32 val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}
1193
1194static struct pci_ops null_pci_ops =
1195{
1196 null_read_config,
1197 null_write_config
1198};
1199
/*
 * These functions are used early on before PCI scanning is done
 * and all of the pci_dev and pci_bus structures have been created.
 */
/*
 * Build a throwaway pci_bus just good enough for the
 * pci_bus_{read,write}_config_* accessors.  NOTE(review): the single
 * static bus makes this non-reentrant -- acceptable only because it
 * runs during early, single-threaded boot.
 */
static struct pci_bus *
fake_pci_bus(struct pci_controller *hose, int busnr)
{
	static struct pci_bus bus;

	if (hose == 0) {
		hose = pci_bus_to_hose(busnr);
		if (hose == 0)
			printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);
	}
	bus.number = busnr;
	bus.sysdata = hose;
	/* fall back to the always-failing null ops without a hose */
	bus.ops = hose? hose->ops: &null_pci_ops;
	return &bus;
}
1219
/*
 * early_{read,write}_config_{byte,word,dword}(): config-space access
 * usable before the real pci_bus tree exists, routed through
 * fake_pci_bus() above.
 */
#define EARLY_PCI_OP(rw, size, type)					\
int early_##rw##_config_##size(struct pci_controller *hose, int bus,	\
			       int devfn, int offset, type value)	\
{									\
	return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus),	\
					    devfn, offset, value);	\
}

EARLY_PCI_OP(read, byte, u8 *)
EARLY_PCI_OP(read, word, u16 *)
EARLY_PCI_OP(read, dword, u32 *)
EARLY_PCI_OP(write, byte, u8)
EARLY_PCI_OP(write, word, u16)
EARLY_PCI_OP(write, dword, u32)
diff --git a/arch/ppc/kernel/ppc-stub.c b/arch/ppc/kernel/ppc-stub.c
deleted file mode 100644
index 5f9ee7bb67ec..000000000000
--- a/arch/ppc/kernel/ppc-stub.c
+++ /dev/null
@@ -1,866 +0,0 @@
1/*
2 * ppc-stub.c: KGDB support for the Linux kernel.
3 *
4 * adapted from arch/sparc/kernel/sparc-stub.c for the PowerPC
5 * some stuff borrowed from Paul Mackerras' xmon
6 * Copyright (C) 1998 Michael AK Tesch (tesch@cs.wisc.edu)
7 *
8 * Modifications to run under Linux
9 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
10 *
11 * This file originally came from the gdb sources, and the
12 * copyright notices have been retained below.
13 */
14
15/****************************************************************************
16
17 THIS SOFTWARE IS NOT COPYRIGHTED
18
19 HP offers the following for use in the public domain. HP makes no
20 warranty with regard to the software or its performance and the
21 user accepts the software "AS IS" with all faults.
22
23 HP DISCLAIMS ANY WARRANTIES, EXPRESS OR IMPLIED, WITH REGARD
24 TO THIS SOFTWARE INCLUDING BUT NOT LIMITED TO THE WARRANTIES
25 OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
26
27****************************************************************************/
28
29/****************************************************************************
30 * Header: remcom.c,v 1.34 91/03/09 12:29:49 glenne Exp $
31 *
32 * Module name: remcom.c $
33 * Revision: 1.34 $
34 * Date: 91/03/09 12:29:49 $
35 * Contributor: Lake Stevens Instrument Division$
36 *
37 * Description: low level support for gdb debugger. $
38 *
39 * Considerations: only works on target hardware $
40 *
41 * Written by: Glenn Engel $
42 * ModuleState: Experimental $
43 *
44 * NOTES: See Below $
45 *
46 * Modified for SPARC by Stu Grossman, Cygnus Support.
47 *
48 * This code has been extensively tested on the Fujitsu SPARClite demo board.
49 *
50 * To enable debugger support, two things need to happen. One, a
51 * call to set_debug_traps() is necessary in order to allow any breakpoints
52 * or error conditions to be properly intercepted and reported to gdb.
53 * Two, a breakpoint needs to be generated to begin communication. This
54 * is most easily accomplished by a call to breakpoint(). Breakpoint()
55 * simulates a breakpoint by executing a trap #1.
56 *
57 *************
58 *
59 * The following gdb commands are supported:
60 *
61 * command function Return value
62 *
63 * g return the value of the CPU registers hex data or ENN
64 * G set the value of the CPU registers OK or ENN
65 * qOffsets Get section offsets. Reply is Text=xxx;Data=yyy;Bss=zzz
66 *
67 * mAA..AA,LLLL Read LLLL bytes at address AA..AA hex data or ENN
68 * MAA..AA,LLLL: Write LLLL bytes at address AA.AA OK or ENN
69 *
70 * c Resume at current address SNN ( signal NN)
71 * cAA..AA Continue at address AA..AA SNN
72 *
73 * s Step one instruction SNN
74 * sAA..AA Step one instruction from AA..AA SNN
75 *
76 * k kill
77 *
78 * ? What was the last sigval ? SNN (signal NN)
79 *
80 * bBB..BB Set baud rate to BB..BB OK or BNN, then sets
81 * baud rate
82 *
83 * All commands and responses are sent with a packet which includes a
84 * checksum. A packet consists of
85 *
86 * $<packet info>#<checksum>.
87 *
88 * where
89 * <packet info> :: <characters representing the command or response>
90 * <checksum> :: <two hex digits computed as modulo 256 sum of <packetinfo>>
91 *
92 * When a packet is received, it is first acknowledged with either '+' or '-'.
93 * '+' indicates a successful transfer. '-' indicates a failed transfer.
94 *
95 * Example:
96 *
97 * Host: Reply:
98 * $m0,10#2a +$00010203040506070809101112131415#42
99 *
100 ****************************************************************************/
101
102#include <linux/kernel.h>
103#include <linux/string.h>
104#include <linux/mm.h>
105#include <linux/smp.h>
106#include <linux/smp_lock.h>
107#include <linux/init.h>
108#include <linux/sysrq.h>
109
110#include <asm/cacheflush.h>
111#include <asm/system.h>
112#include <asm/signal.h>
113#include <asm/kgdb.h>
114#include <asm/pgtable.h>
115#include <asm/ptrace.h>
116
117void breakinst(void);
118
119/*
120 * BUFMAX defines the maximum number of characters in inbound/outbound buffers
121 * at least NUMREGBYTES*2 are needed for register packets
122 */
123#define BUFMAX 2048
124static char remcomInBuffer[BUFMAX];
125static char remcomOutBuffer[BUFMAX];
126
127static int initialized;
128static int kgdb_active;
129static int kgdb_started;
130static u_int fault_jmp_buf[100];
131static int kdebug;
132
133
134static const char hexchars[]="0123456789abcdef";
135
136/* Place where we save old trap entries for restoration - sparc*/
137/* struct tt_entry kgdb_savettable[256]; */
138/* typedef void (*trapfunc_t)(void); */
139
140static void kgdb_fault_handler(struct pt_regs *regs);
141static int handle_exception (struct pt_regs *regs);
142
143#if 0
144/* Install an exception handler for kgdb */
145static void exceptionHandler(int tnum, unsigned int *tfunc)
146{
147 /* We are dorking with a live trap table, all irqs off */
148}
149#endif
150
151int
152kgdb_setjmp(long *buf)
153{
154 asm ("mflr 0; stw 0,0(%0);"
155 "stw 1,4(%0); stw 2,8(%0);"
156 "mfcr 0; stw 0,12(%0);"
157 "stmw 13,16(%0)"
158 : : "r" (buf));
159 /* XXX should save fp regs as well */
160 return 0;
161}
162void
163kgdb_longjmp(long *buf, int val)
164{
165 if (val == 0)
166 val = 1;
167 asm ("lmw 13,16(%0);"
168 "lwz 0,12(%0); mtcrf 0x38,0;"
169 "lwz 0,0(%0); lwz 1,4(%0); lwz 2,8(%0);"
170 "mtlr 0; mr 3,%1"
171 : : "r" (buf), "r" (val));
172}
173/* Convert ch from a hex digit to an int */
174static int
175hex(unsigned char ch)
176{
177 if (ch >= 'a' && ch <= 'f')
178 return ch-'a'+10;
179 if (ch >= '0' && ch <= '9')
180 return ch-'0';
181 if (ch >= 'A' && ch <= 'F')
182 return ch-'A'+10;
183 return -1;
184}
185
186/* Convert the memory pointed to by mem into hex, placing result in buf.
187 * Return a pointer to the last char put in buf (null), in case of mem fault,
188 * return 0.
189 */
190static unsigned char *
191mem2hex(const char *mem, char *buf, int count)
192{
193 unsigned char ch;
194 unsigned short tmp_s;
195 unsigned long tmp_l;
196
197 if (kgdb_setjmp((long*)fault_jmp_buf) == 0) {
198 debugger_fault_handler = kgdb_fault_handler;
199
200 /* Accessing 16 bit and 32 bit objects in a single
201 ** load instruction is required to avoid bad side
202 ** effects for some IO registers.
203 */
204
205 if ((count == 2) && (((long)mem & 1) == 0)) {
206 tmp_s = *(unsigned short *)mem;
207 mem += 2;
208 *buf++ = hexchars[(tmp_s >> 12) & 0xf];
209 *buf++ = hexchars[(tmp_s >> 8) & 0xf];
210 *buf++ = hexchars[(tmp_s >> 4) & 0xf];
211 *buf++ = hexchars[tmp_s & 0xf];
212
213 } else if ((count == 4) && (((long)mem & 3) == 0)) {
214 tmp_l = *(unsigned int *)mem;
215 mem += 4;
216 *buf++ = hexchars[(tmp_l >> 28) & 0xf];
217 *buf++ = hexchars[(tmp_l >> 24) & 0xf];
218 *buf++ = hexchars[(tmp_l >> 20) & 0xf];
219 *buf++ = hexchars[(tmp_l >> 16) & 0xf];
220 *buf++ = hexchars[(tmp_l >> 12) & 0xf];
221 *buf++ = hexchars[(tmp_l >> 8) & 0xf];
222 *buf++ = hexchars[(tmp_l >> 4) & 0xf];
223 *buf++ = hexchars[tmp_l & 0xf];
224
225 } else {
226 while (count-- > 0) {
227 ch = *mem++;
228 *buf++ = hexchars[ch >> 4];
229 *buf++ = hexchars[ch & 0xf];
230 }
231 }
232
233 } else {
234 /* error condition */
235 }
236 debugger_fault_handler = NULL;
237 *buf = 0;
238 return buf;
239}
240
241/* convert the hex array pointed to by buf into binary to be placed in mem
242 * return a pointer to the character AFTER the last byte written.
243*/
244static char *
245hex2mem(char *buf, char *mem, int count)
246{
247 unsigned char ch;
248 int i;
249 char *orig_mem;
250 unsigned short tmp_s;
251 unsigned long tmp_l;
252
253 orig_mem = mem;
254
255 if (kgdb_setjmp((long*)fault_jmp_buf) == 0) {
256 debugger_fault_handler = kgdb_fault_handler;
257
258 /* Accessing 16 bit and 32 bit objects in a single
259 ** store instruction is required to avoid bad side
260 ** effects for some IO registers.
261 */
262
263 if ((count == 2) && (((long)mem & 1) == 0)) {
264 tmp_s = hex(*buf++) << 12;
265 tmp_s |= hex(*buf++) << 8;
266 tmp_s |= hex(*buf++) << 4;
267 tmp_s |= hex(*buf++);
268
269 *(unsigned short *)mem = tmp_s;
270 mem += 2;
271
272 } else if ((count == 4) && (((long)mem & 3) == 0)) {
273 tmp_l = hex(*buf++) << 28;
274 tmp_l |= hex(*buf++) << 24;
275 tmp_l |= hex(*buf++) << 20;
276 tmp_l |= hex(*buf++) << 16;
277 tmp_l |= hex(*buf++) << 12;
278 tmp_l |= hex(*buf++) << 8;
279 tmp_l |= hex(*buf++) << 4;
280 tmp_l |= hex(*buf++);
281
282 *(unsigned long *)mem = tmp_l;
283 mem += 4;
284
285 } else {
286 for (i=0; i<count; i++) {
287 ch = hex(*buf++) << 4;
288 ch |= hex(*buf++);
289 *mem++ = ch;
290 }
291 }
292
293
294 /*
295 ** Flush the data cache, invalidate the instruction cache.
296 */
297 flush_icache_range((int)orig_mem, (int)orig_mem + count - 1);
298
299 } else {
300 /* error condition */
301 }
302 debugger_fault_handler = NULL;
303 return mem;
304}
305
306/*
307 * While we find nice hex chars, build an int.
308 * Return number of chars processed.
309 */
310static int
311hexToInt(char **ptr, int *intValue)
312{
313 int numChars = 0;
314 int hexValue;
315
316 *intValue = 0;
317
318 if (kgdb_setjmp((long*)fault_jmp_buf) == 0) {
319 debugger_fault_handler = kgdb_fault_handler;
320 while (**ptr) {
321 hexValue = hex(**ptr);
322 if (hexValue < 0)
323 break;
324
325 *intValue = (*intValue << 4) | hexValue;
326 numChars ++;
327
328 (*ptr)++;
329 }
330 } else {
331 /* error condition */
332 }
333 debugger_fault_handler = NULL;
334
335 return (numChars);
336}
337
338/* scan for the sequence $<data>#<checksum> */
339static void
340getpacket(char *buffer)
341{
342 unsigned char checksum;
343 unsigned char xmitcsum;
344 int i;
345 int count;
346 unsigned char ch;
347
348 do {
349 /* wait around for the start character, ignore all other
350 * characters */
351 while ((ch = (getDebugChar() & 0x7f)) != '$') ;
352
353 checksum = 0;
354 xmitcsum = -1;
355
356 count = 0;
357
358 /* now, read until a # or end of buffer is found */
359 while (count < BUFMAX) {
360 ch = getDebugChar() & 0x7f;
361 if (ch == '#')
362 break;
363 checksum = checksum + ch;
364 buffer[count] = ch;
365 count = count + 1;
366 }
367
368 if (count >= BUFMAX)
369 continue;
370
371 buffer[count] = 0;
372
373 if (ch == '#') {
374 xmitcsum = hex(getDebugChar() & 0x7f) << 4;
375 xmitcsum |= hex(getDebugChar() & 0x7f);
376 if (checksum != xmitcsum)
377 putDebugChar('-'); /* failed checksum */
378 else {
379 putDebugChar('+'); /* successful transfer */
380 /* if a sequence char is present, reply the ID */
381 if (buffer[2] == ':') {
382 putDebugChar(buffer[0]);
383 putDebugChar(buffer[1]);
384 /* remove sequence chars from buffer */
385 count = strlen(buffer);
386 for (i=3; i <= count; i++)
387 buffer[i-3] = buffer[i];
388 }
389 }
390 }
391 } while (checksum != xmitcsum);
392}
393
394/* send the packet in buffer. */
395static void putpacket(unsigned char *buffer)
396{
397 unsigned char checksum;
398 int count;
399 unsigned char ch, recv;
400
401 /* $<packet info>#<checksum>. */
402 do {
403 putDebugChar('$');
404 checksum = 0;
405 count = 0;
406
407 while ((ch = buffer[count])) {
408 putDebugChar(ch);
409 checksum += ch;
410 count += 1;
411 }
412
413 putDebugChar('#');
414 putDebugChar(hexchars[checksum >> 4]);
415 putDebugChar(hexchars[checksum & 0xf]);
416 recv = getDebugChar();
417 } while ((recv & 0x7f) != '+');
418}
419
420static void kgdb_flush_cache_all(void)
421{
422 flush_instruction_cache();
423}
424
425/* Set up exception handlers for tracing and breakpoints
426 * [could be called kgdb_init()]
427 */
428void set_debug_traps(void)
429{
430#if 0
431 unsigned char c;
432
433 save_and_cli(flags);
434
435 /* In case GDB is started before us, ack any packets (presumably
436 * "$?#xx") sitting there.
437 *
438 * I've found this code causes more problems than it solves,
439 * so that's why it's commented out. GDB seems to work fine
440 * now starting either before or after the kernel -bwb
441 */
442
443 while((c = getDebugChar()) != '$');
444 while((c = getDebugChar()) != '#');
445 c = getDebugChar(); /* eat first csum byte */
446 c = getDebugChar(); /* eat second csum byte */
447 putDebugChar('+'); /* ack it */
448#endif
449 debugger = kgdb;
450 debugger_bpt = kgdb_bpt;
451 debugger_sstep = kgdb_sstep;
452 debugger_iabr_match = kgdb_iabr_match;
453 debugger_dabr_match = kgdb_dabr_match;
454
455 initialized = 1;
456}
457
458static void kgdb_fault_handler(struct pt_regs *regs)
459{
460 kgdb_longjmp((long*)fault_jmp_buf, 1);
461}
462
463int kgdb_bpt(struct pt_regs *regs)
464{
465 return handle_exception(regs);
466}
467
468int kgdb_sstep(struct pt_regs *regs)
469{
470 return handle_exception(regs);
471}
472
473void kgdb(struct pt_regs *regs)
474{
475 handle_exception(regs);
476}
477
478int kgdb_iabr_match(struct pt_regs *regs)
479{
480 printk(KERN_ERR "kgdb doesn't support iabr, what?!?\n");
481 return handle_exception(regs);
482}
483
484int kgdb_dabr_match(struct pt_regs *regs)
485{
486 printk(KERN_ERR "kgdb doesn't support dabr, what?!?\n");
487 return handle_exception(regs);
488}
489
490/* Convert the hardware trap type code to a unix signal number. */
491/*
492 * This table contains the mapping between PowerPC hardware trap types, and
493 * signals, which are primarily what GDB understands.
494 */
495static struct hard_trap_info
496{
497 unsigned int tt; /* Trap type code for powerpc */
498 unsigned char signo; /* Signal that we map this trap into */
499} hard_trap_info[] = {
500#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
501 { 0x100, SIGINT }, /* critical input interrupt */
502 { 0x200, SIGSEGV }, /* machine check */
503 { 0x300, SIGSEGV }, /* data storage */
504 { 0x400, SIGBUS }, /* instruction storage */
505 { 0x500, SIGINT }, /* interrupt */
506 { 0x600, SIGBUS }, /* alignment */
507 { 0x700, SIGILL }, /* program */
508 { 0x800, SIGILL }, /* reserved */
509 { 0x900, SIGILL }, /* reserved */
510 { 0xa00, SIGILL }, /* reserved */
511 { 0xb00, SIGILL }, /* reserved */
512 { 0xc00, SIGCHLD }, /* syscall */
513 { 0xd00, SIGILL }, /* reserved */
514 { 0xe00, SIGILL }, /* reserved */
515 { 0xf00, SIGILL }, /* reserved */
516 /*
517 ** 0x1000 PIT
518 ** 0x1010 FIT
519 ** 0x1020 watchdog
520 ** 0x1100 data TLB miss
521 ** 0x1200 instruction TLB miss
522 */
523 { 0x2002, SIGTRAP}, /* debug */
524#else
525 { 0x200, SIGSEGV }, /* machine check */
526 { 0x300, SIGSEGV }, /* address error (store) */
527 { 0x400, SIGBUS }, /* instruction bus error */
528 { 0x500, SIGINT }, /* interrupt */
529 { 0x600, SIGBUS }, /* alingment */
530 { 0x700, SIGTRAP }, /* breakpoint trap */
531 { 0x800, SIGFPE }, /* fpu unavail */
532 { 0x900, SIGALRM }, /* decrementer */
533 { 0xa00, SIGILL }, /* reserved */
534 { 0xb00, SIGILL }, /* reserved */
535 { 0xc00, SIGCHLD }, /* syscall */
536 { 0xd00, SIGTRAP }, /* single-step/watch */
537 { 0xe00, SIGFPE }, /* fp assist */
538#endif
539 { 0, 0} /* Must be last */
540
541};
542
543static int computeSignal(unsigned int tt)
544{
545 struct hard_trap_info *ht;
546
547 for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
548 if (ht->tt == tt)
549 return ht->signo;
550
551 return SIGHUP; /* default for things we don't know about */
552}
553
554#define PC_REGNUM 64
555#define SP_REGNUM 1
556
557/*
558 * This function does all command processing for interfacing to gdb.
559 */
560static int
561handle_exception (struct pt_regs *regs)
562{
563 int sigval;
564 int addr;
565 int length;
566 char *ptr;
567 unsigned int msr;
568
569 /* We don't handle user-mode breakpoints. */
570 if (user_mode(regs))
571 return 0;
572
573 if (debugger_fault_handler) {
574 debugger_fault_handler(regs);
575 panic("kgdb longjump failed!\n");
576 }
577 if (kgdb_active) {
578 printk(KERN_ERR "interrupt while in kgdb, returning\n");
579 return 0;
580 }
581
582 kgdb_active = 1;
583 kgdb_started = 1;
584
585#ifdef KGDB_DEBUG
586 printk("kgdb: entering handle_exception; trap [0x%x]\n",
587 (unsigned int)regs->trap);
588#endif
589
590 kgdb_interruptible(0);
591 lock_kernel();
592 msr = mfmsr();
593 mtmsr(msr & ~MSR_EE); /* disable interrupts */
594
595 if (regs->nip == (unsigned long)breakinst) {
596 /* Skip over breakpoint trap insn */
597 regs->nip += 4;
598 }
599
600 /* reply to host that an exception has occurred */
601 sigval = computeSignal(regs->trap);
602 ptr = remcomOutBuffer;
603
604 *ptr++ = 'T';
605 *ptr++ = hexchars[sigval >> 4];
606 *ptr++ = hexchars[sigval & 0xf];
607 *ptr++ = hexchars[PC_REGNUM >> 4];
608 *ptr++ = hexchars[PC_REGNUM & 0xf];
609 *ptr++ = ':';
610 ptr = mem2hex((char *)&regs->nip, ptr, 4);
611 *ptr++ = ';';
612 *ptr++ = hexchars[SP_REGNUM >> 4];
613 *ptr++ = hexchars[SP_REGNUM & 0xf];
614 *ptr++ = ':';
615 ptr = mem2hex(((char *)regs) + SP_REGNUM*4, ptr, 4);
616 *ptr++ = ';';
617 *ptr++ = 0;
618
619 putpacket(remcomOutBuffer);
620 if (kdebug)
621 printk("remcomOutBuffer: %s\n", remcomOutBuffer);
622
623 /* XXX We may want to add some features dealing with poking the
624 * XXX page tables, ... (look at sparc-stub.c for more info)
625 * XXX also required hacking to the gdb sources directly...
626 */
627
628 while (1) {
629 remcomOutBuffer[0] = 0;
630
631 getpacket(remcomInBuffer);
632 switch (remcomInBuffer[0]) {
633 case '?': /* report most recent signal */
634 remcomOutBuffer[0] = 'S';
635 remcomOutBuffer[1] = hexchars[sigval >> 4];
636 remcomOutBuffer[2] = hexchars[sigval & 0xf];
637 remcomOutBuffer[3] = 0;
638 break;
639#if 0
640 case 'q': /* this screws up gdb for some reason...*/
641 {
642 extern long _start, sdata, __bss_start;
643
644 ptr = &remcomInBuffer[1];
645 if (strncmp(ptr, "Offsets", 7) != 0)
646 break;
647
648 ptr = remcomOutBuffer;
649 sprintf(ptr, "Text=%8.8x;Data=%8.8x;Bss=%8.8x",
650 &_start, &sdata, &__bss_start);
651 break;
652 }
653#endif
654 case 'd':
655 /* toggle debug flag */
656 kdebug ^= 1;
657 break;
658
659 case 'g': /* return the value of the CPU registers.
660 * some of them are non-PowerPC names :(
661 * they are stored in gdb like:
662 * struct {
663 * u32 gpr[32];
664 * f64 fpr[32];
665 * u32 pc, ps, cnd, lr; (ps=msr)
666 * u32 cnt, xer, mq;
667 * }
668 */
669 {
670 int i;
671 ptr = remcomOutBuffer;
672 /* General Purpose Regs */
673 ptr = mem2hex((char *)regs, ptr, 32 * 4);
674 /* Floating Point Regs - FIXME */
675 /*ptr = mem2hex((char *), ptr, 32 * 8);*/
676 for(i=0; i<(32*8*2); i++) { /* 2chars/byte */
677 ptr[i] = '0';
678 }
679 ptr += 32*8*2;
680 /* pc, msr, cr, lr, ctr, xer, (mq is unused) */
681 ptr = mem2hex((char *)&regs->nip, ptr, 4);
682 ptr = mem2hex((char *)&regs->msr, ptr, 4);
683 ptr = mem2hex((char *)&regs->ccr, ptr, 4);
684 ptr = mem2hex((char *)&regs->link, ptr, 4);
685 ptr = mem2hex((char *)&regs->ctr, ptr, 4);
686 ptr = mem2hex((char *)&regs->xer, ptr, 4);
687 }
688 break;
689
690 case 'G': /* set the value of the CPU registers */
691 {
692 ptr = &remcomInBuffer[1];
693
694 /*
695 * If the stack pointer has moved, you should pray.
696 * (cause only god can help you).
697 */
698
699 /* General Purpose Regs */
700 hex2mem(ptr, (char *)regs, 32 * 4);
701
702 /* Floating Point Regs - FIXME?? */
703 /*ptr = hex2mem(ptr, ??, 32 * 8);*/
704 ptr += 32*8*2;
705
706 /* pc, msr, cr, lr, ctr, xer, (mq is unused) */
707 ptr = hex2mem(ptr, (char *)&regs->nip, 4);
708 ptr = hex2mem(ptr, (char *)&regs->msr, 4);
709 ptr = hex2mem(ptr, (char *)&regs->ccr, 4);
710 ptr = hex2mem(ptr, (char *)&regs->link, 4);
711 ptr = hex2mem(ptr, (char *)&regs->ctr, 4);
712 ptr = hex2mem(ptr, (char *)&regs->xer, 4);
713
714 strcpy(remcomOutBuffer,"OK");
715 }
716 break;
717 case 'H':
718 /* don't do anything, yet, just acknowledge */
719 hexToInt(&ptr, &addr);
720 strcpy(remcomOutBuffer,"OK");
721 break;
722
723 case 'm': /* mAA..AA,LLLL Read LLLL bytes at address AA..AA */
724 /* Try to read %x,%x. */
725
726 ptr = &remcomInBuffer[1];
727
728 if (hexToInt(&ptr, &addr) && *ptr++ == ','
729 && hexToInt(&ptr, &length)) {
730 if (mem2hex((char *)addr, remcomOutBuffer,
731 length))
732 break;
733 strcpy(remcomOutBuffer, "E03");
734 } else
735 strcpy(remcomOutBuffer, "E01");
736 break;
737
738 case 'M': /* MAA..AA,LLLL: Write LLLL bytes at address AA.AA return OK */
739 /* Try to read '%x,%x:'. */
740
741 ptr = &remcomInBuffer[1];
742
743 if (hexToInt(&ptr, &addr) && *ptr++ == ','
744 && hexToInt(&ptr, &length)
745 && *ptr++ == ':') {
746 if (hex2mem(ptr, (char *)addr, length))
747 strcpy(remcomOutBuffer, "OK");
748 else
749 strcpy(remcomOutBuffer, "E03");
750 flush_icache_range(addr, addr+length);
751 } else
752 strcpy(remcomOutBuffer, "E02");
753 break;
754
755
756 case 'k': /* kill the program, actually just continue */
757 case 'c': /* cAA..AA Continue; address AA..AA optional */
758 /* try to read optional parameter, pc unchanged if no parm */
759
760 ptr = &remcomInBuffer[1];
761 if (hexToInt(&ptr, &addr))
762 regs->nip = addr;
763
764/* Need to flush the instruction cache here, as we may have deposited a
765 * breakpoint, and the icache probably has no way of knowing that a data ref to
766 * some location may have changed something that is in the instruction cache.
767 */
768 kgdb_flush_cache_all();
769 mtmsr(msr);
770
771 kgdb_interruptible(1);
772 unlock_kernel();
773 kgdb_active = 0;
774 if (kdebug) {
775 printk("remcomInBuffer: %s\n", remcomInBuffer);
776 printk("remcomOutBuffer: %s\n", remcomOutBuffer);
777 }
778 return 1;
779
780 case 's':
781 kgdb_flush_cache_all();
782#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
783 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC);
784 regs->msr |= MSR_DE;
785#else
786 regs->msr |= MSR_SE;
787#endif
788 unlock_kernel();
789 kgdb_active = 0;
790 if (kdebug) {
791 printk("remcomInBuffer: %s\n", remcomInBuffer);
792 printk("remcomOutBuffer: %s\n", remcomOutBuffer);
793 }
794 return 1;
795
796 case 'r': /* Reset (if user process..exit ???)*/
797 panic("kgdb reset.");
798 break;
799 } /* switch */
800 if (remcomOutBuffer[0] && kdebug) {
801 printk("remcomInBuffer: %s\n", remcomInBuffer);
802 printk("remcomOutBuffer: %s\n", remcomOutBuffer);
803 }
804 /* reply to the request */
805 putpacket(remcomOutBuffer);
806 } /* while(1) */
807}
808
809/* This function will generate a breakpoint exception. It is used at the
810 beginning of a program to sync up with a debugger and can be used
811 otherwise as a quick means to stop program execution and "break" into
812 the debugger. */
813
814void
815breakpoint(void)
816{
817 if (!initialized) {
818 printk("breakpoint() called b4 kgdb init\n");
819 return;
820 }
821
822 asm(" .globl breakinst \n\
823 breakinst: .long 0x7d821008");
824}
825
826#ifdef CONFIG_KGDB_CONSOLE
827/* Output string in GDB O-packet format if GDB has connected. If nothing
828 output, returns 0 (caller must then handle output). */
829int
830kgdb_output_string (const char* s, unsigned int count)
831{
832 char buffer[512];
833
834 if (!kgdb_started)
835 return 0;
836
837 count = (count <= (sizeof(buffer) / 2 - 2))
838 ? count : (sizeof(buffer) / 2 - 2);
839
840 buffer[0] = 'O';
841 mem2hex (s, &buffer[1], count);
842 putpacket(buffer);
843
844 return 1;
845}
846#endif
847
848static void sysrq_handle_gdb(int key, struct pt_regs *pt_regs,
849 struct tty_struct *tty)
850{
851 printk("Entering GDB stub\n");
852 breakpoint();
853}
854static struct sysrq_key_op sysrq_gdb_op = {
855 .handler = sysrq_handle_gdb,
856 .help_msg = "Gdb",
857 .action_msg = "GDB",
858};
859
860static int gdb_register_sysrq(void)
861{
862 printk("Registering GDB sysrq handler\n");
863 register_sysrq_key('g', &sysrq_gdb_op);
864 return 0;
865}
866module_init(gdb_register_sysrq);
diff --git a/arch/ppc/kernel/ppc_htab.c b/arch/ppc/kernel/ppc_htab.c
deleted file mode 100644
index 9ed36dd9cbff..000000000000
--- a/arch/ppc/kernel/ppc_htab.c
+++ /dev/null
@@ -1,464 +0,0 @@
1/*
2 * PowerPC hash table management proc entry. Will show information
3 * about the current hash table and will allow changes to it.
4 *
5 * Written by Cort Dougan (cort@cs.nmt.edu)
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/errno.h>
14#include <linux/sched.h>
15#include <linux/proc_fs.h>
16#include <linux/stat.h>
17#include <linux/sysctl.h>
18#include <linux/capability.h>
19#include <linux/ctype.h>
20#include <linux/threads.h>
21#include <linux/seq_file.h>
22#include <linux/init.h>
23#include <linux/bitops.h>
24
25#include <asm/uaccess.h>
26#include <asm/mmu.h>
27#include <asm/residual.h>
28#include <asm/io.h>
29#include <asm/pgtable.h>
30#include <asm/cputable.h>
31#include <asm/system.h>
32#include <asm/reg.h>
33
34static int ppc_htab_show(struct seq_file *m, void *v);
35static ssize_t ppc_htab_write(struct file * file, const char __user * buffer,
36 size_t count, loff_t *ppos);
37extern PTE *Hash, *Hash_end;
38extern unsigned long Hash_size, Hash_mask;
39extern unsigned long _SDR1;
40extern unsigned long htab_reloads;
41extern unsigned long htab_preloads;
42extern unsigned long htab_evicts;
43extern unsigned long pte_misses;
44extern unsigned long pte_errors;
45extern unsigned int primary_pteg_full;
46extern unsigned int htab_hash_searches;
47
48static int ppc_htab_open(struct inode *inode, struct file *file)
49{
50 return single_open(file, ppc_htab_show, NULL);
51}
52
53const struct file_operations ppc_htab_operations = {
54 .open = ppc_htab_open,
55 .read = seq_read,
56 .llseek = seq_lseek,
57 .write = ppc_htab_write,
58 .release = single_release,
59};
60
61static char *pmc1_lookup(unsigned long mmcr0)
62{
63 switch ( mmcr0 & (0x7f<<7) )
64 {
65 case 0x0:
66 return "none";
67 case MMCR0_PMC1_CYCLES:
68 return "cycles";
69 case MMCR0_PMC1_ICACHEMISS:
70 return "ic miss";
71 case MMCR0_PMC1_DTLB:
72 return "dtlb miss";
73 default:
74 return "unknown";
75 }
76}
77
78static char *pmc2_lookup(unsigned long mmcr0)
79{
80 switch ( mmcr0 & 0x3f )
81 {
82 case 0x0:
83 return "none";
84 case MMCR0_PMC2_CYCLES:
85 return "cycles";
86 case MMCR0_PMC2_DCACHEMISS:
87 return "dc miss";
88 case MMCR0_PMC2_ITLB:
89 return "itlb miss";
90 case MMCR0_PMC2_LOADMISSTIME:
91 return "load miss time";
92 default:
93 return "unknown";
94 }
95}
96
97/*
98 * print some useful info about the hash table. This function
99 * is _REALLY_ slow (see the nested for loops below) but nothing
100 * in here should be really timing critical. -- Cort
101 */
102static int ppc_htab_show(struct seq_file *m, void *v)
103{
104 unsigned long mmcr0 = 0, pmc1 = 0, pmc2 = 0;
105#if defined(CONFIG_PPC_STD_MMU)
106 unsigned int kptes = 0, uptes = 0;
107 PTE *ptr;
108#endif /* CONFIG_PPC_STD_MMU */
109
110 if (cpu_has_feature(CPU_FTR_604_PERF_MON)) {
111 mmcr0 = mfspr(SPRN_MMCR0);
112 pmc1 = mfspr(SPRN_PMC1);
113 pmc2 = mfspr(SPRN_PMC2);
114 seq_printf(m,
115 "604 Performance Monitoring\n"
116 "MMCR0\t\t: %08lx %s%s ",
117 mmcr0,
118 ( mmcr0>>28 & 0x2 ) ? "(user mode counted)" : "",
119 ( mmcr0>>28 & 0x4 ) ? "(kernel mode counted)" : "");
120 seq_printf(m,
121 "\nPMC1\t\t: %08lx (%s)\n"
122 "PMC2\t\t: %08lx (%s)\n",
123 pmc1, pmc1_lookup(mmcr0),
124 pmc2, pmc2_lookup(mmcr0));
125 }
126
127#ifdef CONFIG_PPC_STD_MMU
128 /* if we don't have a htab */
129 if ( Hash_size == 0 ) {
130 seq_printf(m, "No Hash Table used\n");
131 return 0;
132 }
133
134 for (ptr = Hash; ptr < Hash_end; ptr++) {
135 unsigned int mctx, vsid;
136
137 if (!ptr->v)
138 continue;
139 /* undo the esid skew */
140 vsid = ptr->vsid;
141 mctx = ((vsid - (vsid & 0xf) * 0x111) >> 4) & 0xfffff;
142 if (mctx == 0)
143 kptes++;
144 else
145 uptes++;
146 }
147
148 seq_printf(m,
149 "PTE Hash Table Information\n"
150 "Size\t\t: %luKb\n"
151 "Buckets\t\t: %lu\n"
152 "Address\t\t: %08lx\n"
153 "Entries\t\t: %lu\n"
154 "User ptes\t: %u\n"
155 "Kernel ptes\t: %u\n"
156 "Percent full\t: %lu%%\n"
157 , (unsigned long)(Hash_size>>10),
158 (Hash_size/(sizeof(PTE)*8)),
159 (unsigned long)Hash,
160 Hash_size/sizeof(PTE)
161 , uptes,
162 kptes,
163 ((kptes+uptes)*100) / (Hash_size/sizeof(PTE))
164 );
165
166 seq_printf(m,
167 "Reloads\t\t: %lu\n"
168 "Preloads\t: %lu\n"
169 "Searches\t: %u\n"
170 "Overflows\t: %u\n"
171 "Evicts\t\t: %lu\n",
172 htab_reloads, htab_preloads, htab_hash_searches,
173 primary_pteg_full, htab_evicts);
174#endif /* CONFIG_PPC_STD_MMU */
175
176 seq_printf(m,
177 "Non-error misses: %lu\n"
178 "Error misses\t: %lu\n",
179 pte_misses, pte_errors);
180 return 0;
181}
182
183/*
184 * Allow user to define performance counters and resize the hash table
185 */
186static ssize_t ppc_htab_write(struct file * file, const char __user * ubuffer,
187 size_t count, loff_t *ppos)
188{
189#ifdef CONFIG_PPC_STD_MMU
190 unsigned long tmp;
191 char buffer[16];
192
193 if (!capable(CAP_SYS_ADMIN))
194 return -EACCES;
195 if (strncpy_from_user(buffer, ubuffer, 15))
196 return -EFAULT;
197 buffer[15] = 0;
198
199 /* don't set the htab size for now */
200 if ( !strncmp( buffer, "size ", 5) )
201 return -EBUSY;
202
203 if ( !strncmp( buffer, "reset", 5) )
204 {
205 if (cpu_has_feature(CPU_FTR_604_PERF_MON)) {
206 /* reset PMC1 and PMC2 */
207 mtspr(SPRN_PMC1, 0);
208 mtspr(SPRN_PMC2, 0);
209 }
210 htab_reloads = 0;
211 htab_evicts = 0;
212 pte_misses = 0;
213 pte_errors = 0;
214 }
215
216 /* Everything below here requires the performance monitor feature. */
217 if (!cpu_has_feature(CPU_FTR_604_PERF_MON))
218 return count;
219
220 /* turn off performance monitoring */
221 if ( !strncmp( buffer, "off", 3) )
222 {
223 mtspr(SPRN_MMCR0, 0);
224 mtspr(SPRN_PMC1, 0);
225 mtspr(SPRN_PMC2, 0);
226 }
227
228 if ( !strncmp( buffer, "user", 4) )
229 {
230 /* setup mmcr0 and clear the correct pmc */
231 tmp = (mfspr(SPRN_MMCR0) & ~(0x60000000)) | 0x20000000;
232 mtspr(SPRN_MMCR0, tmp);
233 mtspr(SPRN_PMC1, 0);
234 mtspr(SPRN_PMC2, 0);
235 }
236
237 if ( !strncmp( buffer, "kernel", 6) )
238 {
239 /* setup mmcr0 and clear the correct pmc */
240 tmp = (mfspr(SPRN_MMCR0) & ~(0x60000000)) | 0x40000000;
241 mtspr(SPRN_MMCR0, tmp);
242 mtspr(SPRN_PMC1, 0);
243 mtspr(SPRN_PMC2, 0);
244 }
245
246 /* PMC1 values */
247 if ( !strncmp( buffer, "dtlb", 4) )
248 {
249 /* setup mmcr0 and clear the correct pmc */
250 tmp = (mfspr(SPRN_MMCR0) & ~(0x7F << 7)) | MMCR0_PMC1_DTLB;
251 mtspr(SPRN_MMCR0, tmp);
252 mtspr(SPRN_PMC1, 0);
253 }
254
255 if ( !strncmp( buffer, "ic miss", 7) )
256 {
257 /* setup mmcr0 and clear the correct pmc */
258 tmp = (mfspr(SPRN_MMCR0) & ~(0x7F<<7)) | MMCR0_PMC1_ICACHEMISS;
259 mtspr(SPRN_MMCR0, tmp);
260 mtspr(SPRN_PMC1, 0);
261 }
262
263 /* PMC2 values */
264 if ( !strncmp( buffer, "load miss time", 14) )
265 {
266 /* setup mmcr0 and clear the correct pmc */
267 asm volatile(
268 "mfspr %0,%1\n\t" /* get current mccr0 */
269 "rlwinm %0,%0,0,0,31-6\n\t" /* clear bits [26-31] */
270 "ori %0,%0,%2 \n\t" /* or in mmcr0 settings */
271 "mtspr %1,%0 \n\t" /* set new mccr0 */
272 "mtspr %3,%4 \n\t" /* reset the pmc */
273 : "=r" (tmp)
274 : "i" (SPRN_MMCR0),
275 "i" (MMCR0_PMC2_LOADMISSTIME),
276 "i" (SPRN_PMC2), "r" (0) );
277 }
278
279 if ( !strncmp( buffer, "itlb", 4) )
280 {
281 /* setup mmcr0 and clear the correct pmc */
282 asm volatile(
283 "mfspr %0,%1\n\t" /* get current mccr0 */
284 "rlwinm %0,%0,0,0,31-6\n\t" /* clear bits [26-31] */
285 "ori %0,%0,%2 \n\t" /* or in mmcr0 settings */
286 "mtspr %1,%0 \n\t" /* set new mccr0 */
287 "mtspr %3,%4 \n\t" /* reset the pmc */
288 : "=r" (tmp)
289 : "i" (SPRN_MMCR0), "i" (MMCR0_PMC2_ITLB),
290 "i" (SPRN_PMC2), "r" (0) );
291 }
292
293 if ( !strncmp( buffer, "dc miss", 7) )
294 {
295 /* setup mmcr0 and clear the correct pmc */
296 asm volatile(
297 "mfspr %0,%1\n\t" /* get current mccr0 */
298 "rlwinm %0,%0,0,0,31-6\n\t" /* clear bits [26-31] */
299 "ori %0,%0,%2 \n\t" /* or in mmcr0 settings */
300 "mtspr %1,%0 \n\t" /* set new mccr0 */
301 "mtspr %3,%4 \n\t" /* reset the pmc */
302 : "=r" (tmp)
303 : "i" (SPRN_MMCR0), "i" (MMCR0_PMC2_DCACHEMISS),
304 "i" (SPRN_PMC2), "r" (0) );
305 }
306
307 return count;
308#else /* CONFIG_PPC_STD_MMU */
309 return 0;
310#endif /* CONFIG_PPC_STD_MMU */
311}
312
313int proc_dol2crvec(ctl_table *table, int write, struct file *filp,
314 void __user *buffer_arg, size_t *lenp, loff_t *ppos)
315{
316 int vleft, first=1, len, left, val;
317 char __user *buffer = (char __user *) buffer_arg;
318 #define TMPBUFLEN 256
319 char buf[TMPBUFLEN], *p;
320 static const char *sizestrings[4] = {
321 "2MB", "256KB", "512KB", "1MB"
322 };
323 static const char *clockstrings[8] = {
324 "clock disabled", "+1 clock", "+1.5 clock", "reserved(3)",
325 "+2 clock", "+2.5 clock", "+3 clock", "reserved(7)"
326 };
327 static const char *typestrings[4] = {
328 "flow-through burst SRAM", "reserved SRAM",
329 "pipelined burst SRAM", "pipelined late-write SRAM"
330 };
331 static const char *holdstrings[4] = {
332 "0.5", "1.0", "(reserved2)", "(reserved3)"
333 };
334
335 if (!cpu_has_feature(CPU_FTR_L2CR))
336 return -EFAULT;
337
338 if ( /*!table->maxlen ||*/ (*ppos && !write)) {
339 *lenp = 0;
340 return 0;
341 }
342
343 vleft = table->maxlen / sizeof(int);
344 left = *lenp;
345
346 for (; left /*&& vleft--*/; first=0) {
347 if (write) {
348 while (left) {
349 char c;
350 if(get_user(c, buffer))
351 return -EFAULT;
352 if (!isspace(c))
353 break;
354 left--;
355 buffer++;
356 }
357 if (!left)
358 break;
359 len = left;
360 if (len > TMPBUFLEN-1)
361 len = TMPBUFLEN-1;
362 if(copy_from_user(buf, buffer, len))
363 return -EFAULT;
364 buf[len] = 0;
365 p = buf;
366 if (*p < '0' || *p > '9')
367 break;
368 val = simple_strtoul(p, &p, 0);
369 len = p-buf;
370 if ((len < left) && *p && !isspace(*p))
371 break;
372 buffer += len;
373 left -= len;
374 _set_L2CR(val);
375 } else {
376 p = buf;
377 if (!first)
378 *p++ = '\t';
379 val = _get_L2CR();
380 p += sprintf(p, "0x%08x: ", val);
381 p += sprintf(p, " %s", (val >> 31) & 1 ? "enabled" :
382 "disabled");
383 p += sprintf(p, ", %sparity", (val>>30)&1 ? "" : "no ");
384 p += sprintf(p, ", %s", sizestrings[(val >> 28) & 3]);
385 p += sprintf(p, ", %s", clockstrings[(val >> 25) & 7]);
386 p += sprintf(p, ", %s", typestrings[(val >> 23) & 2]);
387 p += sprintf(p, "%s", (val>>22)&1 ? ", data only" : "");
388 p += sprintf(p, "%s", (val>>20)&1 ? ", ZZ enabled": "");
389 p += sprintf(p, ", %s", (val>>19)&1 ? "write-through" :
390 "copy-back");
391 p += sprintf(p, "%s", (val>>18)&1 ? ", testing" : "");
392 p += sprintf(p, ", %sns hold",holdstrings[(val>>16)&3]);
393 p += sprintf(p, "%s", (val>>15)&1 ? ", DLL slow" : "");
394 p += sprintf(p, "%s", (val>>14)&1 ? ", diff clock" :"");
395 p += sprintf(p, "%s", (val>>13)&1 ? ", DLL bypass" :"");
396
397 p += sprintf(p,"\n");
398
399 len = strlen(buf);
400 if (len > left)
401 len = left;
402 if (copy_to_user(buffer, buf, len))
403 return -EFAULT;
404 left -= len;
405 buffer += len;
406 break;
407 }
408 }
409
410 if (!write && !first && left) {
411 if(put_user('\n', (char __user *) buffer))
412 return -EFAULT;
413 left--, buffer++;
414 }
415 if (write) {
416 char __user *s = (char __user *) buffer;
417 while (left) {
418 char c;
419 if(get_user(c, s++))
420 return -EFAULT;
421 if (!isspace(c))
422 break;
423 left--;
424 }
425 }
426 if (write && first)
427 return -EINVAL;
428 *lenp -= left;
429 *ppos += *lenp;
430 return 0;
431}
432
433#ifdef CONFIG_SYSCTL
434/*
435 * Register our sysctl.
436 */
437static ctl_table htab_ctl_table[]={
438 {
439 .procname = "l2cr",
440 .mode = 0644,
441 .proc_handler = &proc_dol2crvec,
442 },
443 {}
444};
445static ctl_table htab_sysctl_root[] = {
446 {
447 .ctl_name = CTL_KERN,
448 .procname = "kernel",
449 .mode = 0555,
450 .child = htab_ctl_table,
451 },
452 {}
453};
454
455static int __init
456register_ppc_htab_sysctl(void)
457{
458 register_sysctl_table(htab_sysctl_root);
459
460 return 0;
461}
462
463__initcall(register_ppc_htab_sysctl);
464#endif
diff --git a/arch/ppc/kernel/ppc_ksyms.c b/arch/ppc/kernel/ppc_ksyms.c
deleted file mode 100644
index 5d529bcbeee9..000000000000
--- a/arch/ppc/kernel/ppc_ksyms.c
+++ /dev/null
@@ -1,258 +0,0 @@
1#include <linux/module.h>
2#include <linux/threads.h>
3#include <linux/smp.h>
4#include <linux/sched.h>
5#include <linux/elfcore.h>
6#include <linux/string.h>
7#include <linux/interrupt.h>
8#include <linux/screen_info.h>
9#include <linux/vt_kern.h>
10#include <linux/nvram.h>
11#include <linux/console.h>
12#include <linux/irq.h>
13#include <linux/pci.h>
14#include <linux/delay.h>
15#include <linux/pm.h>
16#include <linux/bitops.h>
17
18#include <asm/page.h>
19#include <asm/processor.h>
20#include <asm/uaccess.h>
21#include <asm/io.h>
22#include <asm/ide.h>
23#include <asm/atomic.h>
24#include <asm/checksum.h>
25#include <asm/pgtable.h>
26#include <asm/tlbflush.h>
27#include <asm/cacheflush.h>
28#include <linux/adb.h>
29#include <linux/cuda.h>
30#include <linux/pmu.h>
31#include <asm/system.h>
32#include <asm/pci-bridge.h>
33#include <asm/irq.h>
34#include <asm/dma.h>
35#include <asm/machdep.h>
36#include <asm/hw_irq.h>
37#include <asm/nvram.h>
38#include <asm/mmu_context.h>
39#include <asm/backlight.h>
40#include <asm/time.h>
41#include <asm/cputable.h>
42#include <asm/btext.h>
43#include <asm/xmon.h>
44#include <asm/signal.h>
45#include <asm/dcr.h>
46
47#ifdef CONFIG_8xx
48#include <asm/cpm1.h>
49#endif
50
/*
 * Symbol export table for arch/ppc: makes low-level architecture
 * symbols available to loadable modules.  The extern declarations
 * below are for routines implemented in assembly or in files that
 * provide no header prototype.
 */
extern void transfer_to_handler(void);
extern void do_IRQ(struct pt_regs *regs);
extern void machine_check_exception(struct pt_regs *regs);
extern void alignment_exception(struct pt_regs *regs);
extern void program_check_exception(struct pt_regs *regs);
extern void single_step_exception(struct pt_regs *regs);
extern int sys_sigreturn(struct pt_regs *regs);

/* libgcc-style 64-bit shift helpers provided by the kernel itself. */
long long __ashrdi3(long long, int);
long long __ashldi3(long long, int);
long long __lshrdi3(long long, int);

/* Exception entry and basic page operations. */
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(clear_pages);
EXPORT_SYMBOL(clear_user_page);
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(transfer_to_handler);
EXPORT_SYMBOL(do_IRQ);
EXPORT_SYMBOL(machine_check_exception);
EXPORT_SYMBOL(alignment_exception);
EXPORT_SYMBOL(program_check_exception);
EXPORT_SYMBOL(single_step_exception);
EXPORT_SYMBOL(sys_sigreturn);
EXPORT_SYMBOL(ppc_n_lost_interrupts);

/* ISA DMA parameters (defined in setup.c, filled in by platform code). */
EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
EXPORT_SYMBOL(DMA_MODE_READ);
EXPORT_SYMBOL(DMA_MODE_WRITE);

/* Bit operations, only exported when not inlined by the headers. */
#if !defined(__INLINE_BITOPS)
EXPORT_SYMBOL(set_bit);
EXPORT_SYMBOL(clear_bit);
EXPORT_SYMBOL(change_bit);
EXPORT_SYMBOL(test_and_set_bit);
EXPORT_SYMBOL(test_and_clear_bit);
EXPORT_SYMBOL(test_and_change_bit);
#endif /* __INLINE_BITOPS */

/* Arch-provided string routines. */
EXPORT_SYMBOL(strcpy);
EXPORT_SYMBOL(strncpy);
EXPORT_SYMBOL(strcat);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(strcmp);
EXPORT_SYMBOL(strncmp);

/* Checksum helpers used by the network stack. */
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_generic);
EXPORT_SYMBOL(ip_fast_csum);
EXPORT_SYMBOL(csum_tcpudp_magic);

/* User-space access primitives. */
EXPORT_SYMBOL(__copy_tofrom_user);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(__strnlen_user);

/*
EXPORT_SYMBOL(inb);
EXPORT_SYMBOL(inw);
EXPORT_SYMBOL(inl);
EXPORT_SYMBOL(outb);
EXPORT_SYMBOL(outw);
EXPORT_SYMBOL(outl);
EXPORT_SYMBOL(outsl);*/

/* Port-I/O string operations and the ioremap machinery. */
EXPORT_SYMBOL(_insb);
EXPORT_SYMBOL(_outsb);
EXPORT_SYMBOL(_insw_ns);
EXPORT_SYMBOL(_outsw_ns);
EXPORT_SYMBOL(_insl_ns);
EXPORT_SYMBOL(_outsl_ns);
EXPORT_SYMBOL(iopa);
EXPORT_SYMBOL(ioremap);
#ifdef CONFIG_44x
EXPORT_SYMBOL(ioremap64);
#endif
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(ioremap_bot);	/* aka VMALLOC_END */

#ifdef CONFIG_PCI
EXPORT_SYMBOL(isa_io_base);
EXPORT_SYMBOL(isa_mem_base);
EXPORT_SYMBOL(pci_dram_offset);
EXPORT_SYMBOL(pci_alloc_consistent);
EXPORT_SYMBOL(pci_free_consistent);
EXPORT_SYMBOL(pci_bus_io_base);
EXPORT_SYMBOL(pci_bus_io_base_phys);
EXPORT_SYMBOL(pci_bus_mem_base_phys);
EXPORT_SYMBOL(pci_bus_to_hose);
EXPORT_SYMBOL(pci_resource_to_bus);
EXPORT_SYMBOL(pci_phys_to_bus);
EXPORT_SYMBOL(pci_bus_to_phys);
#endif /* CONFIG_PCI */

#ifdef CONFIG_NOT_COHERENT_CACHE
extern void flush_dcache_all(void);
EXPORT_SYMBOL(flush_dcache_all);
#endif

/* Process, cache and TLB management. */
EXPORT_SYMBOL(start_thread);
EXPORT_SYMBOL(kernel_thread);

EXPORT_SYMBOL(flush_instruction_cache);
EXPORT_SYMBOL(giveup_fpu);
EXPORT_SYMBOL(__flush_icache_range);
EXPORT_SYMBOL(flush_dcache_range);
EXPORT_SYMBOL(flush_icache_user_range);
EXPORT_SYMBOL(flush_dcache_page);
EXPORT_SYMBOL(flush_tlb_kernel_range);
EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(_tlbie);
#ifdef CONFIG_ALTIVEC
#ifndef CONFIG_SMP
EXPORT_SYMBOL(last_task_used_altivec);
#endif
EXPORT_SYMBOL(giveup_altivec);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SMP
EXPORT_SYMBOL(smp_call_function);
EXPORT_SYMBOL(smp_hw_index);
#endif

EXPORT_SYMBOL(ppc_md);

/* Apple Desktop Bus / PowerMac controller interfaces. */
#ifdef CONFIG_ADB
EXPORT_SYMBOL(adb_request);
EXPORT_SYMBOL(adb_register);
EXPORT_SYMBOL(adb_unregister);
EXPORT_SYMBOL(adb_poll);
EXPORT_SYMBOL(adb_try_handler_change);
#endif /* CONFIG_ADB */
#ifdef CONFIG_ADB_CUDA
EXPORT_SYMBOL(cuda_request);
EXPORT_SYMBOL(cuda_poll);
#endif /* CONFIG_ADB_CUDA */
#if defined(CONFIG_BOOTX_TEXT)
EXPORT_SYMBOL(btext_update_display);
#endif
EXPORT_SYMBOL(to_tm);

EXPORT_SYMBOL(pm_power_off);

/* Shift helpers and memory primitives. */
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(cacheable_memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memchr);

#if defined(CONFIG_FB_VGA16_MODULE)
EXPORT_SYMBOL(screen_info);
#endif

EXPORT_SYMBOL(__delay);
EXPORT_SYMBOL(timer_interrupt);
EXPORT_SYMBOL(irq_desc);
EXPORT_SYMBOL(tb_ticks_per_jiffy);
EXPORT_SYMBOL(console_drivers);
#ifdef CONFIG_XMON
EXPORT_SYMBOL(xmon);
EXPORT_SYMBOL(xmon_printf);
#endif

/* Debugger hook pointers, installed by xmon/kgdb. */
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON)
extern void (*debugger)(struct pt_regs *regs);
extern int (*debugger_bpt)(struct pt_regs *regs);
extern int (*debugger_sstep)(struct pt_regs *regs);
extern int (*debugger_iabr_match)(struct pt_regs *regs);
extern int (*debugger_dabr_match)(struct pt_regs *regs);
extern void (*debugger_fault_handler)(struct pt_regs *regs);

EXPORT_SYMBOL(debugger);
EXPORT_SYMBOL(debugger_bpt);
EXPORT_SYMBOL(debugger_sstep);
EXPORT_SYMBOL(debugger_iabr_match);
EXPORT_SYMBOL(debugger_dabr_match);
EXPORT_SYMBOL(debugger_fault_handler);
#endif

#ifdef CONFIG_8xx
EXPORT_SYMBOL(cpm_install_handler);
EXPORT_SYMBOL(cpm_free_handler);
#endif /* CONFIG_8xx */
#if defined(CONFIG_8xx) || defined(CONFIG_40x)
EXPORT_SYMBOL(__res);
#endif

/* MMU context handling; several symbols kept for the Mac-on-Linux
 * (MOL) module, as the inline comments note. */
EXPORT_SYMBOL(next_mmu_context);
EXPORT_SYMBOL(set_context);
EXPORT_SYMBOL(disarm_decr);
#ifdef CONFIG_PPC_STD_MMU
extern long mol_trampoline;
EXPORT_SYMBOL(mol_trampoline); /* For MOL */
EXPORT_SYMBOL(flush_hash_pages); /* For MOL */
#ifdef CONFIG_SMP
extern int mmu_hash_lock;
EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */
#endif /* CONFIG_SMP */
extern long *intercept_table;
EXPORT_SYMBOL(intercept_table);
#endif /* CONFIG_PPC_STD_MMU */
#ifdef CONFIG_PPC_DCR_NATIVE
EXPORT_SYMBOL(__mtdcr);
EXPORT_SYMBOL(__mfdcr);
#endif
diff --git a/arch/ppc/kernel/relocate_kernel.S b/arch/ppc/kernel/relocate_kernel.S
deleted file mode 100644
index 9b2ad48e988c..000000000000
--- a/arch/ppc/kernel/relocate_kernel.S
+++ /dev/null
@@ -1,123 +0,0 @@
1/*
2 * relocate_kernel.S - put the kernel image in place to boot
3 * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com>
4 *
5 * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
6 *
7 * This source code is licensed under the GNU General Public License,
8 * Version 2. See the file COPYING for more details.
9 */
10
11#include <asm/reg.h>
12#include <asm/ppc_asm.h>
13#include <asm/processor.h>
14
15#include <asm/kexec.h>
16
#define PAGE_SIZE	4096 /* must be same value as in <asm/page.h> */

	/*
	 * Must be relocatable PIC code callable as a C function.
	 *
	 * Walks the kexec indirection page list (IND_DESTINATION /
	 * IND_INDIRECTION / IND_DONE / IND_SOURCE tagged entries),
	 * copies each source page to its destination with the MMU off,
	 * then jumps to the new kernel's entry point.
	 */
	.globl relocate_new_kernel
relocate_new_kernel:
	/* r3 = page_list */
	/* r4 = reboot_code_buffer */
	/* r5 = start_address */

	li	r0, 0

	/*
	 * Set Machine Status Register to a known status,
	 * switch the MMU off and jump to 1: in a single step.
	 * (SRR0/SRR1 + rfi performs the atomic MSR change and branch
	 * into the copy of this code at reboot_code_buffer.)
	 */

	mr	r8, r0
	ori     r8, r8, MSR_RI|MSR_ME
	mtspr	SPRN_SRR1, r8
	addi	r8, r4, 1f - relocate_new_kernel
	mtspr	SPRN_SRR0, r8
	sync
	rfi

1:
	/* from this point address translation is turned off */
	/* and interrupts are disabled */

	/* set a new stack at the bottom of our page... */
	/* (not really needed now) */
	addi	r1, r4, KEXEC_CONTROL_CODE_SIZE - 8 /* for LR Save+Back Chain */
	stw	r0, 0(r1)

	/* Do the copies */
	li	r6, 0 /* checksum */
	mr	r0, r3
	b	1f

0:	/* top, read another word for the indirection page */
	lwzu	r0, 4(r3)

1:
	/* is it a destination page? (r8) */
	rlwinm.	r7, r0, 0, 31, 31 /* IND_DESTINATION (1<<0) */
	beq	2f

	rlwinm	r8, r0, 0, 0, 19 /* clear kexec flags, page align */
	b	0b

2:	/* is it an indirection page? (r3) */
	rlwinm.	r7, r0, 0, 30, 30 /* IND_INDIRECTION (1<<1) */
	beq	2f

	rlwinm	r3, r0, 0, 0, 19 /* clear kexec flags, page align */
	subi	r3, r3, 4	/* pre-bias for the lwzu at 0: */
	b	0b

2:	/* are we done? */
	rlwinm.	r7, r0, 0, 29, 29 /* IND_DONE (1<<2) */
	beq	2f
	b	3f

2:	/* is it a source page? (r9) */
	rlwinm.	r7, r0, 0, 28, 28 /* IND_SOURCE (1<<3) */
	beq	0b

	rlwinm	r9, r0, 0, 0, 19 /* clear kexec flags, page align */

	/* copy one PAGE_SIZE page, one word at a time, XOR-summing
	 * into r6 (the sum is not checked here) and flushing each
	 * destination word out of the d-cache / invalidating i-cache */
	li	r7, PAGE_SIZE / 4
	mtctr   r7
	subi    r9, r9, 4
	subi    r8, r8, 4
9:
	lwzu    r0, 4(r9)	/* do the copy */
	xor	r6, r6, r0
	stwu    r0, 4(r8)
	dcbst	0, r8
	sync
	icbi	0, r8
	bdnz    9b

	addi    r9, r9, 4
	addi    r8, r8, 4
	b	0b

3:

	/* To be certain of avoiding problems with self-modifying code
	 * execute a serializing instruction here.
	 */
	isync
	sync

	/* jump to the entry point, usually the setup routine */
	mtlr	r5
	blrl

1:	b	1b		/* should never return; spin if it does */

relocate_new_kernel_end:

	/* byte size of the code above, used when copying it into the
	 * control page */
	.globl relocate_new_kernel_size
relocate_new_kernel_size:
	.long relocate_new_kernel_end - relocate_new_kernel
123
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c
deleted file mode 100644
index 51e8094f52d6..000000000000
--- a/arch/ppc/kernel/setup.c
+++ /dev/null
@@ -1,572 +0,0 @@
1/*
2 * Common prep boot and setup code.
3 */
4
5#include <linux/module.h>
6#include <linux/string.h>
7#include <linux/sched.h>
8#include <linux/init.h>
9#include <linux/kernel.h>
10#include <linux/reboot.h>
11#include <linux/delay.h>
12#include <linux/initrd.h>
13#include <linux/screen_info.h>
14#include <linux/bootmem.h>
15#include <linux/seq_file.h>
16#include <linux/root_dev.h>
17#include <linux/cpu.h>
18#include <linux/console.h>
19
20#include <asm/residual.h>
21#include <asm/io.h>
22#include <asm/prom.h>
23#include <asm/processor.h>
24#include <asm/pgtable.h>
25#include <asm/bootinfo.h>
26#include <asm/setup.h>
27#include <asm/smp.h>
28#include <asm/elf.h>
29#include <asm/cputable.h>
30#include <asm/bootx.h>
31#include <asm/btext.h>
32#include <asm/machdep.h>
33#include <asm/uaccess.h>
34#include <asm/system.h>
35#include <asm/sections.h>
36#include <asm/nvram.h>
37#include <asm/xmon.h>
38#include <asm/ocp.h>
39#include <asm/irq.h>
40
/* NOTE(review): expanding defined() via a macro that is later used in
 * #if is undefined behavior per the C standard, although the usual
 * compilers accept this idiom. */
#define USES_PPC_SYS (defined(CONFIG_MPC10X_BRIDGE) || defined(CONFIG_8260) || \
		defined(CONFIG_PPC_MPC52xx))

#if USES_PPC_SYS
#include <asm/ppc_sys.h>
#endif

#if defined CONFIG_KGDB
#include <asm/kgdb.h>
#endif

/* Entry points implemented elsewhere (platform code / assembly). */
extern void platform_init(unsigned long r3, unsigned long r4,
		unsigned long r5, unsigned long r6, unsigned long r7);
extern void reloc_got2(unsigned long offset);

extern void ppc6xx_idle(void);
extern void power4_idle(void);

extern boot_infos_t *boot_infos;

/* Used with the BI_MEMSIZE bootinfo parameter to store the memory
   size value reported by the boot loader. */
unsigned long boot_mem_size;

/* ISA DMA parameters; defined here, exported from ppc_ksyms.c.
 * NOTE(review): filled in by platform code outside this excerpt. */
unsigned long ISA_DMA_THRESHOLD;
unsigned int DMA_MODE_READ;
unsigned int DMA_MODE_WRITE;

#ifdef CONFIG_PPC_PREP
extern void prep_init(unsigned long r3, unsigned long r4,
		unsigned long r5, unsigned long r6, unsigned long r7);

dev_t boot_dev;	/* boot device, PReP only */
#endif /* CONFIG_PPC_PREP */

/* presumably non-zero when booted from Open Firmware — set outside
 * this excerpt */
int have_of;
EXPORT_SYMBOL(have_of);

#ifdef __DO_IRQ_CANON
int ppc_do_canonicalize_irqs;
EXPORT_SYMBOL(ppc_do_canonicalize_irqs);
#endif

#ifdef CONFIG_VGA_CONSOLE
unsigned long vgacon_remap_base;
#endif

/* Machine-dependent operations vector; individual hooks are installed
 * by platform code (see platform_init()/machine_init() below). */
struct machdep_calls ppc_md;

/*
 * These are used in binfmt_elf.c to put aux entries on the stack
 * for each elf executable being started.
 */
int dcache_bsize;
int icache_bsize;
int ucache_bsize;

/* Legacy VGA text-mode description handed to the console drivers. */
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_FB_VGA16) || \
	defined(CONFIG_FB_VGA16_MODULE) || defined(CONFIG_FB_VESA)
struct screen_info screen_info = {
	0, 25,			/* orig-x, orig-y */
	0,			/* unused */
	0,			/* orig-video-page */
	0,			/* orig-video-mode */
	80,			/* orig-video-cols */
	0,0,0,			/* ega_ax, ega_bx, ega_cx */
	25,			/* orig-video-lines */
	1,			/* orig-video-isVGA */
	16			/* orig-video-points */
};
#endif /* CONFIG_VGA_CONSOLE || CONFIG_FB_VGA16 || CONFIG_FB_VESA */
112
/* Reboot: flush NVRAM if configured, then invoke the platform restart
 * hook.  NOTE(review): assumes ppc_md.restart is non-NULL. */
void machine_restart(char *cmd)
{
#ifdef CONFIG_NVRAM
	nvram_sync();
#endif
	ppc_md.restart(cmd);
}
120
/* Default pm_power_off implementation: defer to the platform hook. */
static void ppc_generic_power_off(void)
{
	ppc_md.power_off();
}
125
/* Halt: flush NVRAM if configured, then invoke the platform halt hook. */
void machine_halt(void)
{
#ifdef CONFIG_NVRAM
	nvram_sync();
#endif
	ppc_md.halt();
}
133
/* Overridable power-off hook; defaults to the platform implementation. */
void (*pm_power_off)(void) = ppc_generic_power_off;

void machine_power_off(void)
{
#ifdef CONFIG_NVRAM
	nvram_sync();
#endif
	if (pm_power_off)
		pm_power_off();
	/* NOTE(review): reached only if pm_power_off returned (or was
	 * cleared) — falls back to the generic platform power-off. */
	ppc_generic_power_off();
}
145
#ifdef CONFIG_TAU
/* Thermal Assist Unit readouts, implemented outside this file. */
extern u32 cpu_temp(unsigned long cpu);
extern u32 cpu_temp_both(unsigned long cpu);
#endif /* CONFIG_TAU */

/*
 * /proc/cpuinfo .show routine.  @v is the cookie produced by c_start():
 * cpu index + 1, so an index >= NR_CPUS selects the trailing summary
 * section instead of a per-cpu entry.
 */
int show_cpuinfo(struct seq_file *m, void *v)
{
	int i = (int) v - 1;
	int err = 0;
	unsigned int pvr;
	unsigned short maj, min;
	unsigned long lpj;

	if (i >= NR_CPUS) {
		/* Show summary information */
#ifdef CONFIG_SMP
		unsigned long bogosum = 0;
		for_each_online_cpu(i)
			bogosum += cpu_data[i].loops_per_jiffy;
		seq_printf(m, "total bogomips\t: %lu.%02lu\n",
			   bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
#endif /* CONFIG_SMP */

		/* let the platform append its own summary lines */
		if (ppc_md.show_cpuinfo != NULL)
			err = ppc_md.show_cpuinfo(m);
		return err;
	}

#ifdef CONFIG_SMP
	if (!cpu_online(i))
		return 0;
	pvr = cpu_data[i].pvr;
	lpj = cpu_data[i].loops_per_jiffy;
#else
	pvr = mfspr(SPRN_PVR);
	lpj = loops_per_jiffy;
#endif

	seq_printf(m, "processor\t: %d\n", i);
	seq_printf(m, "cpu\t\t: ");

	if (cur_cpu_spec->pvr_mask)
		seq_printf(m, "%s", cur_cpu_spec->cpu_name);
	else
		seq_printf(m, "unknown (%08x)", pvr);
#ifdef CONFIG_ALTIVEC
	if (cur_cpu_spec->cpu_features & CPU_FTR_ALTIVEC)
		seq_printf(m, ", altivec supported");
#endif
	seq_printf(m, "\n");

#ifdef CONFIG_TAU
	if (cur_cpu_spec->cpu_features & CPU_FTR_TAU) {
#ifdef CONFIG_TAU_AVERAGE
		/* more straightforward, but potentially misleading */
		seq_printf(m, "temperature \t: %u C (uncalibrated)\n",
			   cpu_temp(i));
#else
		/* show the actual temp sensor range */
		u32 temp;
		temp = cpu_temp_both(i);
		seq_printf(m, "temperature \t: %u-%u C (uncalibrated)\n",
			   temp & 0xff, temp >> 16);
#endif
	}
#endif /* CONFIG_TAU */

	if (ppc_md.show_percpuinfo != NULL) {
		err = ppc_md.show_percpuinfo(m, i);
		if (err)
			return err;
	}

	/* If we are a Freescale core do a simple check so
	 * we dont have to keep adding cases in the future */
	if ((PVR_VER(pvr) & 0x8000) == 0x8000) {
		maj = PVR_MAJ(pvr);
		min = PVR_MIN(pvr);
	} else {
		/* decode major/minor revision from the PVR low half */
		switch (PVR_VER(pvr)) {
		case 0x0020:	/* 403 family */
			maj = PVR_MAJ(pvr) + 1;
			min = PVR_MIN(pvr);
			break;
		case 0x1008:	/* 740P/750P ?? */
			maj = ((pvr >> 8) & 0xFF) - 1;
			min = pvr & 0xFF;
			break;
		default:
			maj = (pvr >> 8) & 0xFF;
			min = pvr & 0xFF;
			break;
		}
	}

	seq_printf(m, "revision\t: %hd.%hd (pvr %04x %04x)\n",
		   maj, min, PVR_VER(pvr), PVR_REV(pvr));

	seq_printf(m, "bogomips\t: %lu.%02lu\n",
		   lpj / (500000/HZ), (lpj / (5000/HZ)) % 100);

#if USES_PPC_SYS
	if (cur_ppc_sys_spec->ppc_sys_name)
		seq_printf(m, "chipset\t\t: %s\n",
			   cur_ppc_sys_spec->ppc_sys_name);
#endif

#ifdef CONFIG_SMP
	seq_printf(m, "\n");
#endif

	return 0;
}
259
260static void *c_start(struct seq_file *m, loff_t *pos)
261{
262 int i = *pos;
263
264 return i <= NR_CPUS? (void *) (i + 1): NULL;
265}
266
267static void *c_next(struct seq_file *m, void *v, loff_t *pos)
268{
269 ++*pos;
270 return c_start(m, pos);
271}
272
/* seq_file .stop hook: nothing to release. */
static void c_stop(struct seq_file *m, void *v)
{
}
276
/* Iterator bound to /proc/cpuinfo (see c_start/c_next/c_stop above). */
const struct seq_operations cpuinfo_op = {
	.start =c_start,
	.next =	c_next,
	.stop =	c_stop,
	.show =	show_cpuinfo,
};
283
/*
 * We're called here very early in the boot.  We determine the machine
 * type and call the appropriate low-level setup functions.
 *  -- Cort <cort@fsmlabs.com>
 *
 * Note that the kernel may be running at an address which is different
 * from the address that it was linked at, so we must use RELOC/PTRRELOC
 * to access static data (including strings).  -- paulus
 *
 * Returns the physical address the kernel should be running at
 * (offset + KERNELBASE).
 */
__init
unsigned long
early_init(int r3, int r4, int r5)
{
	unsigned long phys;
	unsigned long offset = reloc_offset();
	struct cpu_spec *spec;

	/* Default */
	phys = offset + KERNELBASE;

	/* First zero the BSS -- use memset_io, some platforms don't have
	 * caches on yet */
	memset_io(PTRRELOC(&__bss_start), 0, _end - __bss_start);

	/*
	 * Identify the CPU type and fix up code sections
	 * that depend on which cpu we have.
	 */
#if defined(CONFIG_440EP) && defined(CONFIG_PPC_FPU)
	/* We pass the virtual PVR here for 440EP as 440EP and 440GR have
	 * identical PVRs and there is no reliable way to check for the FPU
	 */
	spec = identify_cpu(offset, (mfspr(SPRN_PVR) | 0x8));
#else
	spec = identify_cpu(offset, mfspr(SPRN_PVR));
#endif
	/* patch feature-dependent code sections in place */
	do_feature_fixups(spec->cpu_features,
			  PTRRELOC(&__start___ftr_fixup),
			  PTRRELOC(&__stop___ftr_fixup));

	return phys;
}
326
#ifdef CONFIG_PPC_PREP
/*
 * The PPC_PREP version of platform_init: clear the boot-text screen if
 * one was mapped, parse the boot-loader's bootinfo records, then hand
 * off to the PReP-specific init.
 */
void __init
platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
		unsigned long r6, unsigned long r7)
{
#ifdef CONFIG_BOOTX_TEXT
	if (boot_text_mapped) {
		btext_clearscreen();
		btext_welcome();
	}
#endif

	parse_bootinfo(find_bootinfo());

	prep_init(r3, r4, r5, r6, r7);
}
#endif /* CONFIG_PPC_PREP */
347
/*
 * Locate the bootinfo record list the boot loader left in memory:
 * expected at the first 1MB boundary past __bss_start, with a second
 * probe 0x10000 further on as a fallback.  Returns NULL if no BI_FIRST
 * tag is found at either location.
 */
struct bi_record *find_bootinfo(void)
{
	struct bi_record *rec;

	rec = (struct bi_record *)_ALIGN((ulong)__bss_start+(1<<20)-1,(1<<20));
	if ( rec->tag != BI_FIRST ) {
		/*
		 * This 0x10000 offset is a terrible hack but it will go away when
		 * we have the bootloader handle all the relocation and
		 * prom calls -- Cort
		 */
		rec = (struct bi_record *)_ALIGN((ulong)__bss_start+0x10000+(1<<20)-1,(1<<20));
		if ( rec->tag != BI_FIRST )
			return NULL;
	}
	return rec;
}
365
/*
 * Walk the bootinfo record list (as returned by find_bootinfo()) and
 * capture the pieces we care about: command line, initrd placement and
 * the boot loader's memory-size report.  Unknown tags are skipped.
 */
void parse_bootinfo(struct bi_record *rec)
{
	if (rec == NULL || rec->tag != BI_FIRST)
		return;
	while (rec->tag != BI_LAST) {
		ulong *data = rec->data;
		switch (rec->tag) {
		case BI_CMD_LINE:
			strlcpy(cmd_line, (void *)data, sizeof(cmd_line));
			break;
#ifdef CONFIG_BLK_DEV_INITRD
		case BI_INITRD:
			/* data[0] = phys start, data[1] = length */
			initrd_start = data[0] + KERNELBASE;
			initrd_end = data[0] + data[1] + KERNELBASE;
			break;
#endif /* CONFIG_BLK_DEV_INITRD */
		case BI_MEMSIZE:
			boot_mem_size = data[0];
			break;
		}
		/* records are variable length; size covers header + data */
		rec = (struct bi_record *)((ulong)rec + rec->size);
	}
}
389
/*
 * Find out what kind of machine we're on and save any data we need
 * from the early boot process (devtree is copied on pmac by prom_init()).
 * This is called very early on the boot process, after a minimal
 * MMU environment has been set up but before MMU_init is called.
 */
void __init
machine_init(unsigned long r3, unsigned long r4, unsigned long r5,
	     unsigned long r6, unsigned long r7)
{
#ifdef CONFIG_CMDLINE
	/* compiled-in command line takes precedence at this point */
	strlcpy(cmd_line, CONFIG_CMDLINE, sizeof(cmd_line));
#endif /* CONFIG_CMDLINE */

#ifdef CONFIG_6xx
	ppc_md.power_save = ppc6xx_idle;
#endif

	/* platform code fills in the rest of ppc_md */
	platform_init(r3, r4, r5, r6, r7);

	if (ppc_md.progress)
		ppc_md.progress("id mach(): done", 0x200);
}
413#ifdef CONFIG_BOOKE_WDT
414/* Checks wdt=x and wdt_period=xx command-line option */
415int __init early_parse_wdt(char *p)
416{
417 if (p && strncmp(p, "0", 1) != 0)
418 booke_wdt_enabled = 1;
419
420 return 0;
421}
422early_param("wdt", early_parse_wdt);
423
424int __init early_parse_wdt_period (char *p)
425{
426 if (p)
427 booke_wdt_period = simple_strtoul(p, NULL, 0);
428
429 return 0;
430}
431early_param("wdt_period", early_parse_wdt_period);
432#endif /* CONFIG_BOOKE_WDT */
433
/* Checks "l2cr=xxxx" command-line option */
/* __setup handler: if this CPU has an L2CR register, invalidate the L2
 * by disabling it, then program the requested value. */
int __init ppc_setup_l2cr(char *str)
{
	if (cpu_has_feature(CPU_FTR_L2CR)) {
		unsigned long val = simple_strtoul(str, NULL, 0);
		printk(KERN_INFO "l2cr set to %lx\n", val);
		_set_L2CR(0);		/* force invalidate by disable cache */
		_set_L2CR(val);		/* and enable it */
	}
	return 1;
}
__setup("l2cr=", ppc_setup_l2cr);
446
447#ifdef CONFIG_GENERIC_NVRAM
448
449/* Generic nvram hooks used by drivers/char/gen_nvram.c */
450unsigned char nvram_read_byte(int addr)
451{
452 if (ppc_md.nvram_read_val)
453 return ppc_md.nvram_read_val(addr);
454 return 0xff;
455}
456EXPORT_SYMBOL(nvram_read_byte);
457
458void nvram_write_byte(unsigned char val, int addr)
459{
460 if (ppc_md.nvram_write_val)
461 ppc_md.nvram_write_val(addr, val);
462}
463EXPORT_SYMBOL(nvram_write_byte);
464
/* Flush pending nvram writes via the platform hook, if any. */
void nvram_sync(void)
{
	if (ppc_md.nvram_sync)
		ppc_md.nvram_sync();
}
EXPORT_SYMBOL(nvram_sync);
471
#endif /* CONFIG_GENERIC_NVRAM */
473
/* Per-cpu device objects registered with the driver core at boot. */
static struct cpu cpu_devices[NR_CPUS];

/* arch_initcall: register CPU devices and run the platform's late init. */
int __init ppc_init(void)
{
	int i;

	/* clear the progress line */
	if ( ppc_md.progress ) ppc_md.progress("             ", 0xffff);

	/* register CPU devices */
	for_each_possible_cpu(i)
		register_cpu(&cpu_devices[i], i);

	/* call platform init */
	if (ppc_md.init != NULL) {
		ppc_md.init();
	}
	return 0;
}

arch_initcall(ppc_init);
495
/* Warning, IO base is not yet inited */
/*
 * Main architecture setup: bring up early platform hooks and debuggers,
 * pick cache-line sizes, seed init_mm boundaries, parse early params,
 * initialize bootmem, then hand off to the platform's setup_arch.
 */
void __init setup_arch(char **cmdline_p)
{
	extern char *klimit;
	extern void do_init_bootmem(void);

	/* so udelay does something sensible, assume <= 1000 bogomips */
	loops_per_jiffy = 500000000 / HZ;

	if (ppc_md.init_early)
		ppc_md.init_early();

#ifdef CONFIG_XMON
	xmon_init(1);
	if (strstr(cmd_line, "xmon"))
		xmon(NULL);	/* drop into the debugger immediately */
#endif /* CONFIG_XMON */
	if ( ppc_md.progress ) ppc_md.progress("setup_arch: enter", 0x3eab);

#if defined(CONFIG_KGDB)
	if (ppc_md.kgdb_map_scc)
		ppc_md.kgdb_map_scc();
	set_debug_traps();
	if (strstr(cmd_line, "gdb")) {
		if (ppc_md.progress)
			ppc_md.progress("setup_arch: kgdb breakpoint", 0x4000);
		printk("kgdb breakpoint activated\n");
		breakpoint();
	}
#endif

	/*
	 * Set cache line size based on type of cpu as a default.
	 * Systems with OF can look in the properties on the cpu node(s)
	 * for a possibly more accurate value.
	 */
	if (! cpu_has_feature(CPU_FTR_UNIFIED_ID_CACHE)) {
		dcache_bsize = cur_cpu_spec->dcache_bsize;
		icache_bsize = cur_cpu_spec->icache_bsize;
		ucache_bsize = 0;
	} else
		/* unified cache: one size for all three */
		ucache_bsize = dcache_bsize = icache_bsize
			= cur_cpu_spec->dcache_bsize;

	/* reboot on panic */
	panic_timeout = 180;

	init_mm.start_code = PAGE_OFFSET;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) klimit;

	/* Save unparsed command line copy for /proc/cmdline */
	strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	/* set up the bootmem stuff with available memory */
	do_init_bootmem();
	if ( ppc_md.progress ) ppc_md.progress("setup_arch: bootmem", 0x3eab);

#ifdef CONFIG_PPC_OCP
	/* Initialize OCP device list */
	ocp_early_init();
	if ( ppc_md.progress ) ppc_md.progress("ocp: exit", 0x3eab);
#endif

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

	ppc_md.setup_arch();
	if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab);

	paging_init();
}
diff --git a/arch/ppc/kernel/smp-tbsync.c b/arch/ppc/kernel/smp-tbsync.c
deleted file mode 100644
index d0cf3f86931d..000000000000
--- a/arch/ppc/kernel/smp-tbsync.c
+++ /dev/null
@@ -1,180 +0,0 @@
1/*
2 * Smp timebase synchronization for ppc.
3 *
4 * Copyright (C) 2003 Samuel Rydh (samuel@ibrium.se)
5 *
6 */
7
8#include <linux/kernel.h>
9#include <linux/sched.h>
10#include <linux/smp.h>
11#include <linux/unistd.h>
12#include <linux/init.h>
13#include <asm/atomic.h>
14#include <asm/smp.h>
15#include <asm/time.h>
16
/* Measurement rounds per synchronization step. */
#define NUM_ITER		300

/* Commands the master posts to the slave through tbsync->cmd. */
enum {
	kExit=0, kSetAndTest, kTest
};

/*
 * Shared master/slave handshake area.  The filler arrays presumably
 * pad the hot flags apart (cache-line separation) — not verifiable
 * from this excerpt.  NOTE(review): fields are plain volatile ints,
 * not atomics; the protocol relies on the explicit rmb()/wmb() calls
 * at the use sites.
 */
static struct {
	volatile int		tbu;
	volatile int		tbl;
	volatile int		mark;
	volatile int		cmd;
	volatile int		handshake;
	int			filler[3];

	volatile int		ack;
	int			filler2[7];

	volatile int		race_result;
} *tbsync;

/* Set once the shared area is allocated and the protocol may start. */
static volatile int		running;
38
39static void __devinit
40enter_contest( int mark, int add )
41{
42 while( (int)(get_tbl() - mark) < 0 )
43 tbsync->race_result = add;
44}
45
/*
 * Slave side of timebase synchronization.  Spins (interrupts off) on
 * the shared handshake area: acknowledge, wait for the master's
 * command, optionally load the timebase values the master published
 * (kSetAndTest), then join the write race so the master can score the
 * current offset.  Exits on kExit.
 */
void __devinit
smp_generic_take_timebase( void )
{
	int cmd, tbl, tbu;
	unsigned long flags;

	local_irq_save(flags);
	while( !running )	/* wait for master to allocate tbsync */
		;
	rmb();

	for( ;; ) {
		tbsync->ack = 1;
		while( !tbsync->handshake )
			;
		rmb();

		/* snapshot the command and published timebase words */
		cmd = tbsync->cmd;
		tbl = tbsync->tbl;
		tbu = tbsync->tbu;
		tbsync->ack = 0;
		if( cmd == kExit )
			break;

		if( cmd == kSetAndTest ) {
			/* wait for the agreed start, then load our timebase */
			while( tbsync->handshake )
				;
			asm volatile ("mttbl %0" :: "r" (tbl) );
			asm volatile ("mttbu %0" :: "r" (tbu) );
		} else {
			while( tbsync->handshake )
				;
		}
		/* slave races with -1; master races with +1 */
		enter_contest( tbsync->mark, -1 );
	}
	local_irq_restore(flags);
}
83
/*
 * Master side of one measurement pass: run 'num' scored rounds of the
 * write race with the slave's timebase offset by 'offset'.  The first
 * three rounds (i starts at -3) are warm-up and not scored; rounds
 * where the timebase upper word changed (wrap) are retried.  The sign
 * of the returned score drives the caller's binary search.
 */
static int __devinit
start_contest( int cmd, int offset, int num )
{
	int i, tbu, tbl, mark, score=0;

	tbsync->cmd = cmd;

	local_irq_disable();
	for( i=-3; i<num; ) {
		/* publish start time (+400 ticks) and the offset target */
		tbl = get_tbl() + 400;
		tbsync->tbu = tbu = get_tbu();
		tbsync->tbl = tbl + offset;
		tbsync->mark = mark = tbl + 400;

		wmb();

		tbsync->handshake = 1;
		while( tbsync->ack )
			;

		/* wait until the agreed start time, then release the slave */
		while( (int)(get_tbl() - tbl) <= 0 )
			;
		tbsync->handshake = 0;
		enter_contest( mark, 1 );

		while( !tbsync->ack )
			;

		/* discard rounds spanning a lower-word wrap */
		if( tbsync->tbu != get_tbu() || ((tbsync->tbl ^ get_tbl()) & 0x80000000) )
			continue;
		if( i++ > 0 )
			score += tbsync->race_result;
	}
	local_irq_enable();
	return score;
}
120
/*
 * Master side of timebase synchronization: binary-search the offset
 * between [min, max) using the race score, verify the better endpoint,
 * re-test a few times to guard against an inaccurate mttb, then tell
 * the slave to exit and tear down the shared area.
 */
void __devinit
smp_generic_give_timebase( void )
{
	int i, score, score2, old, min=0, max=5000, offset=1000;

	printk("Synchronizing timebase\n");

	/* if this fails then this kernel won't work anyway... */
	/* NOTE(review): kzalloc result deliberately unchecked, per the
	 * comment above — a NULL here would oops immediately below. */
	tbsync = kzalloc( sizeof(*tbsync), GFP_KERNEL );
	mb();
	running = 1;	/* lets the slave out of its first spin loop */

	while( !tbsync->ack )
		;

	/* binary search */
	for( old=-1 ; old != offset ; offset=(min+max)/2 ) {
		score = start_contest( kSetAndTest, offset, NUM_ITER );

		printk("score %d, offset %d\n", score, offset );

		if( score > 0 )
			max = offset;
		else
			min = offset;
		old = offset;
	}
	/* compare both remaining candidates and keep the closer one */
	score = start_contest( kSetAndTest, min, NUM_ITER );
	score2 = start_contest( kSetAndTest, max, NUM_ITER );

	printk( "Min %d (score %d), Max %d (score %d)\n", min, score, max, score2 );
	score = abs( score );
	score2 = abs( score2 );
	offset = (score < score2) ? min : max;

	/* guard against inaccurate mttb */
	for( i=0; i<10; i++ ) {
		start_contest( kSetAndTest, offset, NUM_ITER/10 );

		if( (score2=start_contest(kTest, offset, NUM_ITER)) < 0 )
			score2 = -score2;
		if( score2 <= score || score2 < 20 )
			break;
	}
	printk("Final offset: %d (%d/%d)\n", offset, score2, NUM_ITER );

	/* exiting */
	tbsync->cmd = kExit;
	wmb();
	tbsync->handshake = 1;
	while( tbsync->ack )
		;
	tbsync->handshake = 0;
	kfree( tbsync );
	tbsync = NULL;
	running = 0;

	/* all done */
	smp_tb_synchronized = 1;
}
diff --git a/arch/ppc/kernel/smp.c b/arch/ppc/kernel/smp.c
deleted file mode 100644
index 055998575cb4..000000000000
--- a/arch/ppc/kernel/smp.c
+++ /dev/null
@@ -1,414 +0,0 @@
1/*
2 * Smp support for ppc.
3 *
4 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
5 * deal of code from the sparc and intel versions.
6 *
7 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
8 *
9 */
10
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/sched.h>
14#include <linux/smp.h>
15#include <linux/interrupt.h>
16#include <linux/kernel_stat.h>
17#include <linux/delay.h>
18#include <linux/init.h>
19#include <linux/spinlock.h>
20#include <linux/cache.h>
21
22#include <asm/ptrace.h>
23#include <asm/atomic.h>
24#include <asm/irq.h>
25#include <asm/page.h>
26#include <asm/pgtable.h>
27#include <asm/io.h>
28#include <asm/prom.h>
29#include <asm/smp.h>
30#include <asm/residual.h>
31#include <asm/time.h>
32#include <asm/thread_info.h>
33#include <asm/tlbflush.h>
34#include <asm/xmon.h>
35#include <asm/machdep.h>
36
37volatile int smp_commenced;
38int smp_tb_synchronized;
39struct cpuinfo_PPC cpu_data[NR_CPUS];
40atomic_t ipi_recv;
41atomic_t ipi_sent;
42cpumask_t cpu_online_map;
43cpumask_t cpu_possible_map;
44int smp_hw_index[NR_CPUS];
45struct thread_info *secondary_ti;
46static struct task_struct *idle_tasks[NR_CPUS];
47
48EXPORT_SYMBOL(cpu_online_map);
49EXPORT_SYMBOL(cpu_possible_map);
50
51/* SMP operations for this machine */
52struct smp_ops_t *smp_ops;
53
54/* all cpu mappings are 1-1 -- Cort */
55volatile unsigned long cpu_callin_map[NR_CPUS];
56
57int start_secondary(void *);
58void smp_call_function_interrupt(void);
59static int __smp_call_function(void (*func) (void *info), void *info,
60 int wait, int target);
61
62/* Low level assembly function used to backup CPU 0 state */
63extern void __save_cpu_setup(void);
64
65/* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
66 *
67 * Make sure this matches openpic_request_IPIs in open_pic.c, or what shows up
68 * in /proc/interrupts will be wrong!!! --Troy */
69#define PPC_MSG_CALL_FUNCTION 0
70#define PPC_MSG_RESCHEDULE 1
71#define PPC_MSG_INVALIDATE_TLB 2
72#define PPC_MSG_XMON_BREAK 3
73
74static inline void
75smp_message_pass(int target, int msg)
76{
77 if (smp_ops) {
78 atomic_inc(&ipi_sent);
79 smp_ops->message_pass(target, msg);
80 }
81}
82
83/*
84 * Common functions
85 */
86void smp_message_recv(int msg)
87{
88 atomic_inc(&ipi_recv);
89
90 switch( msg ) {
91 case PPC_MSG_CALL_FUNCTION:
92 smp_call_function_interrupt();
93 break;
94 case PPC_MSG_RESCHEDULE:
95 set_need_resched();
96 break;
97 case PPC_MSG_INVALIDATE_TLB:
98 _tlbia();
99 break;
100#ifdef CONFIG_XMON
101 case PPC_MSG_XMON_BREAK:
102 xmon(get_irq_regs());
103 break;
104#endif /* CONFIG_XMON */
105 default:
106 printk("SMP %d: smp_message_recv(): unknown msg %d\n",
107 smp_processor_id(), msg);
108 break;
109 }
110}
111
112/*
113 * 750's don't broadcast tlb invalidates so
114 * we have to emulate that behavior.
115 * -- Cort
116 */
117void smp_send_tlb_invalidate(int cpu)
118{
119 if ( PVR_VER(mfspr(SPRN_PVR)) == 8 )
120 smp_message_pass(MSG_ALL_BUT_SELF, PPC_MSG_INVALIDATE_TLB);
121}
122
123void smp_send_reschedule(int cpu)
124{
125 /*
126 * This is only used if `cpu' is running an idle task,
127 * so it will reschedule itself anyway...
128 *
129 * This isn't the case anymore since the other CPU could be
130 * sleeping and won't reschedule until the next interrupt (such
131 * as the timer).
132 * -- Cort
133 */
134 /* This is only used if `cpu' is running an idle task,
135 so it will reschedule itself anyway... */
136 smp_message_pass(cpu, PPC_MSG_RESCHEDULE);
137}
138
139#ifdef CONFIG_XMON
140void smp_send_xmon_break(int cpu)
141{
142 smp_message_pass(cpu, PPC_MSG_XMON_BREAK);
143}
144#endif /* CONFIG_XMON */
145
146static void stop_this_cpu(void *dummy)
147{
148 local_irq_disable();
149 while (1)
150 ;
151}
152
153void smp_send_stop(void)
154{
155 smp_call_function(stop_this_cpu, NULL, 1, 0);
156}
157
158/*
159 * Structure and data for smp_call_function(). This is designed to minimise
160 * static memory requirements. It also looks cleaner.
161 * Stolen from the i386 version.
162 */
163static DEFINE_SPINLOCK(call_lock);
164
165static struct call_data_struct {
166 void (*func) (void *info);
167 void *info;
168 atomic_t started;
169 atomic_t finished;
170 int wait;
171} *call_data;
172
173/*
174 * this function sends a 'generic call function' IPI to all other CPUs
175 * in the system.
176 */
177
178int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
179 int wait)
180/*
181 * [SUMMARY] Run a function on all other CPUs.
182 * <func> The function to run. This must be fast and non-blocking.
183 * <info> An arbitrary pointer to pass to the function.
184 * <nonatomic> currently unused.
185 * <wait> If true, wait (atomically) until function has completed on other CPUs.
186 * [RETURNS] 0 on success, else a negative status code. Does not return until
187 * remote CPUs are nearly ready to execute <<func>> or are or have executed.
188 *
189 * You must not call this function with disabled interrupts or from a
190 * hardware interrupt handler or from a bottom half handler.
191 */
192{
193 /* FIXME: get cpu lock with hotplug cpus, or change this to
194 bitmask. --RR */
195 if (num_online_cpus() <= 1)
196 return 0;
197 /* Can deadlock when called with interrupts disabled */
198 WARN_ON(irqs_disabled());
199 return __smp_call_function(func, info, wait, MSG_ALL_BUT_SELF);
200}
201
202static int __smp_call_function(void (*func) (void *info), void *info,
203 int wait, int target)
204{
205 struct call_data_struct data;
206 int ret = -1;
207 int timeout;
208 int ncpus = 1;
209
210 if (target == MSG_ALL_BUT_SELF)
211 ncpus = num_online_cpus() - 1;
212 else if (target == MSG_ALL)
213 ncpus = num_online_cpus();
214
215 data.func = func;
216 data.info = info;
217 atomic_set(&data.started, 0);
218 data.wait = wait;
219 if (wait)
220 atomic_set(&data.finished, 0);
221
222 spin_lock(&call_lock);
223 call_data = &data;
224 /* Send a message to all other CPUs and wait for them to respond */
225 smp_message_pass(target, PPC_MSG_CALL_FUNCTION);
226
227 /* Wait for response */
228 timeout = 1000000;
229 while (atomic_read(&data.started) != ncpus) {
230 if (--timeout == 0) {
231 printk("smp_call_function on cpu %d: other cpus not responding (%d)\n",
232 smp_processor_id(), atomic_read(&data.started));
233 goto out;
234 }
235 barrier();
236 udelay(1);
237 }
238
239 if (wait) {
240 timeout = 1000000;
241 while (atomic_read(&data.finished) != ncpus) {
242 if (--timeout == 0) {
243 printk("smp_call_function on cpu %d: other cpus not finishing (%d/%d)\n",
244 smp_processor_id(), atomic_read(&data.finished), atomic_read(&data.started));
245 goto out;
246 }
247 barrier();
248 udelay(1);
249 }
250 }
251 ret = 0;
252
253 out:
254 spin_unlock(&call_lock);
255 return ret;
256}
257
258void smp_call_function_interrupt(void)
259{
260 void (*func) (void *info) = call_data->func;
261 void *info = call_data->info;
262 int wait = call_data->wait;
263
264 /*
265 * Notify initiating CPU that I've grabbed the data and am
266 * about to execute the function
267 */
268 atomic_inc(&call_data->started);
269 /*
270 * At this point the info structure may be out of scope unless wait==1
271 */
272 (*func)(info);
273 if (wait)
274 atomic_inc(&call_data->finished);
275}
276
277static void __devinit smp_store_cpu_info(int id)
278{
279 struct cpuinfo_PPC *c = &cpu_data[id];
280
281 /* assume bogomips are same for everything */
282 c->loops_per_jiffy = loops_per_jiffy;
283 c->pvr = mfspr(SPRN_PVR);
284}
285
286void __init smp_prepare_cpus(unsigned int max_cpus)
287{
288 int num_cpus, i, cpu;
289 struct task_struct *p;
290
291 /* Fixup boot cpu */
292 smp_store_cpu_info(smp_processor_id());
293 cpu_callin_map[smp_processor_id()] = 1;
294
295 if (smp_ops == NULL) {
296 printk("SMP not supported on this machine.\n");
297 return;
298 }
299
300 /* Probe platform for CPUs: always linear. */
301 num_cpus = smp_ops->probe();
302
303 if (num_cpus < 2)
304 smp_tb_synchronized = 1;
305
306 for (i = 0; i < num_cpus; ++i)
307 cpu_set(i, cpu_possible_map);
308
309 /* Backup CPU 0 state */
310 __save_cpu_setup();
311
312 for_each_possible_cpu(cpu) {
313 if (cpu == smp_processor_id())
314 continue;
315 /* create a process for the processor */
316 p = fork_idle(cpu);
317 if (IS_ERR(p))
318 panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
319 task_thread_info(p)->cpu = cpu;
320 idle_tasks[cpu] = p;
321 }
322}
323
324void __devinit smp_prepare_boot_cpu(void)
325{
326 cpu_set(smp_processor_id(), cpu_online_map);
327 cpu_set(smp_processor_id(), cpu_possible_map);
328}
329
330int __init setup_profiling_timer(unsigned int multiplier)
331{
332 return 0;
333}
334
335/* Processor coming up starts here */
336int __devinit start_secondary(void *unused)
337{
338 int cpu;
339
340 atomic_inc(&init_mm.mm_count);
341 current->active_mm = &init_mm;
342
343 cpu = smp_processor_id();
344 smp_store_cpu_info(cpu);
345 set_dec(tb_ticks_per_jiffy);
346 preempt_disable();
347 cpu_callin_map[cpu] = 1;
348
349 printk("CPU %d done callin...\n", cpu);
350 smp_ops->setup_cpu(cpu);
351 printk("CPU %d done setup...\n", cpu);
352 smp_ops->take_timebase();
353 printk("CPU %d done timebase take...\n", cpu);
354
355 spin_lock(&call_lock);
356 cpu_set(cpu, cpu_online_map);
357 spin_unlock(&call_lock);
358
359 local_irq_enable();
360
361 cpu_idle();
362 return 0;
363}
364
365int __cpu_up(unsigned int cpu)
366{
367 char buf[32];
368 int c;
369
370 secondary_ti = task_thread_info(idle_tasks[cpu]);
371 mb();
372
373 /*
374 * There was a cache flush loop here to flush the cache
375 * to memory for the first 8MB of RAM. The cache flush
376 * has been pushed into the kick_cpu function for those
377 * platforms that need it.
378 */
379
380 /* wake up cpu */
381 smp_ops->kick_cpu(cpu);
382
383 /*
384 * wait to see if the cpu made a callin (is actually up).
385 * use this value that I found through experimentation.
386 * -- Cort
387 */
388 for (c = 1000; c && !cpu_callin_map[cpu]; c--)
389 udelay(100);
390
391 if (!cpu_callin_map[cpu]) {
392 sprintf(buf, "didn't find cpu %u", cpu);
393 if (ppc_md.progress) ppc_md.progress(buf, 0x360+cpu);
394 printk("Processor %u is stuck.\n", cpu);
395 return -ENOENT;
396 }
397
398 sprintf(buf, "found cpu %u", cpu);
399 if (ppc_md.progress) ppc_md.progress(buf, 0x350+cpu);
400 printk("Processor %d found.\n", cpu);
401
402 smp_ops->give_timebase();
403
404 /* Wait until cpu puts itself in the online map */
405 while (!cpu_online(cpu))
406 cpu_relax();
407
408 return 0;
409}
410
411void smp_cpus_done(unsigned int max_cpus)
412{
413 smp_ops->setup_cpu(0);
414}
diff --git a/arch/ppc/kernel/softemu8xx.c b/arch/ppc/kernel/softemu8xx.c
deleted file mode 100644
index 9bbb6bf7b645..000000000000
--- a/arch/ppc/kernel/softemu8xx.c
+++ /dev/null
@@ -1,147 +0,0 @@
1/*
2 * Software emulation of some PPC instructions for the 8xx core.
3 *
4 * Copyright (C) 1998 Dan Malek (dmalek@jlc.net)
5 *
6 * Software floating emuation for the MPC8xx processor. I did this mostly
7 * because it was easier than trying to get the libraries compiled for
8 * software floating point. The goal is still to get the libraries done,
9 * but I lost patience and needed some hacks to at least get init and
10 * shells running. The first problem is the setjmp/longjmp that save
11 * and restore the floating point registers.
12 *
13 * For this emulation, our working registers are found on the register
14 * save area.
15 */
16
17#include <linux/errno.h>
18#include <linux/sched.h>
19#include <linux/kernel.h>
20#include <linux/mm.h>
21#include <linux/stddef.h>
22#include <linux/unistd.h>
23#include <linux/ptrace.h>
24#include <linux/slab.h>
25#include <linux/user.h>
26#include <linux/a.out.h>
27#include <linux/interrupt.h>
28
29#include <asm/pgtable.h>
30#include <asm/uaccess.h>
31#include <asm/system.h>
32#include <asm/io.h>
33
34extern void
35print_8xx_pte(struct mm_struct *mm, unsigned long addr);
36extern int
37get_8xx_pte(struct mm_struct *mm, unsigned long addr);
38
39/* Eventually we may need a look-up table, but this works for now.
40*/
41#define LFS 48
42#define LFD 50
43#define LFDU 51
44#define STFD 54
45#define STFDU 55
46#define FMR 63
47
48/*
49 * We return 0 on success, 1 on unimplemented instruction, and EFAULT
50 * if a load/store faulted.
51 */
52int
53Soft_emulate_8xx(struct pt_regs *regs)
54{
55 uint inst, instword;
56 uint flreg, idxreg, disp;
57 uint retval;
58 signed short sdisp;
59 uint *ea, *ip;
60
61 retval = 0;
62
63 instword = *((uint *)regs->nip);
64 inst = instword >> 26;
65
66 flreg = (instword >> 21) & 0x1f;
67 idxreg = (instword >> 16) & 0x1f;
68 disp = instword & 0xffff;
69
70 ea = (uint *)(regs->gpr[idxreg] + disp);
71 ip = (uint *)&current->thread.fpr[flreg];
72
73 switch ( inst )
74 {
75 case LFD:
76 /* this is a 16 bit quantity that is sign extended
77 * so use a signed short here -- Cort
78 */
79 sdisp = (instword & 0xffff);
80 ea = (uint *)(regs->gpr[idxreg] + sdisp);
81 if (copy_from_user(ip, ea, sizeof(double)))
82 retval = -EFAULT;
83 break;
84
85 case LFDU:
86 if (copy_from_user(ip, ea, sizeof(double)))
87 retval = -EFAULT;
88 else
89 regs->gpr[idxreg] = (uint)ea;
90 break;
91 case LFS:
92 sdisp = (instword & 0xffff);
93 ea = (uint *)(regs->gpr[idxreg] + sdisp);
94 if (copy_from_user(ip, ea, sizeof(float)))
95 retval = -EFAULT;
96 break;
97 case STFD:
98 /* this is a 16 bit quantity that is sign extended
99 * so use a signed short here -- Cort
100 */
101 sdisp = (instword & 0xffff);
102 ea = (uint *)(regs->gpr[idxreg] + sdisp);
103 if (copy_to_user(ea, ip, sizeof(double)))
104 retval = -EFAULT;
105 break;
106
107 case STFDU:
108 if (copy_to_user(ea, ip, sizeof(double)))
109 retval = -EFAULT;
110 else
111 regs->gpr[idxreg] = (uint)ea;
112 break;
113 case FMR:
114 /* assume this is a fp move -- Cort */
115 memcpy( ip, &current->thread.fpr[(instword>>11)&0x1f],
116 sizeof(double) );
117 break;
118 default:
119 retval = 1;
120 printk("Bad emulation %s/%d\n"
121 " NIP: %08lx instruction: %08x opcode: %x "
122 "A: %x B: %x C: %x code: %x rc: %x\n",
123 current->comm,current->pid,
124 regs->nip,
125 instword,inst,
126 (instword>>16)&0x1f,
127 (instword>>11)&0x1f,
128 (instword>>6)&0x1f,
129 (instword>>1)&0x3ff,
130 instword&1);
131 {
132 int pa;
133 print_8xx_pte(current->mm,regs->nip);
134 pa = get_8xx_pte(current->mm,regs->nip) & PAGE_MASK;
135 pa |= (regs->nip & ~PAGE_MASK);
136 pa = (unsigned long)__va(pa);
137 printk("Kernel VA for NIP %x ", pa);
138 print_8xx_pte(current->mm,pa);
139 }
140
141 }
142
143 if (retval == 0)
144 regs->nip += 4;
145 return(retval);
146}
147
diff --git a/arch/ppc/kernel/time.c b/arch/ppc/kernel/time.c
deleted file mode 100644
index 18ee851e33e3..000000000000
--- a/arch/ppc/kernel/time.c
+++ /dev/null
@@ -1,445 +0,0 @@
1/*
2 * Common time routines among all ppc machines.
3 *
4 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
5 * Paul Mackerras' version and mine for PReP and Pmac.
6 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
7 *
8 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
9 * to make clock more stable (2.4.0-test5). The only thing
10 * that this code assumes is that the timebases have been synchronized
11 * by firmware on SMP and are never stopped (never do sleep
12 * on SMP then, nap and doze are OK).
13 *
14 * TODO (not necessarily in this file):
15 * - improve precision and reproducibility of timebase frequency
16 * measurement at boot time.
17 * - get rid of xtime_lock for gettimeofday (generic kernel problem
18 * to be implemented on all architectures for SMP scalability and
19 * eventually implementing gettimeofday without entering the kernel).
20 * - put all time/clock related variables in a single structure
21 * to minimize number of cache lines touched by gettimeofday()
22 * - for astronomical applications: add a new function to get
23 * non ambiguous timestamps even around leap seconds. This needs
24 * a new timestamp format and a good name.
25 *
26 *
27 * The following comment is partially obsolete (at least the long wait
28 * is no more a valid reason):
29 * Since the MPC8xx has a programmable interrupt timer, I decided to
30 * use that rather than the decrementer. Two reasons: 1.) the clock
31 * frequency is low, causing 2.) a long wait in the timer interrupt
32 * while ((d = get_dec()) == dval)
33 * loop. The MPC8xx can be driven from a variety of input clocks,
34 * so a number of assumptions have been made here because the kernel
35 * parameter HZ is a constant. We assume (correctly, today :-) that
36 * the MPC8xx on the MBX board is driven from a 32.768 kHz crystal.
37 * This is then divided by 4, providing a 8192 Hz clock into the PIT.
38 * Since it is not possible to get a nice 100 Hz clock out of this, without
39 * creating a software PLL, I have set HZ to 128. -- Dan
40 *
41 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
42 * "A Kernel Model for Precision Timekeeping" by Dave Mills
43 */
44
45#include <linux/errno.h>
46#include <linux/sched.h>
47#include <linux/kernel.h>
48#include <linux/param.h>
49#include <linux/string.h>
50#include <linux/mm.h>
51#include <linux/module.h>
52#include <linux/interrupt.h>
53#include <linux/timex.h>
54#include <linux/kernel_stat.h>
55#include <linux/mc146818rtc.h>
56#include <linux/time.h>
57#include <linux/init.h>
58#include <linux/profile.h>
59
60#include <asm/io.h>
61#include <asm/nvram.h>
62#include <asm/cache.h>
63#include <asm/8xx_immap.h>
64#include <asm/machdep.h>
65#include <asm/irq_regs.h>
66
67#include <asm/time.h>
68
69unsigned long disarm_decr[NR_CPUS];
70
71extern struct timezone sys_tz;
72
73/* keep track of when we need to update the rtc */
74time_t last_rtc_update;
75
76/* The decrementer counts down by 128 every 128ns on a 601. */
77#define DECREMENTER_COUNT_601 (1000000000 / HZ)
78
79unsigned tb_ticks_per_jiffy;
80unsigned tb_to_us;
81unsigned tb_last_stamp;
82unsigned long tb_to_ns_scale;
83
84/* used for timezone offset */
85static long timezone_offset;
86
87DEFINE_SPINLOCK(rtc_lock);
88
89EXPORT_SYMBOL(rtc_lock);
90
91/* Timer interrupt helper function */
92static inline int tb_delta(unsigned *jiffy_stamp) {
93 int delta;
94 if (__USE_RTC()) {
95 delta = get_rtcl();
96 if (delta < *jiffy_stamp) *jiffy_stamp -= 1000000000;
97 delta -= *jiffy_stamp;
98 } else {
99 delta = get_tbl() - *jiffy_stamp;
100 }
101 return delta;
102}
103
104#ifdef CONFIG_SMP
105unsigned long profile_pc(struct pt_regs *regs)
106{
107 unsigned long pc = instruction_pointer(regs);
108
109 if (in_lock_functions(pc))
110 return regs->link;
111
112 return pc;
113}
114EXPORT_SYMBOL(profile_pc);
115#endif
116
117void wakeup_decrementer(void)
118{
119 set_dec(tb_ticks_per_jiffy);
120 /* No currently-supported powerbook has a 601,
121 * so use get_tbl, not native
122 */
123 last_jiffy_stamp(0) = tb_last_stamp = get_tbl();
124}
125
126/*
127 * timer_interrupt - gets called when the decrementer overflows,
128 * with interrupts disabled.
129 * We set it up to overflow again in 1/HZ seconds.
130 */
131void timer_interrupt(struct pt_regs * regs)
132{
133 struct pt_regs *old_regs;
134 int next_dec;
135 unsigned long cpu = smp_processor_id();
136 unsigned jiffy_stamp = last_jiffy_stamp(cpu);
137 extern void do_IRQ(struct pt_regs *);
138
139 if (atomic_read(&ppc_n_lost_interrupts) != 0)
140 do_IRQ(regs);
141
142 old_regs = set_irq_regs(regs);
143 irq_enter();
144
145 while ((next_dec = tb_ticks_per_jiffy - tb_delta(&jiffy_stamp)) <= 0) {
146 jiffy_stamp += tb_ticks_per_jiffy;
147
148 profile_tick(CPU_PROFILING);
149 update_process_times(user_mode(regs));
150
151 if (smp_processor_id())
152 continue;
153
154 /* We are in an interrupt, no need to save/restore flags */
155 write_seqlock(&xtime_lock);
156 tb_last_stamp = jiffy_stamp;
157 do_timer(1);
158
159 /*
160 * update the rtc when needed, this should be performed on the
161 * right fraction of a second. Half or full second ?
162 * Full second works on mk48t59 clocks, others need testing.
163 * Note that this update is basically only used through
164 * the adjtimex system calls. Setting the HW clock in
165 * any other way is a /dev/rtc and userland business.
166 * This is still wrong by -0.5/+1.5 jiffies because of the
167 * timer interrupt resolution and possible delay, but here we
168 * hit a quantization limit which can only be solved by higher
169 * resolution timers and decoupling time management from timer
170 * interrupts. This is also wrong on the clocks
171 * which require being written at the half second boundary.
172 * We should have an rtc call that only sets the minutes and
173 * seconds like on Intel to avoid problems with non UTC clocks.
174 */
175 if ( ppc_md.set_rtc_time && ntp_synced() &&
176 xtime.tv_sec - last_rtc_update >= 659 &&
177 abs((xtime.tv_nsec / 1000) - (1000000-1000000/HZ)) < 500000/HZ) {
178 if (ppc_md.set_rtc_time(xtime.tv_sec+1 + timezone_offset) == 0)
179 last_rtc_update = xtime.tv_sec+1;
180 else
181 /* Try again one minute later */
182 last_rtc_update += 60;
183 }
184 write_sequnlock(&xtime_lock);
185 }
186 if ( !disarm_decr[smp_processor_id()] )
187 set_dec(next_dec);
188 last_jiffy_stamp(cpu) = jiffy_stamp;
189
190 if (ppc_md.heartbeat && !ppc_md.heartbeat_count--)
191 ppc_md.heartbeat();
192
193 irq_exit();
194 set_irq_regs(old_regs);
195}
196
197/*
198 * This version of gettimeofday has microsecond resolution.
199 */
200void do_gettimeofday(struct timeval *tv)
201{
202 unsigned long flags;
203 unsigned long seq;
204 unsigned delta, usec, sec;
205
206 do {
207 seq = read_seqbegin_irqsave(&xtime_lock, flags);
208 sec = xtime.tv_sec;
209 usec = (xtime.tv_nsec / 1000);
210 delta = tb_ticks_since(tb_last_stamp);
211#ifdef CONFIG_SMP
212 /* As long as timebases are not in sync, gettimeofday can only
213 * have jiffy resolution on SMP.
214 */
215 if (!smp_tb_synchronized)
216 delta = 0;
217#endif /* CONFIG_SMP */
218 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
219
220 usec += mulhwu(tb_to_us, delta);
221 while (usec >= 1000000) {
222 sec++;
223 usec -= 1000000;
224 }
225 tv->tv_sec = sec;
226 tv->tv_usec = usec;
227}
228
229EXPORT_SYMBOL(do_gettimeofday);
230
231int do_settimeofday(struct timespec *tv)
232{
233 time_t wtm_sec, new_sec = tv->tv_sec;
234 long wtm_nsec, new_nsec = tv->tv_nsec;
235 unsigned long flags;
236 int tb_delta;
237
238 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
239 return -EINVAL;
240
241 write_seqlock_irqsave(&xtime_lock, flags);
242 /* Updating the RTC is not the job of this code. If the time is
243 * stepped under NTP, the RTC will be update after STA_UNSYNC
244 * is cleared. Tool like clock/hwclock either copy the RTC
245 * to the system time, in which case there is no point in writing
246 * to the RTC again, or write to the RTC but then they don't call
247 * settimeofday to perform this operation. Note also that
248 * we don't touch the decrementer since:
249 * a) it would lose timer interrupt synchronization on SMP
250 * (if it is working one day)
251 * b) it could make one jiffy spuriously shorter or longer
252 * which would introduce another source of uncertainty potentially
253 * harmful to relatively short timers.
254 */
255
256 /* This works perfectly on SMP only if the tb are in sync but
257 * guarantees an error < 1 jiffy even if they are off by eons,
258 * still reasonable when gettimeofday resolution is 1 jiffy.
259 */
260 tb_delta = tb_ticks_since(last_jiffy_stamp(smp_processor_id()));
261
262 new_nsec -= 1000 * mulhwu(tb_to_us, tb_delta);
263
264 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec);
265 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec);
266
267 set_normalized_timespec(&xtime, new_sec, new_nsec);
268 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
269
270 /* In case of a large backwards jump in time with NTP, we want the
271 * clock to be updated as soon as the PLL is again in lock.
272 */
273 last_rtc_update = new_sec - 658;
274
275 ntp_clear();
276 write_sequnlock_irqrestore(&xtime_lock, flags);
277 clock_was_set();
278 return 0;
279}
280
281EXPORT_SYMBOL(do_settimeofday);
282
283/* This function is only called on the boot processor */
284void __init time_init(void)
285{
286 time_t sec, old_sec;
287 unsigned old_stamp, stamp, elapsed;
288
289 if (ppc_md.time_init != NULL)
290 timezone_offset = ppc_md.time_init();
291
292 if (__USE_RTC()) {
293 /* 601 processor: dec counts down by 128 every 128ns */
294 tb_ticks_per_jiffy = DECREMENTER_COUNT_601;
295 /* mulhwu_scale_factor(1000000000, 1000000) is 0x418937 */
296 tb_to_us = 0x418937;
297 } else {
298 ppc_md.calibrate_decr();
299 tb_to_ns_scale = mulhwu(tb_to_us, 1000 << 10);
300 }
301
302 /* Now that the decrementer is calibrated, it can be used in case the
303 * clock is stuck, but the fact that we have to handle the 601
304 * makes things more complex. Repeatedly read the RTC until the
305 * next second boundary to try to achieve some precision. If there
306 * is no RTC, we still need to set tb_last_stamp and
307 * last_jiffy_stamp(cpu 0) to the current stamp.
308 */
309 stamp = get_native_tbl();
310 if (ppc_md.get_rtc_time) {
311 sec = ppc_md.get_rtc_time();
312 elapsed = 0;
313 do {
314 old_stamp = stamp;
315 old_sec = sec;
316 stamp = get_native_tbl();
317 if (__USE_RTC() && stamp < old_stamp)
318 old_stamp -= 1000000000;
319 elapsed += stamp - old_stamp;
320 sec = ppc_md.get_rtc_time();
321 } while ( sec == old_sec && elapsed < 2*HZ*tb_ticks_per_jiffy);
322 if (sec==old_sec)
323 printk("Warning: real time clock seems stuck!\n");
324 xtime.tv_sec = sec;
325 xtime.tv_nsec = 0;
326 /* No update now, we just read the time from the RTC ! */
327 last_rtc_update = xtime.tv_sec;
328 }
329 last_jiffy_stamp(0) = tb_last_stamp = stamp;
330
331 /* Not exact, but the timer interrupt takes care of this */
332 set_dec(tb_ticks_per_jiffy);
333
334 /* If platform provided a timezone (pmac), we correct the time */
335 if (timezone_offset) {
336 sys_tz.tz_minuteswest = -timezone_offset / 60;
337 sys_tz.tz_dsttime = 0;
338 xtime.tv_sec -= timezone_offset;
339 }
340 set_normalized_timespec(&wall_to_monotonic,
341 -xtime.tv_sec, -xtime.tv_nsec);
342}
343
344#define FEBRUARY 2
345#define STARTOFTIME 1970
346#define SECDAY 86400L
347#define SECYR (SECDAY * 365)
348
349/*
350 * Note: this is wrong for 2100, but our signed 32-bit time_t will
351 * have overflowed long before that, so who cares. -- paulus
352 */
353#define leapyear(year) ((year) % 4 == 0)
354#define days_in_year(a) (leapyear(a) ? 366 : 365)
355#define days_in_month(a) (month_days[(a) - 1])
356
357static int month_days[12] = {
358 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
359};
360
361void to_tm(int tim, struct rtc_time * tm)
362{
363 register int i;
364 register long hms, day, gday;
365
366 gday = day = tim / SECDAY;
367 hms = tim % SECDAY;
368
369 /* Hours, minutes, seconds are easy */
370 tm->tm_hour = hms / 3600;
371 tm->tm_min = (hms % 3600) / 60;
372 tm->tm_sec = (hms % 3600) % 60;
373
374 /* Number of years in days */
375 for (i = STARTOFTIME; day >= days_in_year(i); i++)
376 day -= days_in_year(i);
377 tm->tm_year = i;
378
379 /* Number of months in days left */
380 if (leapyear(tm->tm_year))
381 days_in_month(FEBRUARY) = 29;
382 for (i = 1; day >= days_in_month(i); i++)
383 day -= days_in_month(i);
384 days_in_month(FEBRUARY) = 28;
385 tm->tm_mon = i;
386
387 /* Days are what is left over (+1) from all that. */
388 tm->tm_mday = day + 1;
389
390 /*
391 * Determine the day of week. Jan. 1, 1970 was a Thursday.
392 */
393 tm->tm_wday = (gday + 4) % 7;
394}
395
396/* Auxiliary function to compute scaling factors */
397/* Actually the choice of a timebase running at 1/4 the of the bus
398 * frequency giving resolution of a few tens of nanoseconds is quite nice.
399 * It makes this computation very precise (27-28 bits typically) which
400 * is optimistic considering the stability of most processor clock
401 * oscillators and the precision with which the timebase frequency
402 * is measured but does not harm.
403 */
404unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale) {
405 unsigned mlt=0, tmp, err;
406 /* No concern for performance, it's done once: use a stupid
407 * but safe and compact method to find the multiplier.
408 */
409 for (tmp = 1U<<31; tmp != 0; tmp >>= 1) {
410 if (mulhwu(inscale, mlt|tmp) < outscale) mlt|=tmp;
411 }
412 /* We might still be off by 1 for the best approximation.
413 * A side effect of this is that if outscale is too large
414 * the returned value will be zero.
415 * Many corner cases have been checked and seem to work,
416 * some might have been forgotten in the test however.
417 */
418 err = inscale*(mlt+1);
419 if (err <= inscale/2) mlt++;
420 return mlt;
421}
422
423unsigned long long sched_clock(void)
424{
425 unsigned long lo, hi, hi2;
426 unsigned long long tb;
427
428 if (!__USE_RTC()) {
429 do {
430 hi = get_tbu();
431 lo = get_tbl();
432 hi2 = get_tbu();
433 } while (hi2 != hi);
434 tb = ((unsigned long long) hi << 32) | lo;
435 tb = (tb * tb_to_ns_scale) >> 10;
436 } else {
437 do {
438 hi = get_rtcu();
439 lo = get_rtcl();
440 hi2 = get_rtcu();
441 } while (hi2 != hi);
442 tb = ((unsigned long long) hi) * 1000000000 + lo;
443 }
444 return tb;
445}
diff --git a/arch/ppc/kernel/traps.c b/arch/ppc/kernel/traps.c
deleted file mode 100644
index a467a429c2fe..000000000000
--- a/arch/ppc/kernel/traps.c
+++ /dev/null
@@ -1,826 +0,0 @@
1/*
2 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Modified by Cort Dougan (cort@cs.nmt.edu)
10 * and Paul Mackerras (paulus@cs.anu.edu.au)
11 */
12
13/*
14 * This file handles the architecture-dependent parts of hardware exceptions
15 */
16
17#include <linux/errno.h>
18#include <linux/sched.h>
19#include <linux/kernel.h>
20#include <linux/mm.h>
21#include <linux/stddef.h>
22#include <linux/unistd.h>
23#include <linux/ptrace.h>
24#include <linux/slab.h>
25#include <linux/user.h>
26#include <linux/a.out.h>
27#include <linux/interrupt.h>
28#include <linux/init.h>
29#include <linux/module.h>
30#include <linux/prctl.h>
31#include <linux/bug.h>
32
33#include <asm/pgtable.h>
34#include <asm/uaccess.h>
35#include <asm/system.h>
36#include <asm/io.h>
37#include <asm/reg.h>
38#include <asm/xmon.h>
39#include <asm/pmc.h>
40
/*
 * Debugger entry points.  With xmon built in they all route to xmon;
 * with kgdb they are filled in at kgdb init time (NULL until then);
 * with neither they compile away to no-ops so callers need no #ifdefs.
 */
#ifdef CONFIG_XMON
extern int xmon_bpt(struct pt_regs *regs);
extern int xmon_sstep(struct pt_regs *regs);
extern int xmon_iabr_match(struct pt_regs *regs);
extern int xmon_dabr_match(struct pt_regs *regs);

int (*debugger)(struct pt_regs *regs) = xmon;
int (*debugger_bpt)(struct pt_regs *regs) = xmon_bpt;
int (*debugger_sstep)(struct pt_regs *regs) = xmon_sstep;
int (*debugger_iabr_match)(struct pt_regs *regs) = xmon_iabr_match;
int (*debugger_dabr_match)(struct pt_regs *regs) = xmon_dabr_match;
void (*debugger_fault_handler)(struct pt_regs *regs);
#else
#ifdef CONFIG_KGDB
int (*debugger)(struct pt_regs *regs);
int (*debugger_bpt)(struct pt_regs *regs);
int (*debugger_sstep)(struct pt_regs *regs);
int (*debugger_iabr_match)(struct pt_regs *regs);
int (*debugger_dabr_match)(struct pt_regs *regs);
void (*debugger_fault_handler)(struct pt_regs *regs);
#else
#define debugger(regs)			do { } while (0)
#define debugger_bpt(regs)		0
#define debugger_sstep(regs)		0
#define debugger_iabr_match(regs)	0
#define debugger_dabr_match(regs)	0
#define debugger_fault_handler		((void (*)(struct pt_regs *))0)
#endif
#endif

/*
 * Trap & Exception support
 */

/* Serializes oops output from die() across CPUs. */
DEFINE_SPINLOCK(die_lock);
76
/*
 * Print an oops header and register dump, taint the kernel, then kill
 * the current task.  Output is serialized by die_lock so concurrent
 * oopses on SMP do not interleave.  Never returns: do_exit() handles
 * termination (and, per the comment below, panicking from interrupt
 * context).
 */
int die(const char * str, struct pt_regs * fp, long err)
{
	static int die_counter;		/* distinguishes repeated oopses: [#N] */
	int nl = 0;			/* set if the extra header line needs a '\n' */
	console_verbose();
	spin_lock_irq(&die_lock);
	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
	nl = 1;
#endif
#ifdef CONFIG_SMP
	printk("SMP NR_CPUS=%d ", NR_CPUS);
	nl = 1;
#endif
	if (nl)
		printk("\n");
	show_regs(fp);
	add_taint(TAINT_DIE);
	spin_unlock_irq(&die_lock);
	/* do_exit() should take care of panic'ing from an interrupt
	 * context so we don't handle it here
	 */
	do_exit(err);
}
102
/*
 * Deliver signal 'signr' (with siginfo code/addr) to the current task
 * for a synchronous exception.  A kernel-mode exception is fatal: it
 * enters the debugger and then oopses via die().
 */
void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
	siginfo_t info;

	if (!user_mode(regs)) {
		debugger(regs);
		die("Exception in kernel mode", regs, signr);
	}
	info.si_signo = signr;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = (void __user *) addr;
	force_sig_info(signr, &info, current);

	/*
	 * Init gets no signals that it doesn't have a handler for.
	 * That's all very well, but if it has caused a synchronous
	 * exception and we ignore the resulting signal, it will just
	 * generate the same exception over and over again and we get
	 * nowhere.  Better to kill it and let the kernel panic.
	 */
	if (is_global_init(current)) {
		__sighandler_t handler;

		/* siglock protects the sighand action table */
		spin_lock_irq(&current->sighand->siglock);
		handler = current->sighand->action[signr-1].sa.sa_handler;
		spin_unlock_irq(&current->sighand->siglock);
		if (handler == SIG_DFL) {
			/* init has generated a synchronous exception
			   and it doesn't have a handler for the signal */
			printk(KERN_CRIT "init has generated signal %d "
			       "but has no handler for it\n", signr);
			do_exit(signr);
		}
	}
}
139
/*
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * table.
 * Note that the 601 only takes a machine check on TEA
 * (transfer error ack) signal assertion, and does not
 * set any of the top 16 bits of SRR1.
 *  -- paulus.
 *
 * Returns 1 if the machine check was recognized as a failed I/O
 * access and redirected to the exception-table fixup; 0 otherwise.
 */
static inline int check_io_access(struct pt_regs *regs)
{
#if defined CONFIG_8xx
	unsigned long msr = regs->msr;
	const struct exception_table_entry *entry;
	unsigned int *nip = (unsigned int *)regs->nip;

	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
	    && (entry = search_exception_tables(regs->nip)) != NULL) {
		/*
		 * Check that it's a sync instruction, or somewhere
		 * in the twi; isync; nop sequence that inb/inw/inl uses.
		 * As the address is in the exception table
		 * we should be able to read the instr there.
		 * For the debug message, we look at the preceding
		 * load or store.
		 */
		if (*nip == 0x60000000)		/* nop */
			nip -= 2;
		else if (*nip == 0x4c00012c)	/* isync */
			--nip;
		/* eieio from I/O string functions */
		else if ((*nip) == 0x7c0006ac || *(nip+1) == 0x7c0006ac)
			nip += 2;
		if (*nip == 0x7c0004ac || (*nip >> 26) == 3 ||
		    (*(nip+1) >> 26) == 3) {
			/* sync or twi */
			unsigned int rb;

			--nip;
			/* rB field (bits 11..15) of the preceding load/store
			 * holds the port address register */
			rb = (*nip >> 11) & 0x1f;
			printk(KERN_DEBUG "%s bad port %lx at %p\n",
			       (*nip & 0x100)? "OUT to": "IN from",
			       regs->gpr[rb] - _IO_BASE, nip);
			/* mark the exception recoverable and branch to the
			 * fixup stub from the exception table */
			regs->msr |= MSR_RI;
			regs->nip = entry->fixup;
			return 1;
		}
	}
#endif /* CONFIG_8xx */
	return 0;
}
192
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
/* On 4xx, the reason for the machine check or program exception
   is in the ESR.  (The ESR value is presumably stashed in regs->dsisr
   by the exception entry code — see the head_4xx/booke entry paths.) */
#define get_reason(regs)	((regs)->dsisr)
#define get_mc_reason(regs)	((regs)->dsisr)
#define REASON_FP		ESR_FP
#define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED	ESR_PPR
#define REASON_TRAP		ESR_PTR

/* single-step stuff: 4xx/Book-E single-step via the debug unit (DBCR0) */
#define single_stepping(regs)	(current->thread.dbcr0 & DBCR0_IC)
#define clear_single_step(regs)	(current->thread.dbcr0 &= ~DBCR0_IC)

#else
/* On non-4xx, the reason for the machine check or program
   exception is in the MSR. */
#define get_reason(regs)	((regs)->msr)
#define get_mc_reason(regs)	((regs)->msr)
#define REASON_FP		0x100000
#define REASON_ILLEGAL		0x80000
#define REASON_PRIVILEGED	0x40000
#define REASON_TRAP		0x20000

/* classic PPC single-steps via MSR[SE] */
#define single_stepping(regs)	((regs)->msr & MSR_SE)
#define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
#endif
220
/*
 * This is "fall-back" implementation for configurations
 * which don't provide platform-specific machine check info.
 * Weak: a platform may override it to dump e.g. bus error registers.
 */
void __attribute__ ((weak))
platform_machine_check(struct pt_regs *regs)
{
}
229
#if defined(CONFIG_4xx)
/*
 * 4xx machine check: report whether it was an instruction- or
 * data-side event.  Returns 0 = not recovered (caller decides what
 * to do next — see machine_check_exception()).
 */
int machine_check_4xx(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	if (reason & ESR_IMCP) {
		printk("Instruction");
		/* acknowledge the instruction machine-check in the ESR */
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	} else
		printk("Data");
	printk(" machine check in kernel mode.\n");

	return 0;
}

/*
 * 440A machine check: decode and report the MCSR bits, then clear
 * them.  Returns 0 = not recovered.
 */
int machine_check_440A(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	if (reason & ESR_IMCP){
		printk("Instruction Synchronous Machine Check exception\n");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	}
	else {
		u32 mcsr = mfspr(SPRN_MCSR);
		if (mcsr & MCSR_IB)
			printk("Instruction Read PLB Error\n");
		if (mcsr & MCSR_DRB)
			printk("Data Read PLB Error\n");
		if (mcsr & MCSR_DWB)
			printk("Data Write PLB Error\n");
		if (mcsr & MCSR_TLBP)
			printk("TLB Parity Error\n");
		if (mcsr & MCSR_ICP){
			/* flush the I-cache so the parity error is not replayed */
			flush_instruction_cache();
			printk("I-Cache Parity Error\n");
		}
		if (mcsr & MCSR_DCSP)
			printk("D-Cache Search Parity Error\n");
		if (mcsr & MCSR_DCFP)
			printk("D-Cache Flush Parity Error\n");
		if (mcsr & MCSR_IMPE)
			printk("Machine Check exception is imprecise\n");

		/* Clear MCSR */
		mtspr(SPRN_MCSR, mcsr);
	}
	return 0;
}
#else
/*
 * Classic-PPC machine check: decode the cause bits latched in SRR1
 * (available here via get_mc_reason()).  Returns 0 = not recovered.
 */
int machine_check_generic(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from SRR1=%lx): ", reason);
	switch (reason & 0x601F0000) {
	case 0x80000:
		printk("Machine check signal\n");
		break;
	case 0:		/* for 601 */
	case 0x40000:
	case 0x140000:	/* 7450 MSS error and TEA */
		printk("Transfer error ack signal\n");
		break;
	case 0x20000:
		printk("Data parity error signal\n");
		break;
	case 0x10000:
		printk("Address parity error signal\n");
		break;
	case 0x20000000:
		printk("L1 Data Cache error\n");
		break;
	case 0x40000000:
		printk("L1 Instruction Cache error\n");
		break;
	case 0x00100000:
		printk("L2 data cache parity error\n");
		break;
	default:
		printk("Unknown values in msr\n");
	}
	return 0;
}
#endif /* everything else */
317
/*
 * Top-level machine check handler.  Tries, in order:
 *  1. the CPU-specific handler from cur_cpu_spec (recover > 0 means done),
 *  2. for user mode, SIGBUS delivery,
 *  3. 8xx+PCI quirk (qspan reads), debugger fault fixup, I/O-access
 *     exception-table fixup,
 * and otherwise treats it as a fatal kernel error.
 */
void machine_check_exception(struct pt_regs *regs)
{
	int recover = 0;

	if (cur_cpu_spec->machine_check)
		recover = cur_cpu_spec->machine_check(regs);
	if (recover > 0)
		return;

	if (user_mode(regs)) {
		/* mark recoverable before delivering the signal */
		regs->msr |= MSR_RI;
		_exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
		return;
	}

#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
	/* the qspan pci read routines can cause machine checks -- Cort */
	bad_page_fault(regs, regs->dar, SIGBUS);
	return;
#endif

	if (debugger_fault_handler) {
		/* a debugger probe faulted: let it unwind, mark recoverable */
		debugger_fault_handler(regs);
		regs->msr |= MSR_RI;
		return;
	}

	if (check_io_access(regs))
		return;

	/*
	 * Optional platform-provided routine to print out
	 * additional info, e.g. bus error registers.
	 */
	platform_machine_check(regs);

	debugger(regs);
	die("machine check", regs, SIGBUS);
}
357
/*
 * System Management Interrupt: hand off to the debugger.  If no
 * debugger is configured (debugger() is then a no-op), the SMI is
 * treated as fatal.
 */
void SMIException(struct pt_regs *regs)
{
	debugger(regs);
#if !(defined(CONFIG_XMON) || defined(CONFIG_KGDB))
	show_regs(regs);
	panic("System Management Interrupt");
#endif
}
366
/*
 * Catch-all for exception vectors without a dedicated handler: log the
 * trap location/vector and give the task a SIGTRAP.
 */
void unknown_exception(struct pt_regs *regs)
{
	printk("Bad trap at PC: %lx, MSR: %lx, vector=%lx    %s\n",
	       regs->nip, regs->msr, regs->trap, print_tainted());
	_exception(SIGTRAP, regs, 0, 0);
}
373
374void instruction_breakpoint_exception(struct pt_regs *regs)
375{
376 if (debugger_iabr_match(regs))
377 return;
378 _exception(SIGTRAP, regs, TRAP_BRKPT, 0);
379}
380
/* Run-mode / trace exception: simply deliver SIGTRAP to the task. */
void RunModeException(struct pt_regs *regs)
{
	_exception(SIGTRAP, regs, 0, 0);
}
385
/* Illegal instruction emulation support.  Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault.  Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits.  In this case, matching lots of
 * bits is faster and easier.
 *
 */
#define INST_MFSPR_PVR		0x7c1f42a6
#define INST_MFSPR_PVR_MASK	0xfc1fffff

#define INST_DCBA		0x7c0005ec
#define INST_DCBA_MASK		0x7c0007fe

#define INST_MCRXR		0x7c000400
#define INST_MCRXR_MASK		0x7c0007fe

/* Load/store string family: INST_STRING/INST_STRING_GEN_MASK match any
 * of the four variants; masking with INST_STRING_MASK distinguishes
 * lswi/lswx/stswi/stswx below. */
#define INST_STRING		0x7c00042a
#define INST_STRING_MASK	0x7c0007fe
#define INST_STRING_GEN_MASK	0x7c00067e
#define INST_LSWI		0x7c0004aa
#define INST_LSWX		0x7c00042a
#define INST_STSWI		0x7c0005aa
#define INST_STSWX		0x7c00052a
412
/*
 * Emulate the load/store string instructions (lswi/lswx/stswi/stswx),
 * which move 'num_bytes' bytes between memory at EA and a run of GPRs
 * starting at rT, packing 4 bytes per register big-endian (MSB first).
 * Returns 0 on success, -EFAULT on a faulting user access, -EINVAL for
 * an invalid form.
 */
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;	/* target/source GPR */
	u8 rA = (instword >> 16) & 0x1f;	/* base address GPR */
	u8 NB_RB = (instword >> 11) & 0x1f;	/* NB field (immediate forms)
						   or rB (indexed forms) */
	u32 num_bytes;
	unsigned long EA;
	int pos = 0;	/* byte position within the current register (0..3) */

	/* Early out if we are an invalid form of lswx */
	if ((instword & INST_STRING_MASK) == INST_LSWX)
		if ((rT == rA) || (rT == NB_RB))
			return -EINVAL;

	EA = (rA == 0) ? 0 : regs->gpr[rA];

	switch (instword & INST_STRING_MASK) {
	case INST_LSWX:
	case INST_STSWX:
		EA += NB_RB;
		/* indexed forms take the byte count from XER[25:31] */
		num_bytes = regs->xer & 0x7f;
		break;
	case INST_LSWI:
	case INST_STSWI:
		/* NB == 0 means 32 bytes */
		num_bytes = (NB_RB == 0) ? 32 : NB_RB;
		break;
	default:
		return -EINVAL;
	}

	while (num_bytes != 0)
	{
		u8 val;
		u32 shift = 8 * (3 - (pos & 0x3));	/* big-endian byte lane */

		switch ((instword & INST_STRING_MASK)) {
		case INST_LSWX:
		case INST_LSWI:
			if (get_user(val, (u8 __user *)EA))
				return -EFAULT;
			/* first time updating this reg,
			 * zero it out */
			if (pos == 0)
				regs->gpr[rT] = 0;
			regs->gpr[rT] |= val << shift;
			break;
		case INST_STSWI:
		case INST_STSWX:
			val = regs->gpr[rT] >> shift;
			if (put_user(val, (u8 __user *)EA))
				return -EFAULT;
			break;
		}
		/* move EA to next address */
		EA += 1;
		num_bytes--;

		/* manage our position within the register */
		if (++pos == 4) {
			pos = 0;
			if (++rT == 32)
				rT = 0;	/* register numbers wrap r31 -> r0 */
		}
	}

	return 0;
}
480
/*
 * Try to emulate a user-mode instruction that raised a program check.
 * Handles: mfspr rD,PVR; dcba (no-op); mcrxr; and the load/store
 * string family.  Returns 0 on success, -EFAULT on a faulting memory
 * access, -EINVAL if the instruction is not one we emulate.
 */
static int emulate_instruction(struct pt_regs *regs)
{
	u32 instword;
	u32 rd;

	if (!user_mode(regs))
		return -EINVAL;
	CHECK_FULL_REGS(regs);

	/* fetch the faulting instruction from user text */
	if (get_user(instword, (u32 __user *)(regs->nip)))
		return -EFAULT;

	/* Emulate the mfspr rD, PVR.
	 */
	if ((instword & INST_MFSPR_PVR_MASK) == INST_MFSPR_PVR) {
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_PVR);
		return 0;
	}

	/* Emulating the dcba insn is just a no-op.  */
	if ((instword & INST_DCBA_MASK) == INST_DCBA)
		return 0;

	/* Emulate the mcrxr insn.  */
	if ((instword & INST_MCRXR_MASK) == INST_MCRXR) {
		/* copy the selected XER nibble into the target CR field,
		 * then clear XER[0:3] as the real instruction does */
		int shift = (instword >> 21) & 0x1c;
		unsigned long msk = 0xf0000000UL >> shift;

		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
		regs->xer &= ~0xf0000000UL;
		return 0;
	}

	/* Emulate load/store string insn. */
	if ((instword & INST_STRING_GEN_MASK) == INST_STRING)
		return emulate_string_inst(regs, instword);

	return -EINVAL;
}
521
522/*
523 * After we have successfully emulated an instruction, we have to
524 * check if the instruction was being single-stepped, and if so,
525 * pretend we got a single-step exception. This was pointed out
526 * by Kumar Gala. -- paulus
527 */
528static void emulate_single_step(struct pt_regs *regs)
529{
530 if (single_stepping(regs)) {
531 clear_single_step(regs);
532 _exception(SIGTRAP, regs, TRAP_TRACE, 0);
533 }
534}
535
536int is_valid_bugaddr(unsigned long addr)
537{
538 return addr >= PAGE_OFFSET;
539}
540
/*
 * Program check handler: dispatches on the reason bits to
 *  - math emulation (optional, tried first because of a 405GP erratum),
 *  - IEEE FP exceptions (SIGFPE with a decoded si_code),
 *  - trap instructions (debugger breakpoints / WARN_ON),
 *  - illegal/privileged instructions (emulation attempt, else SIGILL).
 */
void program_check_exception(struct pt_regs *regs)
{
	unsigned int reason = get_reason(regs);
	extern int do_mathemu(struct pt_regs *regs);

#ifdef CONFIG_MATH_EMULATION
	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
	 * but there seems to be a hardware bug on the 405GP (RevD)
	 * that means ESR is sometimes set incorrectly - either to
	 * ESR_DST (!?) or 0.  In the process of chasing this with the
	 * hardware people - not sure if it can happen on any illegal
	 * instruction or only on FP instructions, whether there is a
	 * pattern to occurrences etc. -dgibson 31/Mar/2003 */
	if (!(reason & REASON_TRAP) && do_mathemu(regs) == 0) {
		emulate_single_step(regs);
		return;
	}
#endif /* CONFIG_MATH_EMULATION */

	if (reason & REASON_FP) {
		/* IEEE FP exception */
		int code = 0;
		u32 fpscr;

		/* We must make sure the FP state is consistent with
		 * our MSR_FP in regs
		 */
		preempt_disable();
		if (regs->msr & MSR_FP)
			giveup_fpu(current);
		preempt_enable();

		fpscr = current->thread.fpscr.val;
		fpscr &= fpscr << 22;	/* mask summary bits with enables */
		/* report the highest-priority enabled FP exception */
		if (fpscr & FPSCR_VX)
			code = FPE_FLTINV;
		else if (fpscr & FPSCR_OX)
			code = FPE_FLTOVF;
		else if (fpscr & FPSCR_UX)
			code = FPE_FLTUND;
		else if (fpscr & FPSCR_ZX)
			code = FPE_FLTDIV;
		else if (fpscr & FPSCR_XX)
			code = FPE_FLTRES;
		_exception(SIGFPE, regs, code, regs->nip);
		return;
	}

	if (reason & REASON_TRAP) {
		/* trap exception */
		if (debugger_bpt(regs))
			return;

		/* kernel-mode trap: may be a WARN_ON() — skip it and continue */
		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
		    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			return;
		}
		_exception(SIGTRAP, regs, TRAP_BRKPT, 0);
		return;
	}

	/* Try to emulate it if we should. */
	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
		switch (emulate_instruction(regs)) {
		case 0:
			regs->nip += 4;
			emulate_single_step(regs);
			return;
		case -EFAULT:
			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
			return;
		}
	}

	if (reason & REASON_PRIVILEGED)
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	else
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
621
622void single_step_exception(struct pt_regs *regs)
623{
624 regs->msr &= ~(MSR_SE | MSR_BE); /* Turn off 'trace' bits */
625 if (debugger_sstep(regs))
626 return;
627 _exception(SIGTRAP, regs, TRAP_TRACE, 0);
628}
629
630void alignment_exception(struct pt_regs *regs)
631{
632 int sig, code, fixed = 0;
633
634 fixed = fix_alignment(regs);
635 if (fixed == 1) {
636 regs->nip += 4; /* skip over emulated instruction */
637 emulate_single_step(regs);
638 return;
639 }
640 if (fixed == -EFAULT) {
641 sig = SIGSEGV;
642 code = SEGV_ACCERR;
643 } else {
644 sig = SIGBUS;
645 code = BUS_ADRALN;
646 }
647 if (user_mode(regs))
648 _exception(sig, regs, code, regs->dar);
649 else
650 bad_page_fault(regs, regs->dar, sig);
651}
652
/*
 * Kernel stack overflow detected (r1 ran off the stack): fatal.
 * Dump state, enter the debugger if any, then panic.
 */
void StackOverflow(struct pt_regs *regs)
{
	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
	       current, regs->gpr[1]);
	debugger(regs);
	show_regs(regs);
	panic("kernel stack overflow");
}
661
/*
 * An exception was taken from a context that cannot be resumed
 * (MSR[RI] clear): report and die.
 */
void nonrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
	       regs->nip, regs->msr);
	debugger(regs);
	die("nonrecoverable exception", regs, SIGKILL);
}
669
/*
 * Debug helper: log a syscall (number in r0, result in r3, error bit
 * in CR0.SO) for the current task.
 */
void trace_syscall(struct pt_regs *regs)
{
	printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld    %s\n",
	       current, current->pid, regs->nip, regs->link, regs->gpr[0],
	       regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
}
676
#ifdef CONFIG_8xx
/*
 * 8xx "software emulation" exception: the core has no FPU, so FP (and
 * some other) instructions trap here.  User-mode instructions are fed
 * to the math emulator (or the minimal 8xx emulator); kernel-mode use
 * is fatal.  Error codes map to SIGFPE / SIGSEGV / SIGILL.
 */
void SoftwareEmulation(struct pt_regs *regs)
{
	extern int do_mathemu(struct pt_regs *);
	extern int Soft_emulate_8xx(struct pt_regs *);
	int errcode;

	CHECK_FULL_REGS(regs);

	if (!user_mode(regs)) {
		debugger(regs);
		die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
	}

#ifdef CONFIG_MATH_EMULATION
	errcode = do_mathemu(regs);
#else
	errcode = Soft_emulate_8xx(regs);
#endif
	if (errcode) {
		if (errcode > 0)
			_exception(SIGFPE, regs, 0, 0);
		else if (errcode == -EFAULT)
			_exception(SIGSEGV, regs, 0, 0);
		else
			_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
	} else
		/* emulated OK — fake a trace trap if single-stepping */
		emulate_single_step(regs);
}
#endif /* CONFIG_8xx */
707
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)

/*
 * 40x/Book-E debug interrupt.  Only the "instruction completion"
 * (hardware single-step, DBSR[IC]) event is handled here: the step is
 * turned off and converted into a SIGTRAP/TRAP_TRACE, unless a
 * kernel debugger claims the event first.
 */
void DebugException(struct pt_regs *regs, unsigned long debug_status)
{
	if (debug_status & DBSR_IC) {	/* instruction completion */
		/* stop taking debug interrupts in the interrupted context */
		regs->msr &= ~MSR_DE;
		if (user_mode(regs)) {
			current->thread.dbcr0 &= ~DBCR0_IC;
		} else {
			/* Disable instruction completion */
			mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
			/* Clear the instruction completion event */
			mtspr(SPRN_DBSR, DBSR_IC);
			if (debugger_sstep(regs))
				return;
		}
		_exception(SIGTRAP, regs, TRAP_TRACE, 0);
	}
}
#endif /* CONFIG_40x || CONFIG_BOOKE */
728
#if !defined(CONFIG_TAU_INT)
/*
 * Thermal Assist Unit interrupt with no TAU driver configured
 * (CONFIG_TAU_INT unset): just log it.
 */
void TAUException(struct pt_regs *regs)
{
	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx    %s\n",
	       regs->nip, regs->msr, regs->trap, print_tainted());
}
#endif /* CONFIG_TAU_INT */
736
737/*
738 * FP unavailable trap from kernel - print a message, but let
739 * the task use FP in the kernel until it returns to user mode.
740 */
741void kernel_fp_unavailable_exception(struct pt_regs *regs)
742{
743 regs->msr |= MSR_FP;
744 printk(KERN_ERR "floating point used in kernel (task=%p, pc=%lx)\n",
745 current, regs->nip);
746}
747
/*
 * AltiVec unavailable trap.  Without CONFIG_ALTIVEC a user-mode vector
 * instruction is SIGILL; a kernel-mode one (either config) is whinged
 * about (rate-limited to 10 messages) and then allowed by setting
 * MSR_VEC in the interrupted context.
 */
void altivec_unavailable_exception(struct pt_regs *regs)
{
	static int kernel_altivec_count;	/* limits log spam */

#ifndef CONFIG_ALTIVEC
	if (user_mode(regs)) {
		/* A user program has executed an altivec instruction,
		   but this kernel doesn't support altivec. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}
#endif
	/* The kernel has executed an altivec instruction without
	   first enabling altivec.  Whinge but let it do it. */
	if (++kernel_altivec_count < 10)
		printk(KERN_ERR "AltiVec used in kernel  (task=%p, pc=%lx)\n",
		       current, regs->nip);
	regs->msr |= MSR_VEC;
}
767
#ifdef CONFIG_ALTIVEC
/*
 * AltiVec assist: the hardware wants help with a vector operation
 * (denormal handling).  Flush live vector state to the thread struct,
 * then try software emulation; unknown instructions get the non-Java
 * bit set in the VSCR as a stopgap.  Kernel-mode assists are fatal.
 */
void altivec_assist_exception(struct pt_regs *regs)
{
	int err;

	/* make thread.vr state consistent before emulating from it */
	preempt_disable();
	if (regs->msr & MSR_VEC)
		giveup_altivec(current);
	preempt_enable();
	if (!user_mode(regs)) {
		printk(KERN_ERR "altivec assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		debugger(regs);
		die("altivec assist exception", regs, SIGFPE);
		return;
	}

	err = emulate_altivec(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else {
		/* didn't recognize the instruction */
		/* XXX quick hack for now: set the non-Java bit in the VSCR */
		printk(KERN_ERR "unrecognized altivec instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
		current->thread.vscr.u[3] |= 0x10000;
	}
}
#endif /* CONFIG_ALTIVEC */
804
#ifdef CONFIG_BOOKE_WDT
/*
 * Default handler for a Watchdog exception,
 * spins until a reboot occurs
 */
void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
{
	/* Generic WatchdogHandler, implement your own */
	/* Mask further watchdog interrupts; presumably the next watchdog
	 * timeout then resets the board (depends on TCR[WRC] — confirm
	 * against the board's setup). */
	mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
	return;
}

/* Book-E watchdog critical interrupt: log and defer to the (weak,
 * board-overridable) handler above. */
void WatchdogException(struct pt_regs *regs)
{
	printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
	WatchdogHandler(regs);
}
#endif
823
/* Nothing to do at boot time on ppc: the exception vectors are
 * presumably installed by the head_*.S startup code, not here. */
void __init trap_init(void)
{
}
diff --git a/arch/ppc/kernel/vmlinux.lds.S b/arch/ppc/kernel/vmlinux.lds.S
deleted file mode 100644
index 8a24bc47eb6c..000000000000
--- a/arch/ppc/kernel/vmlinux.lds.S
+++ /dev/null
@@ -1,164 +0,0 @@
#include <asm-generic/vmlinux.lds.h>

OUTPUT_ARCH(powerpc:common)
/* 32-bit jiffies aliases the low word of jiffies_64; ppc32 is
 * big-endian, so the low 32 bits live at byte offset 4. */
jiffies = jiffies_64 + 4;
SECTIONS
{
  /* Read-only sections, merged into text segment: */
  . = + SIZEOF_HEADERS;
  .interp : { *(.interp) }
  .hash : { *(.hash) }
  .gnu.hash : { *(.gnu.hash) }
  .dynsym : { *(.dynsym) }
  .dynstr : { *(.dynstr) }
  .rel.text : { *(.rel.text) }
  .rela.text : { *(.rela.text) }
  .rel.data : { *(.rel.data) }
  .rela.data : { *(.rela.data) }
  .rel.rodata : { *(.rel.rodata) }
  .rela.rodata : { *(.rela.rodata) }
  .rel.got : { *(.rel.got) }
  .rela.got : { *(.rela.got) }
  .rel.ctors : { *(.rel.ctors) }
  .rela.ctors : { *(.rela.ctors) }
  .rel.dtors : { *(.rel.dtors) }
  .rela.dtors : { *(.rela.dtors) }
  .rel.bss : { *(.rel.bss) }
  .rela.bss : { *(.rela.bss) }
  .rel.plt : { *(.rel.plt) }
  .rela.plt : { *(.rela.plt) }
/* .init : { *(.init) } =0*/
  .plt : { *(.plt) }
  .text :
  {
    _text = .;
    TEXT_TEXT
    SCHED_TEXT
    LOCK_TEXT
    *(.fixup)
    *(.got1)
    __got2_start = .;
    *(.got2)
    __got2_end = .;
  }
  _etext = .;
  PROVIDE (etext = .);

  RODATA
  .fini : { *(.fini) } =0
  .ctors : { *(.ctors) }
  .dtors : { *(.dtors) }

  .fixup : { *(.fixup) }

  /* Exception fixup table; the start/stop symbols bound the table
   * consumed by search_exception_tables(). */
  __ex_table : {
    __start___ex_table = .;
    *(__ex_table)
    __stop___ex_table = .;
  }

  /* BUG()/WARN() records, looked up by report_bug(). */
  __bug_table : {
    __start___bug_table = .;
    *(__bug_table)
    __stop___bug_table = .;
  }

  /* Read-write section, merged into data segment: */
  . = ALIGN(4096);
  .data :
  {
    DATA_DATA
    *(.data1)
    *(.sdata)
    *(.sdata2)
    *(.got.plt) *(.got)
    *(.dynamic)
    CONSTRUCTORS
  }

  . = ALIGN(4096);
  __nosave_begin = .;
  .data_nosave : { *(.data.nosave) }
  . = ALIGN(4096);
  __nosave_end = .;

  . = ALIGN(32);
  .data.cacheline_aligned : { *(.data.cacheline_aligned) }

  _edata = .;
  PROVIDE (edata = .);

  . = ALIGN(8192);
  .data.init_task : { *(.data.init_task) }

  NOTES

  /* Everything from __init_begin to __init_end is freed after boot. */
  . = ALIGN(4096);
  __init_begin = .;
  .init.text : {
	_sinittext = .;
	INIT_TEXT
	_einittext = .;
  }
  /* .exit.text is discarded at runtime, not link time,
     to deal with references from __bug_table */
  .exit.text : { EXIT_TEXT }
  .init.data : {
    INIT_DATA
    __vtop_table_begin = .;
    *(.vtop_fixup);
    __vtop_table_end = .;
    __ptov_table_begin = .;
    *(.ptov_fixup);
    __ptov_table_end = .;
  }
  . = ALIGN(16);
  __setup_start = .;
  .init.setup : { *(.init.setup) }
  __setup_end = .;
  __initcall_start = .;
  .initcall.init : {
	INITCALLS
  }
  __initcall_end = .;

  __con_initcall_start = .;
  .con_initcall.init : { *(.con_initcall.init) }
  __con_initcall_end = .;

  SECURITY_INIT

  /* CPU feature fixup table (presumably patched at boot based on
   * cur_cpu_spec — confirm against the feature-fixup code). */
  __start___ftr_fixup = .;
  __ftr_fixup : { *(__ftr_fixup) }
  __stop___ftr_fixup = .;

  PERCPU(4096)

#ifdef CONFIG_BLK_DEV_INITRD
  . = ALIGN(4096);
  __initramfs_start = .;
  .init.ramfs : { *(.init.ramfs) }
  __initramfs_end = .;
#endif

  . = ALIGN(4096);
  __init_end = .;
  __bss_start = .;
  .bss :
  {
   *(.sbss) *(.scommon)
   *(.dynbss)
   *(.bss)
   *(COMMON)
  }
  __bss_stop = .;

  _end = . ;
  PROVIDE (end = .);

  /* Sections to be discarded. */
  /DISCARD/ : {
    *(.exitcall.exit)
    EXIT_DATA
  }
}