diff options
author | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400 |
commit | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch) | |
tree | 0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/ppc/kernel |
Linux-2.6.12-rc2v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/ppc/kernel')
45 files changed, 22654 insertions, 0 deletions
diff --git a/arch/ppc/kernel/Makefile b/arch/ppc/kernel/Makefile new file mode 100644 index 000000000000..86bc878cb3ee --- /dev/null +++ b/arch/ppc/kernel/Makefile | |||
@@ -0,0 +1,33 @@ | |||
1 | # | ||
2 | # Makefile for the linux kernel. | ||
3 | # | ||
4 | |||
5 | extra-$(CONFIG_PPC_STD_MMU) := head.o | ||
6 | extra-$(CONFIG_40x) := head_4xx.o | ||
7 | extra-$(CONFIG_44x) := head_44x.o | ||
8 | extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o | ||
9 | extra-$(CONFIG_8xx) := head_8xx.o | ||
10 | extra-$(CONFIG_6xx) += idle_6xx.o | ||
11 | extra-$(CONFIG_POWER4) += idle_power4.o | ||
12 | extra-y += vmlinux.lds | ||
13 | |||
14 | obj-y := entry.o traps.o irq.o idle.o time.o misc.o \ | ||
15 | process.o signal.o ptrace.o align.o \ | ||
16 | semaphore.o syscalls.o setup.o \ | ||
17 | cputable.o ppc_htab.o perfmon.o | ||
18 | obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o | ||
19 | obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o | ||
20 | obj-$(CONFIG_POWER4) += cpu_setup_power4.o | ||
21 | obj-$(CONFIG_MODULES) += module.o ppc_ksyms.o | ||
22 | obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-mapping.o | ||
23 | obj-$(CONFIG_PCI) += pci.o | ||
24 | obj-$(CONFIG_KGDB) += ppc-stub.o | ||
25 | obj-$(CONFIG_SMP) += smp.o smp-tbsync.o | ||
26 | obj-$(CONFIG_TAU) += temp.o | ||
27 | obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o | ||
28 | obj-$(CONFIG_FSL_BOOKE) += perfmon_fsl_booke.o | ||
29 | |||
30 | ifndef CONFIG_MATH_EMULATION | ||
31 | obj-$(CONFIG_8xx) += softemu8xx.o | ||
32 | endif | ||
33 | |||
diff --git a/arch/ppc/kernel/align.c b/arch/ppc/kernel/align.c new file mode 100644 index 000000000000..79c929475037 --- /dev/null +++ b/arch/ppc/kernel/align.c | |||
@@ -0,0 +1,398 @@ | |||
1 | /* | ||
2 | * align.c - handle alignment exceptions for the Power PC. | ||
3 | * | ||
4 | * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au> | ||
5 | * Copyright (c) 1998-1999 TiVo, Inc. | ||
6 | * PowerPC 403GCX modifications. | ||
7 | * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu> | ||
8 | * PowerPC 403GCX/405GP modifications. | ||
9 | */ | ||
10 | #include <linux/config.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/mm.h> | ||
13 | #include <asm/ptrace.h> | ||
14 | #include <asm/processor.h> | ||
15 | #include <asm/uaccess.h> | ||
16 | #include <asm/system.h> | ||
17 | #include <asm/cache.h> | ||
18 | |||
/*
 * One decode-table entry: operand length in bytes plus flag bits
 * (LD/ST/... below) describing what the faulting instruction does.
 */
struct aligninfo {
	unsigned char len;	/* operand size in bytes; 0 = special-cased or invalid */
	unsigned char flags;	/* combination of the LD/ST/... bits below */
};

/*
 * These CPU families do not provide the instruction-decode bits in
 * DSISR (see the comment in fix_alignment()), so the faulting
 * instruction is fetched and decoded by hand with these helpers.
 */
#if defined(CONFIG_4xx) || defined(CONFIG_POWER4) || defined(CONFIG_BOOKE)
#define OPCD(inst)	(((inst) & 0xFC000000) >> 26)	/* primary opcode field */
#define RS(inst)	(((inst) & 0x03E00000) >> 21)	/* source/dest register field */
#define RA(inst)	(((inst) & 0x001F0000) >> 16)	/* base/update register field */
#define IS_XFORM(code)	((code) == 31)			/* opcode 31 = X-form (indexed) */
#endif

#define INVALID	{ 0, 0 }	/* table entry for instructions we don't emulate */

#define LD	1	/* load */
#define ST	2	/* store */
#define SE	4	/* sign-extend value */
#define F	8	/* to/from fp regs */
#define U	0x10	/* update index register */
#define M	0x20	/* multiple load/store */
#define S	0x40	/* single-precision fp, or byte-swap value */
#define SX	0x40	/* byte count in XER */
#define HARD	0x80	/* string, stwcx. */

#define DCBZ	0x5f	/* 8xx/82xx dcbz faults when cache not enabled */
44 | |||
/*
 * The PowerPC stores certain bits of the instruction that caused the
 * alignment exception in the DSISR register.  This array maps those
 * bits to information about the operand length and what the
 * instruction would do.  Index layout (7 bits, shown in the entry
 * comments): bits 5-6, bit 4, bits 0-3 of the DSISR-derived value.
 */
static struct aligninfo aligninfo[128] = {
	{ 4, LD },		/* 00 0 0000: lwz / lwarx */
	INVALID,		/* 00 0 0001 */
	{ 4, ST },		/* 00 0 0010: stw */
	INVALID,		/* 00 0 0011 */
	{ 2, LD },		/* 00 0 0100: lhz */
	{ 2, LD+SE },		/* 00 0 0101: lha */
	{ 2, ST },		/* 00 0 0110: sth */
	{ 4, LD+M },		/* 00 0 0111: lmw */
	{ 4, LD+F+S },		/* 00 0 1000: lfs */
	{ 8, LD+F },		/* 00 0 1001: lfd */
	{ 4, ST+F+S },		/* 00 0 1010: stfs */
	{ 8, ST+F },		/* 00 0 1011: stfd */
	INVALID,		/* 00 0 1100 */
	INVALID,		/* 00 0 1101: ld/ldu/lwa */
	INVALID,		/* 00 0 1110 */
	INVALID,		/* 00 0 1111: std/stdu */
	{ 4, LD+U },		/* 00 1 0000: lwzu */
	INVALID,		/* 00 1 0001 */
	{ 4, ST+U },		/* 00 1 0010: stwu */
	INVALID,		/* 00 1 0011 */
	{ 2, LD+U },		/* 00 1 0100: lhzu */
	{ 2, LD+SE+U },		/* 00 1 0101: lhau */
	{ 2, ST+U },		/* 00 1 0110: sthu */
	{ 4, ST+M },		/* 00 1 0111: stmw */
	{ 4, LD+F+S+U },	/* 00 1 1000: lfsu */
	{ 8, LD+F+U },		/* 00 1 1001: lfdu */
	{ 4, ST+F+S+U },	/* 00 1 1010: stfsu */
	{ 8, ST+F+U },		/* 00 1 1011: stfdu */
	INVALID,		/* 00 1 1100 */
	INVALID,		/* 00 1 1101 */
	INVALID,		/* 00 1 1110 */
	INVALID,		/* 00 1 1111 */
	INVALID,		/* 01 0 0000: ldx */
	INVALID,		/* 01 0 0001 */
	INVALID,		/* 01 0 0010: stdx */
	INVALID,		/* 01 0 0011 */
	INVALID,		/* 01 0 0100 */
	INVALID,		/* 01 0 0101: lwax */
	INVALID,		/* 01 0 0110 */
	INVALID,		/* 01 0 0111 */
	{ 4, LD+M+HARD+SX },	/* 01 0 1000: lswx */
	{ 4, LD+M+HARD },	/* 01 0 1001: lswi */
	{ 4, ST+M+HARD+SX },	/* 01 0 1010: stswx */
	{ 4, ST+M+HARD },	/* 01 0 1011: stswi */
	INVALID,		/* 01 0 1100 */
	INVALID,		/* 01 0 1101 */
	INVALID,		/* 01 0 1110 */
	INVALID,		/* 01 0 1111 */
	INVALID,		/* 01 1 0000: ldux */
	INVALID,		/* 01 1 0001 */
	INVALID,		/* 01 1 0010: stdux */
	INVALID,		/* 01 1 0011 */
	INVALID,		/* 01 1 0100 */
	INVALID,		/* 01 1 0101: lwaux */
	INVALID,		/* 01 1 0110 */
	INVALID,		/* 01 1 0111 */
	INVALID,		/* 01 1 1000 */
	INVALID,		/* 01 1 1001 */
	INVALID,		/* 01 1 1010 */
	INVALID,		/* 01 1 1011 */
	INVALID,		/* 01 1 1100 */
	INVALID,		/* 01 1 1101 */
	INVALID,		/* 01 1 1110 */
	INVALID,		/* 01 1 1111 */
	INVALID,		/* 10 0 0000 */
	INVALID,		/* 10 0 0001 */
	{ 0, ST+HARD },		/* 10 0 0010: stwcx. */
	INVALID,		/* 10 0 0011 */
	INVALID,		/* 10 0 0100 */
	INVALID,		/* 10 0 0101 */
	INVALID,		/* 10 0 0110 */
	INVALID,		/* 10 0 0111 */
	{ 4, LD+S },		/* 10 0 1000: lwbrx */
	INVALID,		/* 10 0 1001 */
	{ 4, ST+S },		/* 10 0 1010: stwbrx */
	INVALID,		/* 10 0 1011 */
	{ 2, LD+S },		/* 10 0 1100: lhbrx */
	INVALID,		/* 10 0 1101 */
	{ 2, ST+S },		/* 10 0 1110: sthbrx */
	INVALID,		/* 10 0 1111 */
	INVALID,		/* 10 1 0000 */
	INVALID,		/* 10 1 0001 */
	INVALID,		/* 10 1 0010 */
	INVALID,		/* 10 1 0011 */
	INVALID,		/* 10 1 0100 */
	INVALID,		/* 10 1 0101 */
	INVALID,		/* 10 1 0110 */
	INVALID,		/* 10 1 0111 */
	INVALID,		/* 10 1 1000 */
	INVALID,		/* 10 1 1001 */
	INVALID,		/* 10 1 1010 */
	INVALID,		/* 10 1 1011 */
	INVALID,		/* 10 1 1100 */
	INVALID,		/* 10 1 1101 */
	INVALID,		/* 10 1 1110 */
	{ 0, ST+HARD },		/* 10 1 1111: dcbz */
	{ 4, LD },		/* 11 0 0000: lwzx */
	INVALID,		/* 11 0 0001 */
	{ 4, ST },		/* 11 0 0010: stwx */
	INVALID,		/* 11 0 0011 */
	{ 2, LD },		/* 11 0 0100: lhzx */
	{ 2, LD+SE },		/* 11 0 0101: lhax */
	{ 2, ST },		/* 11 0 0110: sthx */
	INVALID,		/* 11 0 0111 */
	{ 4, LD+F+S },		/* 11 0 1000: lfsx */
	{ 8, LD+F },		/* 11 0 1001: lfdx */
	{ 4, ST+F+S },		/* 11 0 1010: stfsx */
	{ 8, ST+F },		/* 11 0 1011: stfdx */
	INVALID,		/* 11 0 1100 */
	INVALID,		/* 11 0 1101: lmd */
	INVALID,		/* 11 0 1110 */
	INVALID,		/* 11 0 1111: stmd */
	{ 4, LD+U },		/* 11 1 0000: lwzux */
	INVALID,		/* 11 1 0001 */
	{ 4, ST+U },		/* 11 1 0010: stwux */
	INVALID,		/* 11 1 0011 */
	{ 2, LD+U },		/* 11 1 0100: lhzux */
	{ 2, LD+SE+U },		/* 11 1 0101: lhaux */
	{ 2, ST+U },		/* 11 1 0110: sthux */
	INVALID,		/* 11 1 0111 */
	{ 4, LD+F+S+U },	/* 11 1 1000: lfsux */
	{ 8, LD+F+U },		/* 11 1 1001: lfdux */
	{ 4, ST+F+S+U },	/* 11 1 1010: stfsux */
	{ 8, ST+F+U },		/* 11 1 1011: stfdux */
	INVALID,		/* 11 1 1100 */
	INVALID,		/* 11 1 1101 */
	INVALID,		/* 11 1 1110 */
	INVALID,		/* 11 1 1111 */
};
181 | |||
/* NB: expands using the local 't' declared inside fix_alignment() */
#define SWAP(a, b)	(t = (a), (a) = (b), (b) = t)

/*
 * fix_alignment() - emulate the unaligned load/store that raised an
 * alignment exception.
 *
 * Decodes the faulting instruction (from DSISR, or by re-reading the
 * instruction on CPUs without usable DSISR bits), then performs the
 * access byte-by-byte via __get_user/__put_user and writes the result
 * back into the saved register state in @regs.
 *
 * Returns:
 *   1        - access was successfully emulated
 *   0        - instruction is invalid or too hard to emulate
 *   -EFAULT  - the (user) address could not be accessed
 */
int
fix_alignment(struct pt_regs *regs)
{
	int instr, nb, flags;
#if defined(CONFIG_4xx) || defined(CONFIG_POWER4) || defined(CONFIG_BOOKE)
	int opcode, f1, f2, f3;
#endif
	int i, t;		/* t is used implicitly by SWAP() */
	int reg, areg;		/* data register, update (base) register */
	int offset, nb0;
	unsigned char __user *addr;
	unsigned char *rptr;
	union {
		long l;
		float f;
		double d;
		unsigned char v[8];	/* byte view used for the per-byte copies */
	} data;

	CHECK_FULL_REGS(regs);

#if defined(CONFIG_4xx) || defined(CONFIG_POWER4) || defined(CONFIG_BOOKE)
	/* The 4xx-family & Book-E processors have no DSISR register,
	 * so we emulate it.
	 * The POWER4 has a DSISR register but doesn't set it on
	 * an alignment fault.  -- paulus
	 */

	if (__get_user(instr, (unsigned int __user *) regs->nip))
		return 0;
	opcode = OPCD(instr);
	reg = RS(instr);
	areg = RA(instr);

	/* Re-pack the relevant instruction fields into the same 7-bit
	 * index layout that DSISR-capable CPUs provide directly. */
	if (!IS_XFORM(opcode)) {
		f1 = 0;
		f2 = (instr & 0x04000000) >> 26;
		f3 = (instr & 0x78000000) >> 27;
	} else {
		f1 = (instr & 0x00000006) >> 1;
		f2 = (instr & 0x00000040) >> 6;
		f3 = (instr & 0x00000780) >> 7;
	}

	instr = ((f1 << 5) | (f2 << 4) | f3);
#else
	reg = (regs->dsisr >> 5) & 0x1f;	/* source/dest register */
	areg = regs->dsisr & 0x1f;		/* register to update */
	instr = (regs->dsisr >> 10) & 0x7f;	/* aligninfo[] index */
#endif

	nb = aligninfo[instr].len;
	if (nb == 0) {
		/* len 0 marks invalid entries plus the two special cases
		 * (dcbz, stwcx.); only dcbz is emulated here. */
		long __user *p;
		int i;

		if (instr != DCBZ)
			return 0;	/* too hard or invalid instruction */
		/*
		 * The dcbz (data cache block zero) instruction
		 * gives an alignment fault if used on non-cacheable
		 * memory.  We handle the fault mainly for the
		 * case when we are running with the cache disabled
		 * for debugging.
		 */
		p = (long __user *) (regs->dar & -L1_CACHE_BYTES);
		if (user_mode(regs)
		    && !access_ok(VERIFY_WRITE, p, L1_CACHE_BYTES))
			return -EFAULT;
		for (i = 0; i < L1_CACHE_BYTES / sizeof(long); ++i)
			if (__put_user(0, p+i))
				return -EFAULT;
		return 1;
	}

	flags = aligninfo[instr].flags;
	if ((flags & (LD|ST)) == 0)
		return 0;

	/* For the 4xx-family & Book-E processors, the 'dar' field of the
	 * pt_regs structure is overloaded and is really from the DEAR.
	 */

	addr = (unsigned char __user *)regs->dar;

	if (flags & M) {
		/* lmw, stmw, lswi/x, stswi/x */
		nb0 = 0;	/* bytes that wrap around into gpr[0..] */
		if (flags & HARD) {
			if (flags & SX) {
				/* lswx/stswx: byte count comes from XER */
				nb = regs->xer & 127;
				if (nb == 0)
					return 1;
			} else {
				/* lswi/stswi: byte count is in the instruction */
				if (__get_user(instr,
					    (unsigned int __user *)regs->nip))
					return 0;
				nb = (instr >> 11) & 0x1f;
				if (nb == 0)
					nb = 32;
			}
			/* String ops wrap from gpr[31] back to gpr[0]:
			 * split the transfer at the end of the register file
			 * (32 regs * 4 bytes = 128). */
			if (nb + reg * 4 > 128) {
				nb0 = nb + reg * 4 - 128;
				nb = 128 - reg * 4;
			}
		} else {
			/* lmw, stmw */
			nb = (32 - reg) * 4;
		}
		rptr = (unsigned char *) &regs->gpr[reg];
		if (flags & LD) {
			for (i = 0; i < nb; ++i)
				if (__get_user(rptr[i], addr+i))
					return -EFAULT;
			if (nb0 > 0) {
				/* wrapped part, starting at gpr[0] */
				rptr = (unsigned char *) &regs->gpr[0];
				addr += nb;
				for (i = 0; i < nb0; ++i)
					if (__get_user(rptr[i], addr+i))
						return -EFAULT;
			}
			/* zero-fill the rest of a partially-loaded register */
			for (; (i & 3) != 0; ++i)
				rptr[i] = 0;
		} else {
			for (i = 0; i < nb; ++i)
				if (__put_user(rptr[i], addr+i))
					return -EFAULT;
			if (nb0 > 0) {
				rptr = (unsigned char *) &regs->gpr[0];
				addr += nb;
				for (i = 0; i < nb0; ++i)
					if (__put_user(rptr[i], addr+i))
						return -EFAULT;
			}
		}
		return 1;
	}

	/* Single-value transfer from here on. */
	offset = 0;
	if (nb < 4) {
		/* read/write the least significant bits */
		data.l = 0;
		offset = 4 - nb;
	}

	/* Verify the address of the operand */
	if (user_mode(regs)) {
		if (!access_ok((flags & ST? VERIFY_WRITE: VERIFY_READ), addr, nb))
			return -EFAULT;	/* bad address */
	}

	if (flags & F) {
		/* Make sure current's live FP state is flushed to the
		 * thread_struct before we read/write thread.fpr[]. */
		preempt_disable();
		if (regs->msr & MSR_FP)
			giveup_fpu(current);
		preempt_enable();
	}

	/* If we read the operand, copy it in, else get register values */
	if (flags & LD) {
		for (i = 0; i < nb; ++i)
			if (__get_user(data.v[offset+i], addr+i))
				return -EFAULT;
	} else if (flags & F) {
		data.d = current->thread.fpr[reg];
	} else {
		data.l = regs->gpr[reg];
	}

	switch (flags & ~U) {
	case LD+SE:	/* sign extend (halfword loads: lha/lhau/lhax/lhaux) */
		if (data.v[2] >= 0x80)
			data.v[0] = data.v[1] = -1;
		break;

	case LD+S:	/* byte-swap (lwbrx/lhbrx, stwbrx/sthbrx) */
	case ST+S:
		if (nb == 2) {
			SWAP(data.v[2], data.v[3]);
		} else {
			SWAP(data.v[0], data.v[3]);
			SWAP(data.v[1], data.v[2]);
		}
		break;

	/* Single-precision FP load and store require conversions... */
	case LD+F+S:
		preempt_disable();
		enable_kernel_fp();
		cvt_fd(&data.f, &data.d, &current->thread.fpscr);
		preempt_enable();
		break;
	case ST+F+S:
		preempt_disable();
		enable_kernel_fp();
		cvt_df(&data.d, &data.f, &current->thread.fpscr);
		preempt_enable();
		break;
	}

	/* Write the result back out: to memory, FP reg, or GPR. */
	if (flags & ST) {
		for (i = 0; i < nb; ++i)
			if (__put_user(data.v[offset+i], addr+i))
				return -EFAULT;
	} else if (flags & F) {
		current->thread.fpr[reg] = data.d;
	} else {
		regs->gpr[reg] = data.l;
	}

	/* Update forms: write the effective address back to RA. */
	if (flags & U)
		regs->gpr[areg] = regs->dar;

	return 1;
}
diff --git a/arch/ppc/kernel/asm-offsets.c b/arch/ppc/kernel/asm-offsets.c new file mode 100644 index 000000000000..d9ad1d776d0e --- /dev/null +++ b/arch/ppc/kernel/asm-offsets.c | |||
@@ -0,0 +1,146 @@ | |||
1 | /* | ||
2 | * This program is used to generate definitions needed by | ||
3 | * assembly language modules. | ||
4 | * | ||
5 | * We use the technique used in the OSF Mach kernel code: | ||
6 | * generate asm statements containing #defines, | ||
7 | * compile this file to assembler, and then extract the | ||
8 | * #defines from the assembly-language output. | ||
9 | */ | ||
10 | |||
11 | #include <linux/config.h> | ||
12 | #include <linux/signal.h> | ||
13 | #include <linux/sched.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/errno.h> | ||
16 | #include <linux/string.h> | ||
17 | #include <linux/types.h> | ||
18 | #include <linux/ptrace.h> | ||
19 | #include <linux/suspend.h> | ||
20 | #include <linux/mman.h> | ||
21 | #include <linux/mm.h> | ||
22 | #include <asm/io.h> | ||
23 | #include <asm/page.h> | ||
24 | #include <asm/pgtable.h> | ||
25 | #include <asm/processor.h> | ||
26 | #include <asm/cputable.h> | ||
27 | #include <asm/thread_info.h> | ||
28 | |||
/*
 * DEFINE() emits a "->SYM <value>" marker line into this file's
 * generated assembly output; the build then extracts those markers
 * into assembler-usable #defines (see the header comment above).
 * The "i" constraint forces 'val' to be a compile-time constant.
 */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

/* Emit an empty marker line, as a visual separator in the output. */
#define BLANK() asm volatile("\n->" : : )
33 | |||
/*
 * Emit every offset/constant the assembly sources need, one DEFINE()
 * marker per symbol.  This program is compiled to assembly only; its
 * output is post-processed, never linked or run.
 */
int
main(void)
{
	/* task_struct / thread_struct layout */
	DEFINE(THREAD, offsetof(struct task_struct, thread));
	DEFINE(THREAD_INFO, offsetof(struct task_struct, thread_info));
	DEFINE(MM, offsetof(struct task_struct, mm));
	DEFINE(PTRACE, offsetof(struct task_struct, ptrace));
	DEFINE(KSP, offsetof(struct thread_struct, ksp));
	DEFINE(PGDIR, offsetof(struct thread_struct, pgdir));
	DEFINE(LAST_SYSCALL, offsetof(struct thread_struct, last_syscall));
	DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
	DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode));
	DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
	DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, dbcr0));
	DEFINE(PT_PTRACED, PT_PTRACED);
#endif
#ifdef CONFIG_ALTIVEC
	DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0]));
	DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave));
	DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr));
	DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr));
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	DEFINE(THREAD_EVR0, offsetof(struct thread_struct, evr[0]));
	DEFINE(THREAD_ACC, offsetof(struct thread_struct, acc));
	DEFINE(THREAD_SPEFSCR, offsetof(struct thread_struct, spefscr));
	DEFINE(THREAD_USED_SPE, offsetof(struct thread_struct, used_spe));
#endif /* CONFIG_SPE */
	/* Interrupt register frame */
	DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
	DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
	/* in fact we only use gpr0 - gpr9 and gpr20 - gpr23 */
	DEFINE(GPR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[0]));
	DEFINE(GPR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[1]));
	DEFINE(GPR2, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[2]));
	DEFINE(GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[3]));
	DEFINE(GPR4, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[4]));
	DEFINE(GPR5, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[5]));
	DEFINE(GPR6, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[6]));
	DEFINE(GPR7, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[7]));
	DEFINE(GPR8, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[8]));
	DEFINE(GPR9, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[9]));
	DEFINE(GPR10, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[10]));
	DEFINE(GPR11, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[11]));
	DEFINE(GPR12, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[12]));
	DEFINE(GPR13, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[13]));
	DEFINE(GPR14, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[14]));
	DEFINE(GPR15, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[15]));
	DEFINE(GPR16, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[16]));
	DEFINE(GPR17, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[17]));
	DEFINE(GPR18, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[18]));
	DEFINE(GPR19, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[19]));
	DEFINE(GPR20, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[20]));
	DEFINE(GPR21, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[21]));
	DEFINE(GPR22, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[22]));
	DEFINE(GPR23, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[23]));
	DEFINE(GPR24, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[24]));
	DEFINE(GPR25, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[25]));
	DEFINE(GPR26, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[26]));
	DEFINE(GPR27, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[27]));
	DEFINE(GPR28, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[28]));
	DEFINE(GPR29, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[29]));
	DEFINE(GPR30, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[30]));
	DEFINE(GPR31, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[31]));
	/* Note: these symbols include _ because they overlap with special
	 * register names
	 */
	DEFINE(_NIP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, nip));
	DEFINE(_MSR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, msr));
	DEFINE(_CTR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ctr));
	DEFINE(_LINK, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, link));
	DEFINE(_CCR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ccr));
	DEFINE(_MQ, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, mq));
	DEFINE(_XER, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, xer));
	DEFINE(_DAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
	DEFINE(_DSISR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
	/* The PowerPC 400-class & Book-E processors have neither the DAR nor the DSISR
	 * SPRs. Hence, we overload them to hold the similar DEAR and ESR SPRs
	 * for such processors.  For critical interrupts we use them to
	 * hold SRR0 and SRR1.
	 */
	DEFINE(_DEAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
	DEFINE(_ESR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
	DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3));
	DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result));
	DEFINE(TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap));
	/* clone() flags needed from assembly */
	DEFINE(CLONE_VM, CLONE_VM);
	DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
	DEFINE(MM_PGD, offsetof(struct mm_struct, pgd));

	/* About the CPU features table */
	DEFINE(CPU_SPEC_ENTRY_SIZE, sizeof(struct cpu_spec));
	DEFINE(CPU_SPEC_PVR_MASK, offsetof(struct cpu_spec, pvr_mask));
	DEFINE(CPU_SPEC_PVR_VALUE, offsetof(struct cpu_spec, pvr_value));
	DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
	DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));

	/* thread_info layout */
	DEFINE(TI_TASK, offsetof(struct thread_info, task));
	DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain));
	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
	DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
	DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
	DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));

	/* software-suspend page backup entry (struct pbe) layout */
	DEFINE(pbe_address, offsetof(struct pbe, address));
	DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
	DEFINE(pbe_next, offsetof(struct pbe, next));

	DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28);
	return 0;
}
diff --git a/arch/ppc/kernel/bitops.c b/arch/ppc/kernel/bitops.c new file mode 100644 index 000000000000..7f53d193968b --- /dev/null +++ b/arch/ppc/kernel/bitops.c | |||
@@ -0,0 +1,126 @@ | |||
1 | /* | ||
2 | * Copyright (C) 1996 Paul Mackerras. | ||
3 | */ | ||
4 | |||
5 | #include <linux/kernel.h> | ||
6 | #include <linux/bitops.h> | ||
7 | |||
8 | /* | ||
9 | * If the bitops are not inlined in bitops.h, they are defined here. | ||
10 | * -- paulus | ||
11 | */ | ||
12 | #if !__INLINE_BITOPS | ||
13 | void set_bit(int nr, volatile void * addr) | ||
14 | { | ||
15 | unsigned long old; | ||
16 | unsigned long mask = 1 << (nr & 0x1f); | ||
17 | unsigned long *p = ((unsigned long *)addr) + (nr >> 5); | ||
18 | |||
19 | __asm__ __volatile__(SMP_WMB "\n\ | ||
20 | 1: lwarx %0,0,%3 \n\ | ||
21 | or %0,%0,%2 \n" | ||
22 | PPC405_ERR77(0,%3) | ||
23 | " stwcx. %0,0,%3 \n\ | ||
24 | bne 1b" | ||
25 | SMP_MB | ||
26 | : "=&r" (old), "=m" (*p) | ||
27 | : "r" (mask), "r" (p), "m" (*p) | ||
28 | : "cc" ); | ||
29 | } | ||
30 | |||
31 | void clear_bit(int nr, volatile void *addr) | ||
32 | { | ||
33 | unsigned long old; | ||
34 | unsigned long mask = 1 << (nr & 0x1f); | ||
35 | unsigned long *p = ((unsigned long *)addr) + (nr >> 5); | ||
36 | |||
37 | __asm__ __volatile__(SMP_WMB "\n\ | ||
38 | 1: lwarx %0,0,%3 \n\ | ||
39 | andc %0,%0,%2 \n" | ||
40 | PPC405_ERR77(0,%3) | ||
41 | " stwcx. %0,0,%3 \n\ | ||
42 | bne 1b" | ||
43 | SMP_MB | ||
44 | : "=&r" (old), "=m" (*p) | ||
45 | : "r" (mask), "r" (p), "m" (*p) | ||
46 | : "cc"); | ||
47 | } | ||
48 | |||
49 | void change_bit(int nr, volatile void *addr) | ||
50 | { | ||
51 | unsigned long old; | ||
52 | unsigned long mask = 1 << (nr & 0x1f); | ||
53 | unsigned long *p = ((unsigned long *)addr) + (nr >> 5); | ||
54 | |||
55 | __asm__ __volatile__(SMP_WMB "\n\ | ||
56 | 1: lwarx %0,0,%3 \n\ | ||
57 | xor %0,%0,%2 \n" | ||
58 | PPC405_ERR77(0,%3) | ||
59 | " stwcx. %0,0,%3 \n\ | ||
60 | bne 1b" | ||
61 | SMP_MB | ||
62 | : "=&r" (old), "=m" (*p) | ||
63 | : "r" (mask), "r" (p), "m" (*p) | ||
64 | : "cc"); | ||
65 | } | ||
66 | |||
67 | int test_and_set_bit(int nr, volatile void *addr) | ||
68 | { | ||
69 | unsigned int old, t; | ||
70 | unsigned int mask = 1 << (nr & 0x1f); | ||
71 | volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5); | ||
72 | |||
73 | __asm__ __volatile__(SMP_WMB "\n\ | ||
74 | 1: lwarx %0,0,%4 \n\ | ||
75 | or %1,%0,%3 \n" | ||
76 | PPC405_ERR77(0,%4) | ||
77 | " stwcx. %1,0,%4 \n\ | ||
78 | bne 1b" | ||
79 | SMP_MB | ||
80 | : "=&r" (old), "=&r" (t), "=m" (*p) | ||
81 | : "r" (mask), "r" (p), "m" (*p) | ||
82 | : "cc"); | ||
83 | |||
84 | return (old & mask) != 0; | ||
85 | } | ||
86 | |||
87 | int test_and_clear_bit(int nr, volatile void *addr) | ||
88 | { | ||
89 | unsigned int old, t; | ||
90 | unsigned int mask = 1 << (nr & 0x1f); | ||
91 | volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5); | ||
92 | |||
93 | __asm__ __volatile__(SMP_WMB "\n\ | ||
94 | 1: lwarx %0,0,%4 \n\ | ||
95 | andc %1,%0,%3 \n" | ||
96 | PPC405_ERR77(0,%4) | ||
97 | " stwcx. %1,0,%4 \n\ | ||
98 | bne 1b" | ||
99 | SMP_MB | ||
100 | : "=&r" (old), "=&r" (t), "=m" (*p) | ||
101 | : "r" (mask), "r" (p), "m" (*p) | ||
102 | : "cc"); | ||
103 | |||
104 | return (old & mask) != 0; | ||
105 | } | ||
106 | |||
107 | int test_and_change_bit(int nr, volatile void *addr) | ||
108 | { | ||
109 | unsigned int old, t; | ||
110 | unsigned int mask = 1 << (nr & 0x1f); | ||
111 | volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5); | ||
112 | |||
113 | __asm__ __volatile__(SMP_WMB "\n\ | ||
114 | 1: lwarx %0,0,%4 \n\ | ||
115 | xor %1,%0,%3 \n" | ||
116 | PPC405_ERR77(0,%4) | ||
117 | " stwcx. %1,0,%4 \n\ | ||
118 | bne 1b" | ||
119 | SMP_MB | ||
120 | : "=&r" (old), "=&r" (t), "=m" (*p) | ||
121 | : "r" (mask), "r" (p), "m" (*p) | ||
122 | : "cc"); | ||
123 | |||
124 | return (old & mask) != 0; | ||
125 | } | ||
126 | #endif /* !__INLINE_BITOPS */ | ||
diff --git a/arch/ppc/kernel/cpu_setup_6xx.S b/arch/ppc/kernel/cpu_setup_6xx.S new file mode 100644 index 000000000000..74f781b486a3 --- /dev/null +++ b/arch/ppc/kernel/cpu_setup_6xx.S | |||
@@ -0,0 +1,440 @@ | |||
1 | /* | ||
2 | * This file contains low level CPU setup functions. | ||
3 | * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org) | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation; either version | ||
8 | * 2 of the License, or (at your option) any later version. | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #include <linux/config.h> | ||
13 | #include <asm/processor.h> | ||
14 | #include <asm/page.h> | ||
15 | #include <asm/ppc_asm.h> | ||
16 | #include <asm/cputable.h> | ||
17 | #include <asm/ppc_asm.h> | ||
18 | #include <asm/offsets.h> | ||
19 | #include <asm/cache.h> | ||
20 | |||
/* Per-family cpu_setup entry points, called through
 * cpu_specs[].cpu_setup (see cputable.c).  The setup_* helpers
 * below are reached with bl and therefore clobber LR, so every
 * multi-step entry saves LR in r4 around the calls.  r5 (the
 * third argument, presumably the struct cpu_spec pointer per the
 * PPC ABI -- see the extern prototypes in cputable.c) is left
 * untouched for helpers that patch feature bits.
 */
_GLOBAL(__setup_cpu_601)
	blr				/* 601 needs no extra setup */
_GLOBAL(__setup_cpu_603)
	b	setup_common_caches	/* tail call, LR preserved */
_GLOBAL(__setup_cpu_604)
	mflr	r4
	bl	setup_common_caches
	bl	setup_604_hid0
	mtlr	r4
	blr
_GLOBAL(__setup_cpu_750)
	mflr	r4
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	mtlr	r4
	blr
_GLOBAL(__setup_cpu_750cx)
	mflr	r4
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	bl	setup_750cx
	mtlr	r4
	blr
_GLOBAL(__setup_cpu_750fx)
	mflr	r4
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	bl	setup_750fx
	mtlr	r4
	blr
_GLOBAL(__setup_cpu_7400)
	mflr	r4
	bl	setup_7400_workarounds	/* errata fixes before cache setup */
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	mtlr	r4
	blr
_GLOBAL(__setup_cpu_7410)
	mflr	r4
	bl	setup_7410_workarounds
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	li	r3,0
	mtspr	SPRN_L2CR2,r3		/* clear L2CR2 (7410-specific SPR) */
	mtlr	r4
	blr
_GLOBAL(__setup_cpu_745x)
	mflr	r4
	bl	setup_common_caches
	bl	setup_745x_specifics
	mtlr	r4
	blr
73 | |||
/* Enable caches for 603's, 604, 750 & 7400.
 * The I-cache is always invalidated on enable; the D-cache is only
 * invalidated if it was previously disabled (invalidating an
 * enabled D-cache would throw away live data).
 */
setup_common_caches:
	mfspr	r11,SPRN_HID0
	andi.	r0,r11,HID0_DCE		/* CR0.eq := D-cache currently off */
	ori	r11,r11,HID0_ICE|HID0_DCE
	ori	r8,r11,HID0_ICFI	/* r8 = enable + I-cache invalidate */
	bne	1f			/* don't invalidate the D-cache */
	ori	r8,r8,HID0_DCI		/* unless it wasn't enabled */
1:	sync
	mtspr	SPRN_HID0,r8		/* enable and invalidate caches */
	sync
	mtspr	SPRN_HID0,r11		/* enable caches (invalidate bits off) */
	sync
	isync
	blr
89 | |||
/* 604, 604e, 604ev, ...
 * Enable superscalar execution & branch history table.
 * BTCD is pulsed (set in r8, absent from the final r11 value) to
 * flush the branch target address cache.
 */
setup_604_hid0:
	mfspr	r11,SPRN_HID0
	ori	r11,r11,HID0_SIED|HID0_BHTE
	ori	r8,r11,HID0_BTCD	/* r8 additionally sets BTCD */
	sync
	mtspr	SPRN_HID0,r8	/* flush branch target address cache */
	sync			/* on 604e/604r */
	mtspr	SPRN_HID0,r11	/* final value: BTCD cleared again */
	sync
	isync
	blr
104 | |||
/* 7400 <= rev 2.7 and 7410 rev = 1.0 suffer from some
 * erratas we work around here.
 * Moto MPC710CE.pdf describes them, those are errata
 * #3, #4 and #5
 * Note that we assume the firmware didn't choose to
 * apply other workarounds (there are other ones documented
 * in the .pdf). It appear that Apple firmware only works
 * around #3 and with the same fix we use. We may want to
 * check if the CPU is using 60x bus mode in which case
 * the workaround for errata #4 is useless. Also, we may
 * want to explicitely clear HID0_NOPDST as this is not
 * needed once we have applied workaround #5 (though it's
 * not set by Apple's firmware at least).
 */
setup_7400_workarounds:
	mfpvr	r3
	rlwinm	r3,r3,0,20,31		/* keep low 12 bits (revision) */
	cmpwi	0,r3,0x0207
	ble	1f			/* rev <= 2.7: fall into fixes below */
	blr
setup_7410_workarounds:
	mfpvr	r3
	rlwinm	r3,r3,0,20,31
	cmpwi	0,r3,0x0100
	bnelr				/* only rev 1.0 needs the fixes */
1:					/* shared body for both entries */
	mfspr	r11,SPRN_MSSSR0
	/* Errata #3: Set L1OPQ_SIZE to 0x10 */
	rlwinm	r11,r11,0,9,6		/* clear field bits 7-8 first */
	oris	r11,r11,0x0100
	/* Errata #4: Set L2MQ_SIZE to 1 (check for MPX mode first ?) */
	oris	r11,r11,0x0002
	/* Errata #5: Set DRLT_SIZE to 0x01 */
	rlwinm	r11,r11,0,5,2		/* clear field bits 3-4 first */
	oris	r11,r11,0x0800
	sync
	mtspr	SPRN_MSSSR0,r11
	sync
	isync
	blr
145 | |||
/* 740/750/7400/7410
 * Enable Store Gathering (SGE), Address Brodcast (ABE),
 * Branch History Table (BHTE), Branch Target ICache (BTIC)
 * Dynamic Power Management (DPM), Speculative (SPD)
 * Clear Instruction cache throttling (ICTC)
 * DPM is only enabled when the cpu feature bits say it is safe
 * (patched at boot by the BEGIN/END_FTR_SECTION machinery).
 */
setup_750_7400_hid0:
	mfspr	r11,SPRN_HID0
	ori	r11,r11,HID0_SGE | HID0_ABE | HID0_BHTE | HID0_BTIC
BEGIN_FTR_SECTION
	oris	r11,r11,HID0_DPM@h	/* enable dynamic power mgmt */
END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
	li	r3,HID0_SPD
	andc	r11,r11,r3		/* clear SPD: enable speculative */
	li	r3,0
	mtspr	SPRN_ICTC,r3		/* Instruction Cache Throttling off */
	isync
	mtspr	SPRN_HID0,r11
	sync
	isync
	blr
167 | |||
/* 750cx specific
 * Looks like we have to disable NAP feature for some PLL settings...
 * (waiting for confirmation)
 * Reads the PLL configuration field from HID1 and, if it is one of
 * the suspect values (7, 9 or 11), strips CPU_FTR_CAN_NAP from the
 * cpu_spec feature word so the idle code never naps this CPU.
 */
setup_750cx:
	mfspr	r10, SPRN_HID1
	rlwinm	r10,r10,4,28,31		/* extract PLL config field */
	cmpwi	cr0,r10,7
	cmpwi	cr1,r10,9
	cmpwi	cr2,r10,11
	cror	4*cr0+eq,4*cr0+eq,4*cr1+eq	/* eq := PLL in {7,9,11} */
	cror	4*cr0+eq,4*cr0+eq,4*cr2+eq
	bnelr				/* safe PLL setting: nothing to do */
	lwz	r6,CPU_SPEC_FEATURES(r5)	/* r5: cpu_spec ptr (3rd arg) */
	li	r7,CPU_FTR_CAN_NAP
	andc	r6,r6,r7			/* strip CAN_NAP */
	stw	r6,CPU_SPEC_FEATURES(r5)
	blr
186 | |||
/* 750fx specific
 * Nothing beyond the common 750/7400 path (placeholder kept so the
 * 750fx entry point has a distinct hook).
 */
setup_750fx:
	blr
191 | |||
/* MPC 745x
 * Enable Store Gathering (SGE), Branch Folding (FOLD)
 * Branch History Table (BHTE), Branch Target ICache (BTIC)
 * Dynamic Power Management (DPM), Speculative (SPD)
 * Ensure our data cache instructions really operate.
 * Timebase has to be running or we wouldn't have made it here,
 * just ensure we don't disable it.
 * Clear Instruction cache throttling (ICTC)
 * Enable L2 HW prefetch
 */
setup_745x_specifics:
	/* We check for the presence of an L3 cache setup by
	 * the firmware. If any, we disable NAP capability as
	 * it's known to be bogus on rev 2.1 and earlier
	 */
	mfspr	r11,SPRN_L3CR
	andis.	r11,r11,L3CR_L3E@h	/* L3 enabled by firmware? */
	beq	1f
	lwz	r6,CPU_SPEC_FEATURES(r5)	/* r5: cpu_spec ptr (3rd arg) */
	andi.	r0,r6,CPU_FTR_L3_DISABLE_NAP
	beq	1f
	li	r7,CPU_FTR_CAN_NAP
	andc	r6,r6,r7		/* strip CAN_NAP from this spec */
	stw	r6,CPU_SPEC_FEATURES(r5)
1:
	mfspr	r11,SPRN_HID0

	/* All of the bits we have to set.....
	 */
	ori	r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE | HID0_LRSTK | HID0_BTIC
BEGIN_FTR_SECTION
	xori	r11,r11,HID0_BTIC	/* undo BTIC on parts where it's broken */
END_FTR_SECTION_IFSET(CPU_FTR_NO_BTIC)
BEGIN_FTR_SECTION
	oris	r11,r11,HID0_DPM@h	/* enable dynamic power mgmt */
END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)

	/* All of the bits we have to clear....
	 */
	li	r3,HID0_SPD | HID0_NOPDST | HID0_NOPTI
	andc	r11,r11,r3		/* clear SPD: enable speculative */
	li	r3,0

	mtspr	SPRN_ICTC,r3		/* Instruction Cache Throttling off */
	isync
	mtspr	SPRN_HID0,r11
	sync
	isync

	/* Enable L2 HW prefetch
	 * (low 2 bits of MSSCR0 -- presumably the L2 prefetch enables;
	 * verify against the MPC7450 family manual)
	 */
	mfspr	r3,SPRN_MSSCR0
	ori	r3,r3,3
	sync
	mtspr	SPRN_MSSCR0,r3
	sync
	isync
	blr
250 | |||
/* Definitions for the table use to save CPU states */
/* Byte offsets into cpu_state_storage below; every slot is a
 * 32-bit SPR image (stw/lwz in save/restore).
 */
#define CS_HID0		0
#define CS_HID1		4
#define CS_HID2		8
#define CS_MSSCR0	12
#define CS_MSSSR0	16
#define CS_ICTRL	20
#define CS_LDSTCR	24
#define CS_LDSTDB	28
#define CS_SIZE		32

	.data
	.balign	L1_CACHE_LINE_SIZE	/* keep save area cache-line aligned */
cpu_state_storage:
	.space	CS_SIZE
	.balign	L1_CACHE_LINE_SIZE,0	/* pad out the trailing line */
	.text
268 | |||
/* Called in normal context to backup CPU 0 state. This
 * does not include cache settings. This function is also
 * called for machine sleep. This does not include the MMU
 * setup, BATs, etc... but rather the "special" registers
 * like HID0, HID1, MSSCR0, etc...
 *
 * CPU identification below: cr0.eq := "is some 74xx",
 * cr1.eq := "is 7400 or 7410", cr6.eq := "is 750FX".
 */
_GLOBAL(__save_cpu_setup)
	/* Some CR fields are volatile, we back it up all */
	mfcr	r7

	/* Get storage ptr */
	lis	r5,cpu_state_storage@h
	ori	r5,r5,cpu_state_storage@l

	/* Save HID0 (common to all CONFIG_6xx cpus) */
	mfspr	r3,SPRN_HID0
	stw	r3,CS_HID0(r5)

	/* Now deal with CPU type dependent registers */
	mfspr	r3,SPRN_PVR
	srwi	r3,r3,16		/* r3 = PVR version field */
	cmplwi	cr0,r3,0x8000	/* 7450 */
	cmplwi	cr1,r3,0x000c	/* 7400 */
	cmplwi	cr2,r3,0x800c	/* 7410 */
	cmplwi	cr3,r3,0x8001	/* 7455 */
	cmplwi	cr4,r3,0x8002	/* 7457 */
	cmplwi	cr5,r3,0x8003	/* 7447A */
	cmplwi	cr6,r3,0x7000	/* 750FX */
	/* cr1 is 7400 || 7410 */
	cror	4*cr1+eq,4*cr1+eq,4*cr2+eq
	/* cr0 is 74xx */
	cror	4*cr0+eq,4*cr0+eq,4*cr3+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr4+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr1+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr5+eq
	bne	1f			/* not a 74xx: check 750FX below */
	/* Backup 74xx specific regs */
	mfspr	r4,SPRN_MSSCR0
	stw	r4,CS_MSSCR0(r5)
	mfspr	r4,SPRN_MSSSR0
	stw	r4,CS_MSSSR0(r5)
	beq	cr1,1f			/* 7400/7410 stop here */
	/* Backup 745x specific registers */
	mfspr	r4,SPRN_HID1
	stw	r4,CS_HID1(r5)
	mfspr	r4,SPRN_ICTRL
	stw	r4,CS_ICTRL(r5)
	mfspr	r4,SPRN_LDSTCR
	stw	r4,CS_LDSTCR(r5)
	mfspr	r4,SPRN_LDSTDB
	stw	r4,CS_LDSTDB(r5)
1:
	bne	cr6,1f			/* not a 750FX: done */
	/* Backup 750FX specific registers */
	mfspr	r4,SPRN_HID1
	stw	r4,CS_HID1(r5)
	/* If rev 2.x, backup HID2 */
	mfspr	r3,SPRN_PVR
	andi.	r3,r3,0xff00		/* isolate major revision byte */
	cmpwi	cr0,r3,0x0200
	bne	1f
	mfspr	r4,SPRN_HID2
	stw	r4,CS_HID2(r5)
1:
	mtcr	r7			/* restore caller's CR */
	blr
335 | |||
/* Called with no MMU context (typically MSR:IR/DR off) to
 * restore CPU state as backed up by the previous
 * function. This does not include cache setting
 *
 * Runs translation-off, so the storage pointer is formed as a
 * physical address.  Same CR encoding as __save_cpu_setup:
 * cr0.eq = 74xx, cr1.eq = 7400/7410, cr2.eq = 7410, cr6.eq = 750FX.
 */
_GLOBAL(__restore_cpu_setup)
	/* Some CR fields are volatile, we back it up all */
	mfcr	r7

	/* Get storage ptr (physical).  Only the high half is adjusted
	 * by -KERNELBASE; the @l half is reused directly -- NOTE(review):
	 * this assumes KERNELBASE is 64K-aligned so the low 16 bits are
	 * unaffected; confirm against the platform's KERNELBASE.
	 */
	lis	r5,(cpu_state_storage-KERNELBASE)@h
	ori	r5,r5,cpu_state_storage@l

	/* Restore HID0 */
	lwz	r3,CS_HID0(r5)
	sync
	isync
	mtspr	SPRN_HID0,r3
	sync
	isync

	/* Now deal with CPU type dependent registers */
	mfspr	r3,SPRN_PVR
	srwi	r3,r3,16
	cmplwi	cr0,r3,0x8000	/* 7450 */
	cmplwi	cr1,r3,0x000c	/* 7400 */
	cmplwi	cr2,r3,0x800c	/* 7410 */
	cmplwi	cr3,r3,0x8001	/* 7455 */
	cmplwi	cr4,r3,0x8002	/* 7457 */
	cmplwi	cr5,r3,0x8003	/* 7447A */
	cmplwi	cr6,r3,0x7000	/* 750FX */
	/* cr1 is 7400 || 7410 */
	cror	4*cr1+eq,4*cr1+eq,4*cr2+eq
	/* cr0 is 74xx */
	cror	4*cr0+eq,4*cr0+eq,4*cr3+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr4+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr1+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr5+eq
	bne	2f			/* not a 74xx: check 750FX at 2: */
	/* Restore 74xx specific regs */
	lwz	r4,CS_MSSCR0(r5)
	sync
	mtspr	SPRN_MSSCR0,r4
	sync
	isync
	lwz	r4,CS_MSSSR0(r5)
	sync
	mtspr	SPRN_MSSSR0,r4
	sync
	isync
	bne	cr2,1f			/* skip unless exactly a 7410 */
	/* Clear 7410 L2CR2 */
	li	r4,0
	mtspr	SPRN_L2CR2,r4
1:	beq	cr1,2f			/* 7400/7410 done */
	/* Restore 745x specific registers */
	lwz	r4,CS_HID1(r5)
	sync
	mtspr	SPRN_HID1,r4
	isync
	sync
	lwz	r4,CS_ICTRL(r5)
	sync
	mtspr	SPRN_ICTRL,r4
	isync
	sync
	lwz	r4,CS_LDSTCR(r5)
	sync
	mtspr	SPRN_LDSTCR,r4
	isync
	sync
	lwz	r4,CS_LDSTDB(r5)
	sync
	mtspr	SPRN_LDSTDB,r4
	isync
	sync
2:	bne	cr6,1f			/* not a 750FX: done */
	/* Restore 750FX specific registers
	 * that is restore HID2 on rev 2.x and PLL config & switch
	 * to PLL 0 on all
	 */
	/* If rev 2.x, restore HID2 with low voltage bit cleared */
	mfspr	r3,SPRN_PVR
	andi.	r3,r3,0xff00		/* major revision byte */
	cmpwi	cr0,r3,0x0200
	bne	4f
	lwz	r4,CS_HID2(r5)
	rlwinm	r4,r4,0,19,17		/* clear bit 18 (low voltage) */
	mtspr	SPRN_HID2,r4
	sync
4:
	lwz	r4,CS_HID1(r5)
	rlwinm	r5,r5,0,16,14		/* NOTE: r5 (storage ptr) is dead
					 * past this point; reused as temp */
	rlwinm	r5,r4,0,16,14		/* saved HID1 with PLL-select cleared */
	mtspr	SPRN_HID1,r5		/* switch to PLL 0 first */
	/* Wait for PLL to stabilize */
	mftbl	r5
3:	mftbl	r6
	sub	r6,r6,r5
	cmplwi	cr0,r6,10000		/* ~10000 timebase ticks */
	ble	3b
	/* Setup final PLL */
	mtspr	SPRN_HID1,r4
1:
	mtcr	r7			/* restore caller's CR */
	blr
440 | |||
diff --git a/arch/ppc/kernel/cpu_setup_power4.S b/arch/ppc/kernel/cpu_setup_power4.S new file mode 100644 index 000000000000..f2ea1a990f17 --- /dev/null +++ b/arch/ppc/kernel/cpu_setup_power4.S | |||
@@ -0,0 +1,201 @@ | |||
1 | /* | ||
2 | * This file contains low level CPU setup functions. | ||
3 | * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org) | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation; either version | ||
8 | * 2 of the License, or (at your option) any later version. | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #include <linux/config.h> | ||
13 | #include <asm/processor.h> | ||
14 | #include <asm/page.h> | ||
15 | #include <asm/ppc_asm.h> | ||
16 | #include <asm/cputable.h> | ||
17 | #include <asm/ppc_asm.h> | ||
18 | #include <asm/offsets.h> | ||
19 | #include <asm/cache.h> | ||
20 | |||
/* Early PPC970/PPC970FX pre-initialization, run before the MMU is
 * turned off.  Normalizes HID4/HID5, enables i-fetch cacheability
 * and prefetch in HID1, and clears HIOR so exception vectors land
 * at physical 0.  Silently returns on any other CPU.
 */
_GLOBAL(__970_cpu_preinit)
	/*
	 * Deal only with PPC970 and PPC970FX.
	 * PVR version field: 0x0039 = 970, 0x003c = 970FX.
	 */
	mfspr	r0,SPRN_PVR
	srwi	r0,r0,16
	cmpwi	cr0,r0,0x39
	cmpwi	cr1,r0,0x3c
	cror	4*cr0+eq,4*cr0+eq,4*cr1+eq
	bnelr

	/* Make sure HID4:rm_ci is off before MMU is turned off, that large
	 * pages are enabled with HID4:61 and clear HID5:DCBZ_size and
	 * HID5:DCBZ32_ill
	 */
	li	r0,0
	mfspr	r11,SPRN_HID4
	rldimi	r11,r0,40,23	/* clear bit 23 (rm_ci) */
	rldimi	r11,r0,2,61	/* clear bit 61 (lg_pg_en) */
	sync
	mtspr	SPRN_HID4,r11
	isync
	sync
	mfspr	r11,SPRN_HID5
	rldimi	r11,r0,6,56	/* clear bits 56 & 57 (DCBZ*) */
	sync
	mtspr	SPRN_HID5,r11
	isync
	sync

	/* Setup some basic HID1 features */
	mfspr	r0,SPRN_HID1
	li	r11,0x1200		/* enable i-fetch cacheability */
	sldi	r11,r11,44		/* and prefetch */
	or	r0,r0,r11
	mtspr	SPRN_HID1,r0
	mtspr	SPRN_HID1,r0		/* HID1 written twice (970 update
					 * sequence -- see 970FX UM) */
	isync

	/* Clear HIOR */
	li	r0,0
	sync
	/* Fix: mtspr takes a GPR, not an immediate.  The original
	 * "mtspr SPRN_HIOR,0" only worked because the assembler reads
	 * the bare 0 as r0; spell the register explicitly.
	 */
	mtspr	SPRN_HIOR,r0		/* Clear interrupt prefix */
	isync
	blr
66 | |||
_GLOBAL(__setup_cpu_power4)
	blr				/* POWER4 needs no extra setup */
_GLOBAL(__setup_cpu_ppc970)
	/* Configure power-saving modes in HID0:
	 * insert 0b0101 into the DOZE/NAP/SLEEP/DPM field, i.e.
	 * clear DOZE and SLEEP, set NAP and DPM.
	 */
	mfspr	r0,SPRN_HID0
	li	r11,5			/* clear DOZE and SLEEP */
	rldimi	r0,r11,52,8		/* set NAP and DPM */
	mtspr	SPRN_HID0,r0
	/* HID0 is read back six times after the write -- the 970's
	 * documented HID0 update sequence (see the PPC970FX user
	 * manual; confirm the exact count there).
	 */
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	sync
	isync
	blr
83 | |||
/* Definitions for the table use to save CPU states */
/* Byte offsets into cpu_state_storage; slots are 8 bytes because
 * the 970's HID registers are 64-bit (std/ld in save/restore).
 */
#define CS_HID0		0
#define CS_HID1		8
#define CS_HID4		16
#define CS_HID5		24
#define CS_SIZE		32

	.data
	.balign	L1_CACHE_LINE_SIZE	/* keep save area cache-line aligned */
cpu_state_storage:
	.space	CS_SIZE
	.balign	L1_CACHE_LINE_SIZE,0	/* pad out the trailing line */
	.text
97 | |||
/* Called in normal context to backup CPU 0 state. This
 * does not include cache settings. This function is also
 * called for machine sleep. This does not include the MMU
 * setup, BATs, etc... but rather the "special" registers
 * like HID0, HID1, HID4, etc...
 */
_GLOBAL(__save_cpu_setup)
	/* Some CR fields are volatile, we back it up all */
	mfcr	r7

	/* Get storage ptr */
	lis	r5,cpu_state_storage@h
	ori	r5,r5,cpu_state_storage@l

	/* We only deal with 970 for now */
	mfspr	r0,SPRN_PVR
	srwi	r0,r0,16		/* version: 0x39 = 970, 0x3c = 970FX */
	cmpwi	cr0,r0,0x39
	cmpwi	cr1,r0,0x3c
	cror	4*cr0+eq,4*cr0+eq,4*cr1+eq
	bne	1f

	/* Save HID0,1,4 and 5 (64-bit stores into the table above) */
	mfspr	r3,SPRN_HID0
	std	r3,CS_HID0(r5)
	mfspr	r3,SPRN_HID1
	std	r3,CS_HID1(r5)
	mfspr	r3,SPRN_HID4
	std	r3,CS_HID4(r5)
	mfspr	r3,SPRN_HID5
	std	r3,CS_HID5(r5)

1:
	mtcr	r7			/* restore caller's CR */
	blr
133 | |||
/* Called with no MMU context (typically MSR:IR/DR off) to
 * restore CPU state as backed up by the previous
 * function. This does not include cache setting
 *
 * Runs translation-off, so the storage pointer is formed as a
 * physical address (high half adjusted by -KERNELBASE; the @l half
 * is reused -- assumes KERNELBASE is 64K-aligned).
 */
_GLOBAL(__restore_cpu_setup)
	/* Some CR fields are volatile, we back it up all */
	mfcr	r7

	/* Get storage ptr */
	lis	r5,(cpu_state_storage-KERNELBASE)@h
	ori	r5,r5,cpu_state_storage@l

	/* We only deal with 970 for now */
	mfspr	r0,SPRN_PVR
	srwi	r0,r0,16		/* version: 0x39 = 970, 0x3c = 970FX */
	cmpwi	cr0,r0,0x39
	cmpwi	cr1,r0,0x3c
	cror	4*cr0+eq,4*cr0+eq,4*cr1+eq
	bne	1f

	/* Clear interrupt prefix */
	li	r0,0
	sync
	/* Fix: mtspr takes a GPR, not an immediate.  The original
	 * "mtspr SPRN_HIOR,0" only worked because the assembler reads
	 * the bare 0 as r0; spell the register explicitly.
	 */
	mtspr	SPRN_HIOR,r0
	isync

	/* Restore HID0; read back repeatedly per the 970 HID0 update
	 * sequence (see the PPC970FX user manual).
	 */
	ld	r3,CS_HID0(r5)
	sync
	isync
	mtspr	SPRN_HID0,r3
	mfspr	r3,SPRN_HID0
	mfspr	r3,SPRN_HID0
	mfspr	r3,SPRN_HID0
	mfspr	r3,SPRN_HID0
	mfspr	r3,SPRN_HID0
	mfspr	r3,SPRN_HID0
	sync
	isync

	/* Restore HID1 (written twice per the 970 update sequence) */
	ld	r3,CS_HID1(r5)
	sync
	isync
	mtspr	SPRN_HID1,r3
	mtspr	SPRN_HID1,r3
	sync
	isync

	/* Restore HID4 */
	ld	r3,CS_HID4(r5)
	sync
	isync
	mtspr	SPRN_HID4,r3
	sync
	isync

	/* Restore HID5 */
	ld	r3,CS_HID5(r5)
	sync
	isync
	mtspr	SPRN_HID5,r3
	sync
	isync
1:
	mtcr	r7			/* restore caller's CR */
	blr
201 | |||
diff --git a/arch/ppc/kernel/cputable.c b/arch/ppc/kernel/cputable.c new file mode 100644 index 000000000000..8aa5e8c69009 --- /dev/null +++ b/arch/ppc/kernel/cputable.c | |||
@@ -0,0 +1,922 @@ | |||
1 | /* | ||
2 | * arch/ppc/kernel/cputable.c | ||
3 | * | ||
4 | * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/config.h> | ||
13 | #include <linux/string.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/threads.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <asm/cputable.h> | ||
18 | |||
/* Per-CPU pointer to the cpu_spec entry matched against the PVR at
 * boot (filled in by the identification code).
 */
struct cpu_spec* cur_cpu_spec[NR_CPUS];

/* Low-level per-family setup routines, implemented in the
 * cpu_setup_*.S files.  Arguments: relocation offset, logical CPU
 * number, and the matched cpu_spec entry.
 */
extern void __setup_cpu_601(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
extern void __setup_cpu_603(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
extern void __setup_cpu_604(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
extern void __setup_cpu_750(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
extern void __setup_cpu_750cx(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
extern void __setup_cpu_750fx(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
extern void __setup_cpu_7400(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
extern void __setup_cpu_7410(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
extern void __setup_cpu_745x(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
extern void __setup_cpu_power3(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
extern void __setup_cpu_power4(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
extern void __setup_cpu_ppc970(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
extern void __setup_cpu_generic(unsigned long offset, int cpu_nr, struct cpu_spec* spec);

/* True when building for a "classic" 6xx-style core.
 * NOTE(review): using defined() inside a macro expansion is not
 * strictly portable C preprocessor usage; it relies on gcc behavior.
 */
#define CLASSIC_PPC (!defined(CONFIG_8xx) && !defined(CONFIG_4xx) && \
		     !defined(CONFIG_POWER3) && !defined(CONFIG_POWER4) && \
		     !defined(CONFIG_BOOKE))

/* This table only contains "desktop" CPUs, it need to be filled with embedded
 * ones as well...
 */
/* Baseline AT_HWCAP user-feature bits shared by the desktop CPUs. */
#define COMMON_PPC	(PPC_FEATURE_32 | PPC_FEATURE_HAS_FPU | \
			 PPC_FEATURE_HAS_MMU)

/* We only set the altivec features if the kernel was compiled with altivec
 * support
 */
#ifdef CONFIG_ALTIVEC
#define CPU_FTR_ALTIVEC_COMP	CPU_FTR_ALTIVEC
#define PPC_FEATURE_ALTIVEC_COMP	PPC_FEATURE_HAS_ALTIVEC
#else
#define CPU_FTR_ALTIVEC_COMP	0
#define PPC_FEATURE_ALTIVEC_COMP	0
#endif

/* We only set the spe features if the kernel was compiled with
 * spe support
 */
#ifdef CONFIG_SPE
#define PPC_FEATURE_SPE_COMP	PPC_FEATURE_HAS_SPE
#else
#define PPC_FEATURE_SPE_COMP	0
#endif

/* We need to mark all pages as being coherent if we're SMP or we
 * have a 74[45]x and an MPC107 host bridge.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_MPC10X_BRIDGE)
#define CPU_FTR_COMMON	CPU_FTR_NEED_COHERENT
#else
#define CPU_FTR_COMMON	0
#endif

/* The powersave features NAP & DOZE seems to confuse BDI when
   debugging. So if a BDI is used, disable theses
 */
#ifndef CONFIG_BDI_SWITCH
#define CPU_FTR_MAYBE_CAN_DOZE	CPU_FTR_CAN_DOZE
#define CPU_FTR_MAYBE_CAN_NAP	CPU_FTR_CAN_NAP
#else
#define CPU_FTR_MAYBE_CAN_DOZE	0
#define CPU_FTR_MAYBE_CAN_NAP	0
#endif
84 | |||
85 | struct cpu_spec cpu_specs[] = { | ||
86 | #if CLASSIC_PPC | ||
87 | { /* 601 */ | ||
88 | .pvr_mask = 0xffff0000, | ||
89 | .pvr_value = 0x00010000, | ||
90 | .cpu_name = "601", | ||
91 | .cpu_features = CPU_FTR_COMMON | CPU_FTR_601 | | ||
92 | CPU_FTR_HPTE_TABLE, | ||
93 | .cpu_user_features = COMMON_PPC | PPC_FEATURE_601_INSTR | | ||
94 | PPC_FEATURE_UNIFIED_CACHE, | ||
95 | .icache_bsize = 32, | ||
96 | .dcache_bsize = 32, | ||
97 | .cpu_setup = __setup_cpu_601 | ||
98 | }, | ||
99 | { /* 603 */ | ||
100 | .pvr_mask = 0xffff0000, | ||
101 | .pvr_value = 0x00030000, | ||
102 | .cpu_name = "603", | ||
103 | .cpu_features = CPU_FTR_COMMON | | ||
104 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | | ||
105 | CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP, | ||
106 | .cpu_user_features = COMMON_PPC, | ||
107 | .icache_bsize = 32, | ||
108 | .dcache_bsize = 32, | ||
109 | .cpu_setup = __setup_cpu_603 | ||
110 | }, | ||
111 | { /* 603e */ | ||
112 | .pvr_mask = 0xffff0000, | ||
113 | .pvr_value = 0x00060000, | ||
114 | .cpu_name = "603e", | ||
115 | .cpu_features = CPU_FTR_COMMON | | ||
116 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | | ||
117 | CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP, | ||
118 | .cpu_user_features = COMMON_PPC, | ||
119 | .icache_bsize = 32, | ||
120 | .dcache_bsize = 32, | ||
121 | .cpu_setup = __setup_cpu_603 | ||
122 | }, | ||
123 | { /* 603ev */ | ||
124 | .pvr_mask = 0xffff0000, | ||
125 | .pvr_value = 0x00070000, | ||
126 | .cpu_name = "603ev", | ||
127 | .cpu_features = CPU_FTR_COMMON | | ||
128 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | | ||
129 | CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP, | ||
130 | .cpu_user_features = COMMON_PPC, | ||
131 | .icache_bsize = 32, | ||
132 | .dcache_bsize = 32, | ||
133 | .cpu_setup = __setup_cpu_603 | ||
134 | }, | ||
135 | { /* 604 */ | ||
136 | .pvr_mask = 0xffff0000, | ||
137 | .pvr_value = 0x00040000, | ||
138 | .cpu_name = "604", | ||
139 | .cpu_features = CPU_FTR_COMMON | | ||
140 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | | ||
141 | CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE, | ||
142 | .cpu_user_features = COMMON_PPC, | ||
143 | .icache_bsize = 32, | ||
144 | .dcache_bsize = 32, | ||
145 | .num_pmcs = 2, | ||
146 | .cpu_setup = __setup_cpu_604 | ||
147 | }, | ||
148 | { /* 604e */ | ||
149 | .pvr_mask = 0xfffff000, | ||
150 | .pvr_value = 0x00090000, | ||
151 | .cpu_name = "604e", | ||
152 | .cpu_features = CPU_FTR_COMMON | | ||
153 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | | ||
154 | CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE, | ||
155 | .cpu_user_features = COMMON_PPC, | ||
156 | .icache_bsize = 32, | ||
157 | .dcache_bsize = 32, | ||
158 | .num_pmcs = 4, | ||
159 | .cpu_setup = __setup_cpu_604 | ||
160 | }, | ||
161 | { /* 604r */ | ||
162 | .pvr_mask = 0xffff0000, | ||
163 | .pvr_value = 0x00090000, | ||
164 | .cpu_name = "604r", | ||
165 | .cpu_features = CPU_FTR_COMMON | | ||
166 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | | ||
167 | CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE, | ||
168 | .cpu_user_features = COMMON_PPC, | ||
169 | .icache_bsize = 32, | ||
170 | .dcache_bsize = 32, | ||
171 | .num_pmcs = 4, | ||
172 | .cpu_setup = __setup_cpu_604 | ||
173 | }, | ||
174 | { /* 604ev */ | ||
175 | .pvr_mask = 0xffff0000, | ||
176 | .pvr_value = 0x000a0000, | ||
177 | .cpu_name = "604ev", | ||
178 | .cpu_features = CPU_FTR_COMMON | | ||
179 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | | ||
180 | CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE, | ||
181 | .cpu_user_features = COMMON_PPC, | ||
182 | .icache_bsize = 32, | ||
183 | .dcache_bsize = 32, | ||
184 | .num_pmcs = 4, | ||
185 | .cpu_setup = __setup_cpu_604 | ||
186 | }, | ||
187 | { /* 740/750 (0x4202, don't support TAU ?) */ | ||
188 | .pvr_mask = 0xffffffff, | ||
189 | .pvr_value = 0x00084202, | ||
190 | .cpu_name = "740/750", | ||
191 | .cpu_features = CPU_FTR_COMMON | | ||
192 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | | ||
193 | CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_HPTE_TABLE | | ||
194 | CPU_FTR_MAYBE_CAN_NAP, | ||
195 | .cpu_user_features = COMMON_PPC, | ||
196 | .icache_bsize = 32, | ||
197 | .dcache_bsize = 32, | ||
198 | .num_pmcs = 4, | ||
199 | .cpu_setup = __setup_cpu_750 | ||
200 | }, | ||
201 | { /* 745/755 */ | ||
202 | .pvr_mask = 0xfffff000, | ||
203 | .pvr_value = 0x00083000, | ||
204 | .cpu_name = "745/755", | ||
205 | .cpu_features = CPU_FTR_COMMON | | ||
206 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | | ||
207 | CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU | | ||
208 | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP, | ||
209 | .cpu_user_features = COMMON_PPC, | ||
210 | .icache_bsize = 32, | ||
211 | .dcache_bsize = 32, | ||
212 | .num_pmcs = 4, | ||
213 | .cpu_setup = __setup_cpu_750 | ||
214 | }, | ||
215 | { /* 750CX (80100 and 8010x?) */ | ||
216 | .pvr_mask = 0xfffffff0, | ||
217 | .pvr_value = 0x00080100, | ||
218 | .cpu_name = "750CX", | ||
219 | .cpu_features = CPU_FTR_COMMON | | ||
220 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | | ||
221 | CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU | | ||
222 | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP, | ||
223 | .cpu_user_features = COMMON_PPC, | ||
224 | .icache_bsize = 32, | ||
225 | .dcache_bsize = 32, | ||
226 | .num_pmcs = 4, | ||
227 | .cpu_setup = __setup_cpu_750cx | ||
228 | }, | ||
229 | { /* 750CX (82201 and 82202) */ | ||
230 | .pvr_mask = 0xfffffff0, | ||
231 | .pvr_value = 0x00082200, | ||
232 | .cpu_name = "750CX", | ||
233 | .cpu_features = CPU_FTR_COMMON | | ||
234 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | | ||
235 | CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU | | ||
236 | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP, | ||
237 | .cpu_user_features = COMMON_PPC, | ||
238 | .icache_bsize = 32, | ||
239 | .dcache_bsize = 32, | ||
240 | .num_pmcs = 4, | ||
241 | .cpu_setup = __setup_cpu_750cx | ||
242 | }, | ||
243 | { /* 750CXe (82214) */ | ||
244 | .pvr_mask = 0xfffffff0, | ||
245 | .pvr_value = 0x00082210, | ||
246 | .cpu_name = "750CXe", | ||
247 | .cpu_features = CPU_FTR_COMMON | | ||
248 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | | ||
249 | CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU | | ||
250 | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP, | ||
251 | .cpu_user_features = COMMON_PPC, | ||
252 | .icache_bsize = 32, | ||
253 | .dcache_bsize = 32, | ||
254 | .num_pmcs = 4, | ||
255 | .cpu_setup = __setup_cpu_750cx | ||
256 | }, | ||
257 | { /* 750FX rev 1.x */ | ||
258 | .pvr_mask = 0xffffff00, | ||
259 | .pvr_value = 0x70000100, | ||
260 | .cpu_name = "750FX", | ||
261 | .cpu_features = CPU_FTR_COMMON | | ||
262 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | | ||
263 | CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU | | ||
264 | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | | ||
265 | CPU_FTR_DUAL_PLL_750FX | CPU_FTR_NO_DPM, | ||
266 | .cpu_user_features = COMMON_PPC, | ||
267 | .icache_bsize = 32, | ||
268 | .dcache_bsize = 32, | ||
269 | .num_pmcs = 4, | ||
270 | .cpu_setup = __setup_cpu_750 | ||
271 | }, | ||
272 | { /* 750FX rev 2.0 must disable HID0[DPM] */ | ||
273 | .pvr_mask = 0xffffffff, | ||
274 | .pvr_value = 0x70000200, | ||
275 | .cpu_name = "750FX", | ||
276 | .cpu_features = CPU_FTR_COMMON | | ||
277 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | | ||
278 | CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU | | ||
279 | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | | ||
280 | CPU_FTR_NO_DPM, | ||
281 | .cpu_user_features = COMMON_PPC, | ||
282 | .icache_bsize = 32, | ||
283 | .dcache_bsize = 32, | ||
284 | .num_pmcs = 4, | ||
285 | .cpu_setup = __setup_cpu_750 | ||
286 | }, | ||
287 | { /* 750FX (All revs except 2.0) */ | ||
288 | .pvr_mask = 0xffff0000, | ||
289 | .pvr_value = 0x70000000, | ||
290 | .cpu_name = "750FX", | ||
291 | .cpu_features = CPU_FTR_COMMON | | ||
292 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | | ||
293 | CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU | | ||
294 | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | | ||
295 | CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS, | ||
296 | .cpu_user_features = COMMON_PPC, | ||
297 | .icache_bsize = 32, | ||
298 | .dcache_bsize = 32, | ||
299 | .num_pmcs = 4, | ||
300 | .cpu_setup = __setup_cpu_750fx | ||
301 | }, | ||
302 | { /* 750GX */ | ||
303 | .pvr_mask = 0xffff0000, | ||
304 | .pvr_value = 0x70020000, | ||
305 | .cpu_name = "750GX", | ||
306 | .cpu_features = CPU_FTR_SPLIT_ID_CACHE | | ||
307 | CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | | ||
308 | CPU_FTR_L2CR | CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | | ||
309 | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_DUAL_PLL_750FX | | ||
310 | CPU_FTR_HAS_HIGH_BATS, | ||
311 | .cpu_user_features = COMMON_PPC, | ||
312 | .icache_bsize = 32, | ||
313 | .dcache_bsize = 32, | ||
314 | .num_pmcs = 4, | ||
315 | .cpu_setup = __setup_cpu_750fx | ||
316 | }, | ||
317 | { /* 740/750 (L2CR bit need fixup for 740) */ | ||
318 | .pvr_mask = 0xffff0000, | ||
319 | .pvr_value = 0x00080000, | ||
320 | .cpu_name = "740/750", | ||
321 | .cpu_features = CPU_FTR_COMMON | | ||
322 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | | ||
323 | CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU | | ||
324 | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP, | ||
325 | .cpu_user_features = COMMON_PPC, | ||
326 | .icache_bsize = 32, | ||
327 | .dcache_bsize = 32, | ||
328 | .num_pmcs = 4, | ||
329 | .cpu_setup = __setup_cpu_750 | ||
330 | }, | ||
331 | { /* 7400 rev 1.1 ? (no TAU) */ | ||
332 | .pvr_mask = 0xffffffff, | ||
333 | .pvr_value = 0x000c1101, | ||
334 | .cpu_name = "7400 (1.1)", | ||
335 | .cpu_features = CPU_FTR_COMMON | | ||
336 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | | ||
337 | CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | | ||
338 | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP, | ||
339 | .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP, | ||
340 | .icache_bsize = 32, | ||
341 | .dcache_bsize = 32, | ||
342 | .num_pmcs = 4, | ||
343 | .cpu_setup = __setup_cpu_7400 | ||
344 | }, | ||
345 | { /* 7400 */ | ||
346 | .pvr_mask = 0xffff0000, | ||
347 | .pvr_value = 0x000c0000, | ||
348 | .cpu_name = "7400", | ||
349 | .cpu_features = CPU_FTR_COMMON | | ||
350 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | | ||
351 | CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU | | ||
352 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE | | ||
353 | CPU_FTR_MAYBE_CAN_NAP, | ||
354 | .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP, | ||
355 | .icache_bsize = 32, | ||
356 | .dcache_bsize = 32, | ||
357 | .num_pmcs = 4, | ||
358 | .cpu_setup = __setup_cpu_7400 | ||
359 | }, | ||
360 | { /* 7410 */ | ||
361 | .pvr_mask = 0xffff0000, | ||
362 | .pvr_value = 0x800c0000, | ||
363 | .cpu_name = "7410", | ||
364 | .cpu_features = CPU_FTR_COMMON | | ||
365 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | | ||
366 | CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU | | ||
367 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE | | ||
368 | CPU_FTR_MAYBE_CAN_NAP, | ||
369 | .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP, | ||
370 | .icache_bsize = 32, | ||
371 | .dcache_bsize = 32, | ||
372 | .num_pmcs = 4, | ||
373 | .cpu_setup = __setup_cpu_7410 | ||
374 | }, | ||
375 | { /* 7450 2.0 - no doze/nap */ | ||
376 | .pvr_mask = 0xffffffff, | ||
377 | .pvr_value = 0x80000200, | ||
378 | .cpu_name = "7450", | ||
379 | .cpu_features = CPU_FTR_COMMON | | ||
380 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | | ||
381 | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR | | ||
382 | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | | ||
383 | CPU_FTR_NEED_COHERENT, | ||
384 | .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP, | ||
385 | .icache_bsize = 32, | ||
386 | .dcache_bsize = 32, | ||
387 | .num_pmcs = 6, | ||
388 | .cpu_setup = __setup_cpu_745x | ||
389 | }, | ||
390 | { /* 7450 2.1 */ | ||
391 | .pvr_mask = 0xffffffff, | ||
392 | .pvr_value = 0x80000201, | ||
393 | .cpu_name = "7450", | ||
394 | .cpu_features = CPU_FTR_COMMON | | ||
395 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | | ||
396 | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | | ||
397 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR | | ||
398 | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | | ||
399 | CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | | ||
400 | CPU_FTR_NEED_COHERENT, | ||
401 | .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP, | ||
402 | .icache_bsize = 32, | ||
403 | .dcache_bsize = 32, | ||
404 | .num_pmcs = 6, | ||
405 | .cpu_setup = __setup_cpu_745x | ||
406 | }, | ||
407 | { /* 7450 2.3 and newer */ | ||
408 | .pvr_mask = 0xffff0000, | ||
409 | .pvr_value = 0x80000000, | ||
410 | .cpu_name = "7450", | ||
411 | .cpu_features = CPU_FTR_COMMON | | ||
412 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | | ||
413 | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | | ||
414 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR | | ||
415 | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | | ||
416 | CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_NEED_COHERENT, | ||
417 | .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP, | ||
418 | .icache_bsize = 32, | ||
419 | .dcache_bsize = 32, | ||
420 | .num_pmcs = 6, | ||
421 | .cpu_setup = __setup_cpu_745x | ||
422 | }, | ||
423 | { /* 7455 rev 1.x */ | ||
424 | .pvr_mask = 0xffffff00, | ||
425 | .pvr_value = 0x80010100, | ||
426 | .cpu_name = "7455", | ||
427 | .cpu_features = CPU_FTR_COMMON | | ||
428 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | | ||
429 | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR | | ||
430 | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | | ||
431 | CPU_FTR_HAS_HIGH_BATS | CPU_FTR_NEED_COHERENT, | ||
432 | .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP, | ||
433 | .icache_bsize = 32, | ||
434 | .dcache_bsize = 32, | ||
435 | .num_pmcs = 6, | ||
436 | .cpu_setup = __setup_cpu_745x | ||
437 | }, | ||
438 | { /* 7455 rev 2.0 */ | ||
439 | .pvr_mask = 0xffffffff, | ||
440 | .pvr_value = 0x80010200, | ||
441 | .cpu_name = "7455", | ||
442 | .cpu_features = CPU_FTR_COMMON | | ||
443 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | | ||
444 | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | | ||
445 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR | | ||
446 | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | | ||
447 | CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | | ||
448 | CPU_FTR_NEED_COHERENT | CPU_FTR_HAS_HIGH_BATS, | ||
449 | .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP, | ||
450 | .icache_bsize = 32, | ||
451 | .dcache_bsize = 32, | ||
452 | .num_pmcs = 6, | ||
453 | .cpu_setup = __setup_cpu_745x | ||
454 | }, | ||
455 | { /* 7455 others */ | ||
456 | .pvr_mask = 0xffff0000, | ||
457 | .pvr_value = 0x80010000, | ||
458 | .cpu_name = "7455", | ||
459 | .cpu_features = CPU_FTR_COMMON | | ||
460 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | | ||
461 | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | | ||
462 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR | | ||
463 | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | | ||
464 | CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | | ||
465 | CPU_FTR_NEED_COHERENT, | ||
466 | .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP, | ||
467 | .icache_bsize = 32, | ||
468 | .dcache_bsize = 32, | ||
469 | .num_pmcs = 6, | ||
470 | .cpu_setup = __setup_cpu_745x | ||
471 | }, | ||
472 | { /* 7447/7457 Rev 1.0 */ | ||
473 | .pvr_mask = 0xffffffff, | ||
474 | .pvr_value = 0x80020100, | ||
475 | .cpu_name = "7447/7457", | ||
476 | .cpu_features = CPU_FTR_COMMON | | ||
477 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | | ||
478 | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | | ||
479 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR | | ||
480 | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | | ||
481 | CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | | ||
482 | CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC, | ||
483 | .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP, | ||
484 | .icache_bsize = 32, | ||
485 | .dcache_bsize = 32, | ||
486 | .num_pmcs = 6, | ||
487 | .cpu_setup = __setup_cpu_745x | ||
488 | }, | ||
489 | { /* 7447/7457 Rev 1.1 */ | ||
490 | .pvr_mask = 0xffffffff, | ||
491 | .pvr_value = 0x80020101, | ||
492 | .cpu_name = "7447/7457", | ||
493 | .cpu_features = CPU_FTR_COMMON | | ||
494 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | | ||
495 | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | | ||
496 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR | | ||
497 | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | | ||
498 | CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | | ||
499 | CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC, | ||
500 | .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP, | ||
501 | .icache_bsize = 32, | ||
502 | .dcache_bsize = 32, | ||
503 | .num_pmcs = 6, | ||
504 | .cpu_setup = __setup_cpu_745x | ||
505 | }, | ||
506 | { /* 7447/7457 Rev 1.2 and later */ | ||
507 | .pvr_mask = 0xffff0000, | ||
508 | .pvr_value = 0x80020000, | ||
509 | .cpu_name = "7447/7457", | ||
510 | .cpu_features = CPU_FTR_COMMON | | ||
511 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | | ||
512 | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | | ||
513 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR | | ||
514 | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | | ||
515 | CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | | ||
516 | CPU_FTR_NEED_COHERENT, | ||
517 | .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP, | ||
518 | .icache_bsize = 32, | ||
519 | .dcache_bsize = 32, | ||
520 | .num_pmcs = 6, | ||
521 | .cpu_setup = __setup_cpu_745x | ||
522 | }, | ||
523 | { /* 7447A */ | ||
524 | .pvr_mask = 0xffff0000, | ||
525 | .pvr_value = 0x80030000, | ||
526 | .cpu_name = "7447A", | ||
527 | .cpu_features = CPU_FTR_COMMON | | ||
528 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | | ||
529 | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | | ||
530 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE | | ||
531 | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | | ||
532 | CPU_FTR_HAS_HIGH_BATS | CPU_FTR_NEED_COHERENT, | ||
533 | .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP, | ||
534 | .icache_bsize = 32, | ||
535 | .dcache_bsize = 32, | ||
536 | .num_pmcs = 6, | ||
537 | .cpu_setup = __setup_cpu_745x | ||
538 | }, | ||
539 | { /* 82xx (8240, 8245, 8260 are all 603e cores) */ | ||
540 | .pvr_mask = 0x7fff0000, | ||
541 | .pvr_value = 0x00810000, | ||
542 | .cpu_name = "82xx", | ||
543 | .cpu_features = CPU_FTR_COMMON | | ||
544 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | | ||
545 | CPU_FTR_USE_TB, | ||
546 | .cpu_user_features = COMMON_PPC, | ||
547 | .icache_bsize = 32, | ||
548 | .dcache_bsize = 32, | ||
549 | .cpu_setup = __setup_cpu_603 | ||
550 | }, | ||
551 | { /* All G2_LE (603e core, plus some) have the same pvr */ | ||
552 | .pvr_mask = 0x7fff0000, | ||
553 | .pvr_value = 0x00820000, | ||
554 | .cpu_name = "G2_LE", | ||
555 | .cpu_features = CPU_FTR_SPLIT_ID_CACHE | | ||
556 | CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | | ||
557 | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS, | ||
558 | .cpu_user_features = COMMON_PPC, | ||
559 | .icache_bsize = 32, | ||
560 | .dcache_bsize = 32, | ||
561 | .cpu_setup = __setup_cpu_603 | ||
562 | }, | ||
563 | { /* e300 (a 603e core, plus some) on 83xx */ | ||
564 | .pvr_mask = 0x7fff0000, | ||
565 | .pvr_value = 0x00830000, | ||
566 | .cpu_name = "e300", | ||
567 | .cpu_features = CPU_FTR_SPLIT_ID_CACHE | | ||
568 | CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | | ||
569 | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS, | ||
570 | .cpu_user_features = COMMON_PPC, | ||
571 | .icache_bsize = 32, | ||
572 | .dcache_bsize = 32, | ||
573 | .cpu_setup = __setup_cpu_603 | ||
574 | }, | ||
575 | { /* default match, we assume split I/D cache & TB (non-601)... */ | ||
576 | .pvr_mask = 0x00000000, | ||
577 | .pvr_value = 0x00000000, | ||
578 | .cpu_name = "(generic PPC)", | ||
579 | .cpu_features = CPU_FTR_COMMON | | ||
580 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | | ||
581 | CPU_FTR_HPTE_TABLE, | ||
582 | .cpu_user_features = COMMON_PPC, | ||
583 | .icache_bsize = 32, | ||
584 | .dcache_bsize = 32, | ||
585 | .cpu_setup = __setup_cpu_generic | ||
586 | }, | ||
587 | #endif /* CLASSIC_PPC */ | ||
588 | #ifdef CONFIG_PPC64BRIDGE | ||
589 | { /* Power3 */ | ||
590 | .pvr_mask = 0xffff0000, | ||
591 | .pvr_value = 0x00400000, | ||
592 | .cpu_name = "Power3 (630)", | ||
593 | .cpu_features = CPU_FTR_COMMON | | ||
594 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | | ||
595 | CPU_FTR_HPTE_TABLE, | ||
596 | .cpu_user_features = COMMON_PPC | PPC_FEATURE_64, | ||
597 | .icache_bsize = 128, | ||
598 | .dcache_bsize = 128, | ||
599 | .num_pmcs = 8, | ||
600 | .cpu_setup = __setup_cpu_power3 | ||
601 | }, | ||
602 | { /* Power3+ */ | ||
603 | .pvr_mask = 0xffff0000, | ||
604 | .pvr_value = 0x00410000, | ||
605 | .cpu_name = "Power3 (630+)", | ||
606 | .cpu_features = CPU_FTR_COMMON | | ||
607 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | | ||
608 | CPU_FTR_HPTE_TABLE, | ||
609 | .cpu_user_features = COMMON_PPC | PPC_FEATURE_64, | ||
610 | .icache_bsize = 128, | ||
611 | .dcache_bsize = 128, | ||
612 | .num_pmcs = 8, | ||
613 | .cpu_setup = __setup_cpu_power3 | ||
614 | }, | ||
615 | { /* I-star */ | ||
616 | .pvr_mask = 0xffff0000, | ||
617 | .pvr_value = 0x00360000, | ||
618 | .cpu_name = "I-star", | ||
619 | .cpu_features = CPU_FTR_COMMON | | ||
620 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | | ||
621 | CPU_FTR_HPTE_TABLE, | ||
622 | .cpu_user_features = COMMON_PPC | PPC_FEATURE_64, | ||
623 | .icache_bsize = 128, | ||
624 | .dcache_bsize = 128, | ||
625 | .num_pmcs = 8, | ||
626 | .cpu_setup = __setup_cpu_power3 | ||
627 | }, | ||
628 | { /* S-star */ | ||
629 | .pvr_mask = 0xffff0000, | ||
630 | .pvr_value = 0x00370000, | ||
631 | .cpu_name = "S-star", | ||
632 | .cpu_features = CPU_FTR_COMMON | | ||
633 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | | ||
634 | CPU_FTR_HPTE_TABLE, | ||
635 | .cpu_user_features = COMMON_PPC | PPC_FEATURE_64, | ||
636 | .icache_bsize = 128, | ||
637 | .dcache_bsize = 128, | ||
638 | .num_pmcs = 8, | ||
639 | .cpu_setup = __setup_cpu_power3 | ||
640 | }, | ||
641 | #endif /* CONFIG_PPC64BRIDGE */ | ||
642 | #ifdef CONFIG_POWER4 | ||
643 | { /* Power4 */ | ||
644 | .pvr_mask = 0xffff0000, | ||
645 | .pvr_value = 0x00350000, | ||
646 | .cpu_name = "Power4", | ||
647 | .cpu_features = CPU_FTR_COMMON | | ||
648 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | | ||
649 | CPU_FTR_HPTE_TABLE, | ||
650 | .cpu_user_features = COMMON_PPC | PPC_FEATURE_64, | ||
651 | .icache_bsize = 128, | ||
652 | .dcache_bsize = 128, | ||
653 | .num_pmcs = 8, | ||
654 | .cpu_setup = __setup_cpu_power4 | ||
655 | }, | ||
656 | { /* PPC970 */ | ||
657 | .pvr_mask = 0xffff0000, | ||
658 | .pvr_value = 0x00390000, | ||
659 | .cpu_name = "PPC970", | ||
660 | .cpu_features = CPU_FTR_COMMON | | ||
661 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | | ||
662 | CPU_FTR_HPTE_TABLE | | ||
663 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_MAYBE_CAN_NAP, | ||
664 | .cpu_user_features = COMMON_PPC | PPC_FEATURE_64 | | ||
665 | PPC_FEATURE_ALTIVEC_COMP, | ||
666 | .icache_bsize = 128, | ||
667 | .dcache_bsize = 128, | ||
668 | .num_pmcs = 8, | ||
669 | .cpu_setup = __setup_cpu_ppc970 | ||
670 | }, | ||
671 | { /* PPC970FX */ | ||
672 | .pvr_mask = 0xffff0000, | ||
673 | .pvr_value = 0x003c0000, | ||
674 | .cpu_name = "PPC970FX", | ||
675 | .cpu_features = CPU_FTR_COMMON | | ||
676 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | | ||
677 | CPU_FTR_HPTE_TABLE | | ||
678 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_MAYBE_CAN_NAP, | ||
679 | .cpu_user_features = COMMON_PPC | PPC_FEATURE_64 | | ||
680 | PPC_FEATURE_ALTIVEC_COMP, | ||
681 | .icache_bsize = 128, | ||
682 | .dcache_bsize = 128, | ||
683 | .num_pmcs = 8, | ||
684 | .cpu_setup = __setup_cpu_ppc970 | ||
685 | }, | ||
686 | #endif /* CONFIG_POWER4 */ | ||
687 | #ifdef CONFIG_8xx | ||
688 | { /* 8xx */ | ||
689 | .pvr_mask = 0xffff0000, | ||
690 | .pvr_value = 0x00500000, | ||
691 | .cpu_name = "8xx", | ||
692 | /* CPU_FTR_MAYBE_CAN_DOZE is possible, | ||
693 | * if the 8xx code is there.... */ | ||
694 | .cpu_features = CPU_FTR_SPLIT_ID_CACHE | | ||
695 | CPU_FTR_USE_TB, | ||
696 | .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, | ||
697 | .icache_bsize = 16, | ||
698 | .dcache_bsize = 16, | ||
699 | }, | ||
700 | #endif /* CONFIG_8xx */ | ||
701 | #ifdef CONFIG_40x | ||
702 | { /* 403GC */ | ||
703 | .pvr_mask = 0xffffff00, | ||
704 | .pvr_value = 0x00200200, | ||
705 | .cpu_name = "403GC", | ||
706 | .cpu_features = CPU_FTR_SPLIT_ID_CACHE | | ||
707 | CPU_FTR_USE_TB, | ||
708 | .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, | ||
709 | .icache_bsize = 16, | ||
710 | .dcache_bsize = 16, | ||
711 | }, | ||
712 | { /* 403GCX */ | ||
713 | .pvr_mask = 0xffffff00, | ||
714 | .pvr_value = 0x00201400, | ||
715 | .cpu_name = "403GCX", | ||
716 | .cpu_features = CPU_FTR_SPLIT_ID_CACHE | | ||
717 | CPU_FTR_USE_TB, | ||
718 | .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, | ||
719 | .icache_bsize = 16, | ||
720 | .dcache_bsize = 16, | ||
721 | }, | ||
722 | { /* 403G ?? */ | ||
723 | .pvr_mask = 0xffff0000, | ||
724 | .pvr_value = 0x00200000, | ||
725 | .cpu_name = "403G ??", | ||
726 | .cpu_features = CPU_FTR_SPLIT_ID_CACHE | | ||
727 | CPU_FTR_USE_TB, | ||
728 | .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, | ||
729 | .icache_bsize = 16, | ||
730 | .dcache_bsize = 16, | ||
731 | }, | ||
732 | { /* 405GP */ | ||
733 | .pvr_mask = 0xffff0000, | ||
734 | .pvr_value = 0x40110000, | ||
735 | .cpu_name = "405GP", | ||
736 | .cpu_features = CPU_FTR_SPLIT_ID_CACHE | | ||
737 | CPU_FTR_USE_TB, | ||
738 | .cpu_user_features = PPC_FEATURE_32 | | ||
739 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, | ||
740 | .icache_bsize = 32, | ||
741 | .dcache_bsize = 32, | ||
742 | }, | ||
743 | { /* STB 03xxx */ | ||
744 | .pvr_mask = 0xffff0000, | ||
745 | .pvr_value = 0x40130000, | ||
746 | .cpu_name = "STB03xxx", | ||
747 | .cpu_features = CPU_FTR_SPLIT_ID_CACHE | | ||
748 | CPU_FTR_USE_TB, | ||
749 | .cpu_user_features = PPC_FEATURE_32 | | ||
750 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, | ||
751 | .icache_bsize = 32, | ||
752 | .dcache_bsize = 32, | ||
753 | }, | ||
754 | { /* STB 04xxx */ | ||
755 | .pvr_mask = 0xffff0000, | ||
756 | .pvr_value = 0x41810000, | ||
757 | .cpu_name = "STB04xxx", | ||
758 | .cpu_features = CPU_FTR_SPLIT_ID_CACHE | | ||
759 | CPU_FTR_USE_TB, | ||
760 | .cpu_user_features = PPC_FEATURE_32 | | ||
761 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, | ||
762 | .icache_bsize = 32, | ||
763 | .dcache_bsize = 32, | ||
764 | }, | ||
765 | { /* NP405L */ | ||
766 | .pvr_mask = 0xffff0000, | ||
767 | .pvr_value = 0x41610000, | ||
768 | .cpu_name = "NP405L", | ||
769 | .cpu_features = CPU_FTR_SPLIT_ID_CACHE | | ||
770 | CPU_FTR_USE_TB, | ||
771 | .cpu_user_features = PPC_FEATURE_32 | | ||
772 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, | ||
773 | .icache_bsize = 32, | ||
774 | .dcache_bsize = 32, | ||
775 | }, | ||
776 | { /* NP4GS3 */ | ||
777 | .pvr_mask = 0xffff0000, | ||
778 | .pvr_value = 0x40B10000, | ||
779 | .cpu_name = "NP4GS3", | ||
780 | .cpu_features = CPU_FTR_SPLIT_ID_CACHE | | ||
781 | CPU_FTR_USE_TB, | ||
782 | .cpu_user_features = PPC_FEATURE_32 | | ||
783 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, | ||
784 | .icache_bsize = 32, | ||
785 | .dcache_bsize = 32, | ||
786 | }, | ||
787 | { /* NP405H */ | ||
788 | .pvr_mask = 0xffff0000, | ||
789 | .pvr_value = 0x41410000, | ||
790 | .cpu_name = "NP405H", | ||
791 | .cpu_features = CPU_FTR_SPLIT_ID_CACHE | | ||
792 | CPU_FTR_USE_TB, | ||
793 | .cpu_user_features = PPC_FEATURE_32 | | ||
794 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, | ||
795 | .icache_bsize = 32, | ||
796 | .dcache_bsize = 32, | ||
797 | }, | ||
798 | { /* 405GPr */ | ||
799 | .pvr_mask = 0xffff0000, | ||
800 | .pvr_value = 0x50910000, | ||
801 | .cpu_name = "405GPr", | ||
802 | .cpu_features = CPU_FTR_SPLIT_ID_CACHE | | ||
803 | CPU_FTR_USE_TB, | ||
804 | .cpu_user_features = PPC_FEATURE_32 | | ||
805 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, | ||
806 | .icache_bsize = 32, | ||
807 | .dcache_bsize = 32, | ||
808 | }, | ||
809 | { /* STBx25xx */ | ||
810 | .pvr_mask = 0xffff0000, | ||
811 | .pvr_value = 0x51510000, | ||
812 | .cpu_name = "STBx25xx", | ||
813 | .cpu_features = CPU_FTR_SPLIT_ID_CACHE | | ||
814 | CPU_FTR_USE_TB, | ||
815 | .cpu_user_features = PPC_FEATURE_32 | | ||
816 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, | ||
817 | .icache_bsize = 32, | ||
818 | .dcache_bsize = 32, | ||
819 | }, | ||
820 | { /* 405LP */ | ||
821 | .pvr_mask = 0xffff0000, | ||
822 | .pvr_value = 0x41F10000, | ||
823 | .cpu_name = "405LP", | ||
824 | .cpu_features = CPU_FTR_SPLIT_ID_CACHE | | ||
825 | CPU_FTR_USE_TB, | ||
826 | .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, | ||
827 | .icache_bsize = 32, | ||
828 | .dcache_bsize = 32, | ||
829 | }, | ||
830 | { /* Xilinx Virtex-II Pro */ | ||
831 | .pvr_mask = 0xffff0000, | ||
832 | .pvr_value = 0x20010000, | ||
833 | .cpu_name = "Virtex-II Pro", | ||
834 | .cpu_features = CPU_FTR_SPLIT_ID_CACHE | | ||
835 | CPU_FTR_USE_TB, | ||
836 | .cpu_user_features = PPC_FEATURE_32 | | ||
837 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, | ||
838 | .icache_bsize = 32, | ||
839 | .dcache_bsize = 32, | ||
840 | }, | ||
841 | |||
842 | #endif /* CONFIG_40x */ | ||
843 | #ifdef CONFIG_44x | ||
844 | { /* 440GP Rev. B */ | ||
845 | .pvr_mask = 0xf0000fff, | ||
846 | .pvr_value = 0x40000440, | ||
847 | .cpu_name = "440GP Rev. B", | ||
848 | .cpu_features = CPU_FTR_SPLIT_ID_CACHE | | ||
849 | CPU_FTR_USE_TB, | ||
850 | .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, | ||
851 | .icache_bsize = 32, | ||
852 | .dcache_bsize = 32, | ||
853 | }, | ||
854 | { /* 440GP Rev. C */ | ||
855 | .pvr_mask = 0xf0000fff, | ||
856 | .pvr_value = 0x40000481, | ||
857 | .cpu_name = "440GP Rev. C", | ||
858 | .cpu_features = CPU_FTR_SPLIT_ID_CACHE | | ||
859 | CPU_FTR_USE_TB, | ||
860 | .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, | ||
861 | .icache_bsize = 32, | ||
862 | .dcache_bsize = 32, | ||
863 | }, | ||
864 | { /* 440GX Rev. A */ | ||
865 | .pvr_mask = 0xf0000fff, | ||
866 | .pvr_value = 0x50000850, | ||
867 | .cpu_name = "440GX Rev. A", | ||
868 | .cpu_features = CPU_FTR_SPLIT_ID_CACHE | | ||
869 | CPU_FTR_USE_TB, | ||
870 | .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, | ||
871 | .icache_bsize = 32, | ||
872 | .dcache_bsize = 32, | ||
873 | }, | ||
874 | { /* 440GX Rev. B */ | ||
875 | .pvr_mask = 0xf0000fff, | ||
876 | .pvr_value = 0x50000851, | ||
877 | .cpu_name = "440GX Rev. B", | ||
878 | .cpu_features = CPU_FTR_SPLIT_ID_CACHE | | ||
879 | CPU_FTR_USE_TB, | ||
880 | .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, | ||
881 | .icache_bsize = 32, | ||
882 | .dcache_bsize = 32, | ||
883 | }, | ||
884 | { /* 440GX Rev. C */ | ||
885 | .pvr_mask = 0xf0000fff, | ||
886 | .pvr_value = 0x50000892, | ||
887 | .cpu_name = "440GX Rev. C", | ||
888 | .cpu_features = CPU_FTR_SPLIT_ID_CACHE | | ||
889 | CPU_FTR_USE_TB, | ||
890 | .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, | ||
891 | .icache_bsize = 32, | ||
892 | .dcache_bsize = 32, | ||
893 | }, | ||
894 | #endif /* CONFIG_44x */ | ||
895 | #ifdef CONFIG_E500 | ||
896 | { /* e500 */ | ||
897 | .pvr_mask = 0xffff0000, | ||
898 | .pvr_value = 0x80200000, | ||
899 | .cpu_name = "e500", | ||
900 | /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */ | ||
901 | .cpu_features = CPU_FTR_SPLIT_ID_CACHE | | ||
902 | CPU_FTR_USE_TB, | ||
903 | .cpu_user_features = PPC_FEATURE_32 | | ||
904 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP | | ||
905 | PPC_FEATURE_HAS_EFP_SINGLE, | ||
906 | .icache_bsize = 32, | ||
907 | .dcache_bsize = 32, | ||
908 | .num_pmcs = 4, | ||
909 | }, | ||
910 | #endif | ||
911 | #if !CLASSIC_PPC | ||
912 | { /* default match */ | ||
913 | .pvr_mask = 0x00000000, | ||
914 | .pvr_value = 0x00000000, | ||
915 | .cpu_name = "(generic PPC)", | ||
916 | .cpu_features = CPU_FTR_COMMON, | ||
917 | .cpu_user_features = PPC_FEATURE_32, | ||
918 | .icache_bsize = 32, | ||
919 | .dcache_bsize = 32, | ||
920 | } | ||
921 | #endif /* !CLASSIC_PPC */ | ||
922 | }; | ||
diff --git a/arch/ppc/kernel/dma-mapping.c b/arch/ppc/kernel/dma-mapping.c new file mode 100644 index 000000000000..e0c631cf96b0 --- /dev/null +++ b/arch/ppc/kernel/dma-mapping.c | |||
@@ -0,0 +1,447 @@ | |||
1 | /* | ||
2 | * PowerPC version derived from arch/arm/mm/consistent.c | ||
3 | * Copyright (C) 2001 Dan Malek (dmalek@jlc.net) | ||
4 | * | ||
5 | * Copyright (C) 2000 Russell King | ||
6 | * | ||
7 | * Consistent memory allocators. Used for DMA devices that want to | ||
8 | * share uncached memory with the processor core. The function return | ||
9 | * is the virtual address and 'dma_handle' is the physical address. | ||
10 | * Mostly stolen from the ARM port, with some changes for PowerPC. | ||
11 | * -- Dan | ||
12 | * | ||
13 | * Reorganized to get rid of the arch-specific consistent_* functions | ||
14 | * and provide non-coherent implementations for the DMA API. -Matt | ||
15 | * | ||
16 | * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent() | ||
17 | * implementation. This is pulled straight from ARM and barely | ||
18 | * modified. -Matt | ||
19 | * | ||
20 | * This program is free software; you can redistribute it and/or modify | ||
21 | * it under the terms of the GNU General Public License version 2 as | ||
22 | * published by the Free Software Foundation. | ||
23 | */ | ||
24 | |||
25 | #include <linux/config.h> | ||
26 | #include <linux/module.h> | ||
27 | #include <linux/signal.h> | ||
28 | #include <linux/sched.h> | ||
29 | #include <linux/kernel.h> | ||
30 | #include <linux/errno.h> | ||
31 | #include <linux/string.h> | ||
32 | #include <linux/types.h> | ||
33 | #include <linux/ptrace.h> | ||
34 | #include <linux/mman.h> | ||
35 | #include <linux/mm.h> | ||
36 | #include <linux/swap.h> | ||
37 | #include <linux/stddef.h> | ||
38 | #include <linux/vmalloc.h> | ||
39 | #include <linux/init.h> | ||
40 | #include <linux/delay.h> | ||
41 | #include <linux/bootmem.h> | ||
42 | #include <linux/highmem.h> | ||
43 | #include <linux/dma-mapping.h> | ||
44 | #include <linux/hardirq.h> | ||
45 | |||
46 | #include <asm/pgalloc.h> | ||
47 | #include <asm/prom.h> | ||
48 | #include <asm/io.h> | ||
49 | #include <asm/mmu_context.h> | ||
50 | #include <asm/pgtable.h> | ||
51 | #include <asm/mmu.h> | ||
52 | #include <asm/uaccess.h> | ||
53 | #include <asm/smp.h> | ||
54 | #include <asm/machdep.h> | ||
55 | |||
56 | int map_page(unsigned long va, phys_addr_t pa, int flags); | ||
57 | |||
58 | #include <asm/tlbflush.h> | ||
59 | |||
60 | /* | ||
61 | * This address range defaults to a value that is safe for all | ||
62 | * platforms which currently set CONFIG_NOT_COHERENT_CACHE. It | ||
63 | * can be further configured for specific applications under | ||
64 | * the "Advanced Setup" menu. -Matt | ||
65 | */ | ||
66 | #define CONSISTENT_BASE (CONFIG_CONSISTENT_START) | ||
67 | #define CONSISTENT_END (CONFIG_CONSISTENT_START + CONFIG_CONSISTENT_SIZE) | ||
68 | #define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT) | ||
69 | |||
70 | /* | ||
71 | * This is the page table (2MB) covering uncached, DMA consistent allocations | ||
72 | */ | ||
73 | static pte_t *consistent_pte; | ||
74 | static DEFINE_SPINLOCK(consistent_lock); | ||
75 | |||
76 | /* | ||
77 | * VM region handling support. | ||
78 | * | ||
79 | * This should become something generic, handling VM region allocations for | ||
80 | * vmalloc and similar (ioremap, module space, etc). | ||
81 | * | ||
82 | * I envisage vmalloc()'s supporting vm_struct becoming: | ||
83 | * | ||
84 | * struct vm_struct { | ||
85 | * struct vm_region region; | ||
86 | * unsigned long flags; | ||
87 | * struct page **pages; | ||
88 | * unsigned int nr_pages; | ||
89 | * unsigned long phys_addr; | ||
90 | * }; | ||
91 | * | ||
92 | * get_vm_area() would then call vm_region_alloc with an appropriate | ||
93 | * struct vm_region head (eg): | ||
94 | * | ||
95 | * struct vm_region vmalloc_head = { | ||
96 | * .vm_list = LIST_HEAD_INIT(vmalloc_head.vm_list), | ||
97 | * .vm_start = VMALLOC_START, | ||
98 | * .vm_end = VMALLOC_END, | ||
99 | * }; | ||
100 | * | ||
101 | * However, vmalloc_head.vm_start is variable (typically, it is dependent on | ||
102 | * the amount of RAM found at boot time.) I would imagine that get_vm_area() | ||
103 | * would have to initialise this each time prior to calling vm_region_alloc(). | ||
104 | */ | ||
/*
 * One allocated (or sentinel) range inside the consistent mapping window.
 * Regions are kept on an address-ordered list rooted at a head region.
 */
struct vm_region {
	struct list_head vm_list;	/* linkage on the address-ordered list */
	unsigned long vm_start;		/* first byte of the range */
	unsigned long vm_end;		/* one past the last byte of the range */
};

/*
 * Sentinel describing the whole consistent-mapping window
 * [CONSISTENT_BASE, CONSISTENT_END); its vm_list anchors the list of
 * live allocations made by vm_region_alloc().
 */
static struct vm_region consistent_head = {
	.vm_list = LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start = CONSISTENT_BASE,
	.vm_end = CONSISTENT_END,
};
116 | |||
/*
 * First-fit allocator for the window described by 'head': find the lowest
 * gap of at least 'size' bytes between existing regions, record it with a
 * freshly kmalloc'ed vm_region inserted in address order, and return it.
 * Returns NULL if the bookkeeping node cannot be allocated or no gap is
 * big enough.  'gfp' is used only for the kmalloc of the node itself.
 * The list walk runs under consistent_lock with interrupts disabled.
 */
static struct vm_region *
vm_region_alloc(struct vm_region *head, size_t size, int gfp)
{
	unsigned long addr = head->vm_start, end = head->vm_end - size;
	unsigned long flags;
	struct vm_region *c, *new;

	/* Allocate the node before taking the spinlock (kmalloc may sleep). */
	new = kmalloc(sizeof(struct vm_region), gfp);
	if (!new)
		goto out;

	spin_lock_irqsave(&consistent_lock, flags);

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if ((addr + size) < addr)
			goto nospc;	/* address arithmetic wrapped: no space */
		if ((addr + size) <= c->vm_start)
			goto found;	/* gap before 'c' fits the request */
		addr = c->vm_end;	/* try the gap after 'c' */
		if (addr > end)
			goto nospc;	/* remaining window too small */
	}
	/*
	 * Loop finished (list empty or every region examined): fall through
	 * to 'found'.  NOTE(review): here 'c' is the list_for_each_entry
	 * cursor pointing back at 'head' itself, so the list_add_tail below
	 * appends the new region at the end of the list -- presumably the
	 * intended append case; confirm against the list.h macro semantics.
	 */

found:
	/*
	 * Insert this entry _before_ the one we found.
	 */
	list_add_tail(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;

	spin_unlock_irqrestore(&consistent_lock, flags);
	return new;

 nospc:
	spin_unlock_irqrestore(&consistent_lock, flags);
	kfree(new);
 out:
	return NULL;
}
157 | |||
158 | static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr) | ||
159 | { | ||
160 | struct vm_region *c; | ||
161 | |||
162 | list_for_each_entry(c, &head->vm_list, vm_list) { | ||
163 | if (c->vm_start == addr) | ||
164 | goto out; | ||
165 | } | ||
166 | c = NULL; | ||
167 | out: | ||
168 | return c; | ||
169 | } | ||
170 | |||
171 | /* | ||
172 | * Allocate DMA-coherent memory space and return both the kernel remapped | ||
173 | * virtual and bus address for that space. | ||
174 | */ | ||
/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 *
 * 'size' is rounded up to whole pages; '*handle' receives the bus address
 * of the backing pages; 'gfp' controls the page allocation.  The pages are
 * zeroed, flushed out of the data cache, and remapped non-cacheable inside
 * the consistent window.  Returns the remapped virtual address, or NULL
 * on failure (uninitialised mapping, oversized request, or allocation
 * failure).
 */
void *
__dma_alloc_coherent(size_t size, dma_addr_t *handle, int gfp)
{
	struct page *page;
	struct vm_region *c;
	unsigned long order;
	u64 mask = 0x00ffffff, limit; /* ISA default */

	/* consistent_pte is set up elsewhere at boot; bail loudly if not. */
	if (!consistent_pte) {
		printk(KERN_ERR "%s: not initialised\n", __func__);
		dump_stack();
		return NULL;
	}

	size = PAGE_ALIGN(size);
	/* Reject requests that exceed the mask-derived limit or the window. */
	limit = (mask + 1) & ~mask;
	if ((limit && size >= limit) || size >= (CONSISTENT_END - CONSISTENT_BASE)) {
		printk(KERN_WARNING "coherent allocation too big (requested %#x mask %#Lx)\n",
		       size, mask);
		return NULL;
	}

	order = get_order(size);

	/* Constrained mask: steer the allocation into the DMA zone. */
	if (mask != 0xffffffff)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;

	/*
	 * Invalidate any data that might be lurking in the
	 * kernel direct-mapped region for device DMA.
	 */
	{
		unsigned long kaddr = (unsigned long)page_address(page);
		memset(page_address(page), 0, size);
		flush_dcache_range(kaddr, kaddr + size);
	}

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = vm_region_alloc(&consistent_head, size,
			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		unsigned long vaddr = c->vm_start;
		pte_t *pte = consistent_pte + CONSISTENT_OFFSET(vaddr);
		/* One past the last page of the (possibly oversized) order. */
		struct page *end = page + (1 << order);

		/*
		 * Set the "dma handle"
		 */
		*handle = page_to_bus(page);

		/*
		 * Map each needed page non-cacheable into the consistent
		 * window.  Note 'size' is consumed by the loop condition,
		 * and 'page' is advanced past the mapped pages.
		 */
		do {
			BUG_ON(!pte_none(*pte));

			set_page_count(page, 1);
			/* Reserved: keep the VM from touching DMA pages. */
			SetPageReserved(page);
			set_pte_at(&init_mm, vaddr,
				   pte, mk_pte(page, pgprot_noncached(PAGE_KERNEL)));
			page++;
			pte++;
			vaddr += PAGE_SIZE;
		} while (size -= PAGE_SIZE);

		/*
		 * Free the otherwise unused pages.
		 */
		while (page < end) {
			set_page_count(page, 1);
			__free_page(page);
			page++;
		}

		return (void *)c->vm_start;
	}

	/* vm_region_alloc failed: give back the whole page block. */
	if (page)
		__free_pages(page, order);
 no_page:
	return NULL;
}
EXPORT_SYMBOL(__dma_alloc_coherent);
261 | |||
262 | /* | ||
263 | * free a page as defined by the above mapping. | ||
264 | */ | ||
265 | void __dma_free_coherent(size_t size, void *vaddr) | ||
266 | { | ||
267 | struct vm_region *c; | ||
268 | unsigned long flags, addr; | ||
269 | pte_t *ptep; | ||
270 | |||
271 | size = PAGE_ALIGN(size); | ||
272 | |||
273 | spin_lock_irqsave(&consistent_lock, flags); | ||
274 | |||
275 | c = vm_region_find(&consistent_head, (unsigned long)vaddr); | ||
276 | if (!c) | ||
277 | goto no_area; | ||
278 | |||
279 | if ((c->vm_end - c->vm_start) != size) { | ||
280 | printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n", | ||
281 | __func__, c->vm_end - c->vm_start, size); | ||
282 | dump_stack(); | ||
283 | size = c->vm_end - c->vm_start; | ||
284 | } | ||
285 | |||
286 | ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start); | ||
287 | addr = c->vm_start; | ||
288 | do { | ||
289 | pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep); | ||
290 | unsigned long pfn; | ||
291 | |||
292 | ptep++; | ||
293 | addr += PAGE_SIZE; | ||
294 | |||
295 | if (!pte_none(pte) && pte_present(pte)) { | ||
296 | pfn = pte_pfn(pte); | ||
297 | |||
298 | if (pfn_valid(pfn)) { | ||
299 | struct page *page = pfn_to_page(pfn); | ||
300 | ClearPageReserved(page); | ||
301 | |||
302 | __free_page(page); | ||
303 | continue; | ||
304 | } | ||
305 | } | ||
306 | |||
307 | printk(KERN_CRIT "%s: bad page in kernel page table\n", | ||
308 | __func__); | ||
309 | } while (size -= PAGE_SIZE); | ||
310 | |||
311 | flush_tlb_kernel_range(c->vm_start, c->vm_end); | ||
312 | |||
313 | list_del(&c->vm_list); | ||
314 | |||
315 | spin_unlock_irqrestore(&consistent_lock, flags); | ||
316 | |||
317 | kfree(c); | ||
318 | return; | ||
319 | |||
320 | no_area: | ||
321 | spin_unlock_irqrestore(&consistent_lock, flags); | ||
322 | printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n", | ||
323 | __func__, vaddr); | ||
324 | dump_stack(); | ||
325 | } | ||
326 | EXPORT_SYMBOL(__dma_free_coherent); | ||
327 | |||
/*
 * Initialise the consistent memory allocation.
 *
 * Pre-allocates the kernel page-table level covering CONSISTENT_BASE
 * and caches the pte array in consistent_pte, which the allocator
 * above indexes directly via CONSISTENT_OFFSET().
 * NOTE(review): only the single pte page for CONSISTENT_BASE is set
 * up here — presumably the whole consistent window fits under one
 * pmd entry on this platform; verify against CONSISTENT_END.
 *
 * Returns 0 on success or -ENOMEM if the tables cannot be allocated.
 */
static int __init dma_alloc_init(void)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int ret = 0;

	spin_lock(&init_mm.page_table_lock);

	/* do { } while (0) used purely so 'break' can bail out early. */
	do {
		pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
		pmd = pmd_alloc(&init_mm, pgd, CONSISTENT_BASE);
		if (!pmd) {
			printk(KERN_ERR "%s: no pmd tables\n", __func__);
			ret = -ENOMEM;
			break;
		}
		WARN_ON(!pmd_none(*pmd));

		pte = pte_alloc_kernel(&init_mm, pmd, CONSISTENT_BASE);
		if (!pte) {
			printk(KERN_ERR "%s: no pte tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		consistent_pte = pte;
	} while (0);

	spin_unlock(&init_mm.page_table_lock);

	return ret;
}

core_initcall(dma_alloc_init);
366 | |||
/*
 * make an area consistent.
 *
 * Performs the cache maintenance required before/after a DMA transfer
 * on a non-cache-coherent CPU:
 *   DMA_FROM_DEVICE:   invalidate, so the CPU re-reads device data;
 *   DMA_TO_DEVICE:     clean (writeback), so the device sees CPU writes;
 *   DMA_BIDIRECTIONAL: flush (writeback + invalidate).
 * DMA_NONE is a caller bug and triggers BUG() (which does not return,
 * so the missing break is harmless).
 *
 * @vaddr:     start of the area (kernel virtual address)
 * @size:      length in bytes
 * @direction: one of the DMA_* direction values
 */
void __dma_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	switch (direction) {
	case DMA_NONE:
		BUG();
	case DMA_FROM_DEVICE:	/* invalidate only */
		invalidate_dcache_range(start, end);
		break;
	case DMA_TO_DEVICE:	/* writeback only */
		clean_dcache_range(start, end);
		break;
	case DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		flush_dcache_range(start, end);
		break;
	}
}
EXPORT_SYMBOL(__dma_sync);
390 | |||
#ifdef CONFIG_HIGHMEM
/*
 * __dma_sync_page() implementation for systems using highmem.
 * In this case, each page of a buffer must be kmapped/kunmapped
 * in order to have a virtual address for __dma_sync(). This must
 * not sleep so kmap_atomic()/kunmap_atomic() are used.
 *
 * Note: yes, it is possible and correct to have a buffer extend
 * beyond the first page.
 *
 * @page:      first page of the buffer
 * @offset:    byte offset of the buffer within the first page
 * @size:      total buffer length in bytes
 * @direction: DMA_* direction value, passed through to __dma_sync()
 */
static inline void __dma_sync_page_highmem(struct page *page,
		unsigned long offset, size_t size, int direction)
{
	/*
	 * First segment runs from 'offset' to the end of the first page,
	 * or is the whole buffer if it fits before the page boundary.
	 * (The old form min(PAGE_SIZE, size) - offset over-synced when
	 * offset > 0 and underflowed when size < offset.)
	 */
	size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
	size_t cur_size = seg_size;
	unsigned long flags, start, seg_offset = offset;
	/* one segment for the head, plus whole/partial pages for the rest */
	int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
	int seg_nr = 0;

	local_irq_save(flags);

	do {
		start = (unsigned long)kmap_atomic(page + seg_nr,
				KM_PPC_SYNC_PAGE) + seg_offset;

		/* Sync this buffer segment */
		__dma_sync((void *)start, seg_size, direction);
		kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE);
		seg_nr++;

		/* Calculate next buffer segment size */
		seg_size = min((size_t)PAGE_SIZE, size - cur_size);

		/* Add the segment size to our running total */
		cur_size += seg_size;
		seg_offset = 0;	/* only the first segment starts mid-page */
	} while (seg_nr < nr_segs);

	local_irq_restore(flags);
}
#endif /* CONFIG_HIGHMEM */
432 | |||
/*
 * __dma_sync_page makes memory consistent. identical to __dma_sync, but
 * takes a struct page instead of a virtual address
 *
 * @page:      first page of the buffer
 * @offset:    byte offset of the buffer within the first page
 * @size:      buffer length in bytes (may extend past the first page)
 * @direction: DMA_* direction value
 *
 * With CONFIG_HIGHMEM the page may have no permanent kernel mapping,
 * so the work is delegated to the kmap_atomic-based helper; otherwise
 * the lowmem direct mapping is used.
 */
void __dma_sync_page(struct page *page, unsigned long offset,
	size_t size, int direction)
{
#ifdef CONFIG_HIGHMEM
	__dma_sync_page_highmem(page, offset, size, direction);
#else
	unsigned long start = (unsigned long)page_address(page) + offset;
	__dma_sync((void *)start, size, direction);
#endif
}
EXPORT_SYMBOL(__dma_sync_page);
diff --git a/arch/ppc/kernel/entry.S b/arch/ppc/kernel/entry.S new file mode 100644 index 000000000000..035217d6c0f1 --- /dev/null +++ b/arch/ppc/kernel/entry.S | |||
@@ -0,0 +1,969 @@ | |||
1 | /* | ||
2 | * PowerPC version | ||
3 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
4 | * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP | ||
5 | * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com> | ||
6 | * Adapted for Power Macintosh by Paul Mackerras. | ||
7 | * Low-level exception handlers and MMU support | ||
8 | * rewritten by Paul Mackerras. | ||
9 | * Copyright (C) 1996 Paul Mackerras. | ||
10 | * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net). | ||
11 | * | ||
12 | * This file contains the system call entry code, context switch | ||
13 | * code, and exception/interrupt return code for PowerPC. | ||
14 | * | ||
15 | * This program is free software; you can redistribute it and/or | ||
16 | * modify it under the terms of the GNU General Public License | ||
17 | * as published by the Free Software Foundation; either version | ||
18 | * 2 of the License, or (at your option) any later version. | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #include <linux/config.h> | ||
23 | #include <linux/errno.h> | ||
24 | #include <linux/sys.h> | ||
25 | #include <linux/threads.h> | ||
26 | #include <asm/processor.h> | ||
27 | #include <asm/page.h> | ||
28 | #include <asm/mmu.h> | ||
29 | #include <asm/cputable.h> | ||
30 | #include <asm/thread_info.h> | ||
31 | #include <asm/ppc_asm.h> | ||
32 | #include <asm/offsets.h> | ||
33 | #include <asm/unistd.h> | ||
34 | |||
#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK

/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 * LOAD_MSR_KERNEL(r, x) loads constant x into register r: one 'li'
 * when x fits in a 16-bit signed immediate, else a lis/ori pair.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif
46 | |||
#ifdef CONFIG_BOOKE
#include "head_booke.h"
/*
 * Book-E machine-check entry: switch to the dedicated machine-check
 * stack and copy the r10/r11 slots (saved relative to the trap-time
 * r8 frame) into the new frame, then take the common full-save path.
 * r8 is parked in MCHECK_SPRG while it is used as the old-frame base.
 */
	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	mtspr	MCHECK_SPRG,r8
	BOOKE_LOAD_MCHECK_STACK
	lwz	r0,GPR10-INT_FRAME_SIZE(r8)
	stw	r0,GPR10(r11)
	lwz	r0,GPR11-INT_FRAME_SIZE(r8)
	stw	r0,GPR11(r11)
	mfspr	r8,MCHECK_SPRG
	b	transfer_to_handler_full

/* Same dance for Book-E critical interrupts, on the critical stack. */
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	mtspr	CRIT_SPRG,r8
	BOOKE_LOAD_CRIT_STACK
	lwz	r0,GPR10-INT_FRAME_SIZE(r8)
	stw	r0,GPR10(r11)
	lwz	r0,GPR11-INT_FRAME_SIZE(r8)
	stw	r0,GPR11(r11)
	mfspr	r8,CRIT_SPRG
	/* fall through */
#endif

#ifdef CONFIG_40x
/*
 * 40x critical-interrupt entry: r10/r11 were stashed at the absolute
 * locations crit_r10/crit_r11 (loads use a zero base register);
 * copy them into the exception frame and fall into the common path.
 */
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	/* fall through */
#endif
81 | |||
/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 *
 * On entry: r11 = exception frame, r9 = trap-time MSR, r12 = trap-time
 * NIP, LR points at a two-word transfer table (handler, return address).
 */
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)		/* full frame: save r13-r31 too */
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR		/* sets cr0.eq iff from kernel */
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG3		/* SPRG3 = physical &current->thread */
	addi	r2,r12,-THREAD
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   single-step bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IC@h
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1		/* decrement global_dbcr0 use count */
	stw	r12,4(r11)
#endif
	b	3f
2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
#ifdef CONFIG_6xx
	mfspr	r11,SPRN_HID0
	mtcr	r11			/* HID0 bits into CR for the bt tests */
BEGIN_FTR_SECTION
	bt-	8,power_save_6xx_restore	/* Check DOZE */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
BEGIN_FTR_SECTION
	bt-	9,power_save_6xx_restore	/* Check NAP */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
#endif /* CONFIG_6xx */
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
	lwz	r11,THREAD_INFO-THREAD(r12)
	cmplw	r1,r11			/* if r1 <= current->thread_info */
	ble-	stack_ovf		/* then the kernel stack overflowed */
3:
	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r11,_end@h
	ori	r11,r11,_end@l
	cmplw	r1,r11
	ble	3b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	/* switch to the init task's stack before calling C code */
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI
181 | |||
/*
 * Handle a system call.
 * On entry r0 = syscall number, r3-r8 = arguments, r1 = kernel stack
 * with a full exception frame already built, r2 = current.
 */
	.stabs	"arch/ppc/kernel/",N_SO,0,0,0f
	.stabs	"entry.S",N_SO,0,0,0f
0:

_GLOBAL(DoSyscall)
	stw	r0,THREAD+LAST_SYSCALL(r2)
	stw	r3,ORIG_GPR3(r1)	/* keep arg0 for syscall restart */
	li	r12,0
	stw	r12,RESULT(r1)
	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall
#endif /* SHOW_SYSCALLS */
	rlwinm	r10,r1,0,0,18	/* current_thread_info() */
	lwz	r11,TI_LOCAL_FLAGS(r10)
	rlwinm	r11,r11,0,~_TIFL_FORCE_NOERROR
	stw	r11,TI_LOCAL_FLAGS(r10)
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_TRACE
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2		/* word index into the table */
	bge-	66f		/* out-of-range syscall number */
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	blrl			/* Call handler */
	.globl	ret_from_syscall
ret_from_syscall:
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall_exit
#endif
	mr	r6,r3		/* keep raw return value for exit_work */
	li	r11,-_LAST_ERRNO
	cmplw	0,r3,r11	/* error if r3 in [-_LAST_ERRNO, -1] */
	rlwinm	r12,r1,0,0,18	/* current_thread_info() */
	blt+	30f
	lwz	r11,TI_LOCAL_FLAGS(r12)
	andi.	r11,r11,_TIFL_FORCE_NOERROR
	bne	30f		/* caller asked not to errno-ify */
	neg	r3,r3		/* return positive errno... */
	lwz	r10,_CCR(r1)	/* Set SO bit in CR */
	oris	r10,r10,0x1000	/* ...with summary-overflow flagging it */
	stw	r10,_CCR(r1)

	/* disable interrupts so current_thread_info()->flags can't change */
30:	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
	andi.	r0,r9,(_TIF_SYSCALL_TRACE|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
	bne-	syscall_exit_work
syscall_exit_cont:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The single
	   step bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IC@h
	bnel-	load_dbcr0
#endif
	stwcx.	r0,0,r1			/* to clear the reservation */
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	lwz	r8,_MSR(r1)
	FIX_SRR1(r8, r0)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	SYNC
	RFI

66:	li	r3,-ENOSYS		/* unknown syscall number */
	b	ret_from_syscall

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0			/* child returns 0 from fork */
	b	ret_from_syscall

/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00		/* mark frame as syscall (full set) */
	stw	r0,TRAP(r1)
	bl	do_syscall_trace
	lwz	r0,GPR0(r1)	/* Restore original registers */
	lwz	r3,GPR3(r1)	/* (tracer may have rewritten them) */
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)
	b	syscall_dotrace_cont

syscall_exit_work:
	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
	andi.	r0,r9,_TIF_SYSCALL_TRACE
	beq	5f
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* re-enable interrupts */
	lwz	r4,TRAP(r1)
	andi.	r4,r4,1		/* NVGPRS not yet saved in the frame? */
	beq	4f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,TRAP(r1)
4:
	bl	do_syscall_trace
	REST_NVGPRS(r1)
2:
	lwz	r3,GPR3(r1)
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	SYNC
	MTMSRD(r10)		/* disable interrupts again */
	rlwinm	r12,r1,0,0,18	/* current_thread_info() */
	lwz	r9,TI_FLAGS(r12)	/* re-read flags after tracing */
5:
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne	1f
	lwz	r5,_MSR(r1)
	andi.	r5,r5,MSR_PR	/* signals only matter returning to user */
	beq	syscall_exit_cont
	andi.	r0,r9,_TIF_SIGPENDING
	beq	syscall_exit_cont
	b	do_user_signal
1:
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* re-enable interrupts */
	bl	schedule
	b	2b		/* recheck flags after scheduling */
330 | |||
#ifdef SHOW_SYSCALLS
/*
 * Debug-only helpers (compiled when SHOW_SYSCALLS is defined): printk
 * each syscall's number/arguments on entry and its result on exit.
 * r31 is borrowed to preserve LR across the printk calls, and all
 * argument registers are reloaded from the frame afterwards so the
 * syscall path sees them unchanged.
 */
do_show_syscall:
#ifdef SHOW_SYSCALLS_TASK
	/* only trace the task whose pointer is in show_syscalls_task */
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	lis	r3,7f@ha	/* format string: number + first 5 args */
	addi	r3,r3,7f@l
	lwz	r4,GPR0(r1)
	lwz	r5,GPR3(r1)
	lwz	r6,GPR4(r1)
	lwz	r7,GPR5(r1)
	lwz	r8,GPR6(r1)
	lwz	r9,GPR7(r1)
	bl	printk
	lis	r3,77f@ha	/* continuation: arg 6 + current */
	addi	r3,r3,77f@l
	lwz	r4,GPR8(r1)
	mr	r5,r2
	bl	printk
	lwz	r0,GPR0(r1)	/* restore clobbered argument registers */
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	stw	r3,RESULT(r1)	/* Save result */
	mr	r4,r3
	lis	r3,79f@ha
	addi	r3,r3,79f@l
	bl	printk
	lwz	r3,RESULT(r1)	/* put the return value back in r3 */
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
77:	.string	"%x), current=%p\n"
79:	.string	" -> %x\n"
	.align	2,0

#ifdef SHOW_SYSCALLS_TASK
	.data
	.globl	show_syscalls_task
show_syscalls_task:
	.long	-1	/* -1 = trace no task until set */
	.text
#endif
#endif /* SHOW_SYSCALLS */
398 | |||
/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r13 - r31) before calling
 * the C code.
 *
 * Each wrapper below saves the NVGPRS and clears the low bit of the
 * frame's TRAP word — the convention marking the frame as holding the
 * full register set — before tail-branching to the C implementation.
 */
	.globl	ppc_sigsuspend
ppc_sigsuspend:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_sigsuspend

	.globl	ppc_rt_sigsuspend
ppc_rt_sigsuspend:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30
	stw	r0,TRAP(r1)
	b	sys_rt_sigsuspend

/* fork/clone need the full set too: copy_thread copies it to the child */
	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_swapcontext
453 | |||
/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 * On entry r4 = faulting address (DAR), r1 = exception frame.
 */
	.globl	handle_page_fault
handle_page_fault:
	stw	r4,_DAR(r1)		/* stash faulting address in frame */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except		/* fault handled: normal return */
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	clrrwi	r0,r0,1			/* clear LSB: full reg set saved */
	stw	r0,TRAP(r1)
	mr	r5,r3			/* do_page_fault's error code */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full
476 | |||
/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this , you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/ppc/kernel/process.c
 */
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)	/* make a switch frame */
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	oris	r0,r0,MSR_SPE@h	/* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f		/* none enabled: MSR needs no change */
	andc	r11,r11,r0
	MTMSRD(r11)
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	CLR_TOP32(r0)
	mtspr	SPRN_SPRG3,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0	/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0	/* restore SPEFSCR reg */
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE	/* pop the switch frame */
	blr
565 | |||
/*
 * Common exception-return path.  sigreturn_exit re-derives the stack
 * frame from the pt_regs pointer in r3, runs the optional syscall
 * tracer, and falls into ret_from_except_full / ret_from_except.
 */
	.globl	sigreturn_exit
sigreturn_exit:
	subi	r1,r3,STACK_FRAME_OVERHEAD
	rlwinm	r12,r1,0,0,18	/* current_thread_info() */
	lwz	r9,TI_FLAGS(r12)
	andi.	r0,r9,_TIF_SYSCALL_TRACE
	bnel-	do_syscall_trace
	/* fall through */

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)		/* frame holds the full register set */
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	rlwinm	r9,r1,0,0,18
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED)
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The single
	   step bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IC@h
	bnel-	load_dbcr0
#endif

#ifdef CONFIG_PREEMPT
	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info->preempt_count */
	rlwinm	r9,r1,0,0,18
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	lwz	r0,TI_FLAGS(r9)
	andi.	r0,r0,_TIF_NEED_RESCHED
	beq+	restore
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
1:	bl	preempt_schedule_irq
	rlwinm	r9,r1,0,0,18	/* re-derive thread_info, recheck flag */
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */
632 | |||
633 | /* interrupts are hard-disabled at this point */ | ||
634 | restore: | ||
635 | lwz r0,GPR0(r1) | ||
636 | lwz r2,GPR2(r1) | ||
637 | REST_4GPRS(3, r1) | ||
638 | REST_2GPRS(7, r1) | ||
639 | |||
640 | lwz r10,_XER(r1) | ||
641 | lwz r11,_CTR(r1) | ||
642 | mtspr SPRN_XER,r10 | ||
643 | mtctr r11 | ||
644 | |||
645 | PPC405_ERR77(0,r1) | ||
646 | stwcx. r0,0,r1 /* to clear the reservation */ | ||
647 | |||
648 | #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) | ||
649 | lwz r9,_MSR(r1) | ||
650 | andi. r10,r9,MSR_RI /* check if this exception occurred */ | ||
651 | beql nonrecoverable /* at a bad place (MSR:RI = 0) */ | ||
652 | |||
653 | lwz r10,_CCR(r1) | ||
654 | lwz r11,_LINK(r1) | ||
655 | mtcrf 0xFF,r10 | ||
656 | mtlr r11 | ||
657 | |||
658 | /* | ||
659 | * Once we put values in SRR0 and SRR1, we are in a state | ||
660 | * where exceptions are not recoverable, since taking an | ||
661 | * exception will trash SRR0 and SRR1. Therefore we clear the | ||
662 | * MSR:RI bit to indicate this. If we do take an exception, | ||
663 | * we can't return to the point of the exception but we | ||
664 | * can restart the exception exit path at the label | ||
665 | * exc_exit_restart below. -- paulus | ||
666 | */ | ||
667 | LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI) | ||
668 | SYNC | ||
669 | MTMSRD(r10) /* clear the RI bit */ | ||
670 | .globl exc_exit_restart | ||
671 | exc_exit_restart: | ||
672 | lwz r9,_MSR(r1) | ||
673 | lwz r12,_NIP(r1) | ||
674 | FIX_SRR1(r9,r10) | ||
675 | mtspr SPRN_SRR0,r12 | ||
676 | mtspr SPRN_SRR1,r9 | ||
677 | REST_4GPRS(9, r1) | ||
678 | lwz r1,GPR1(r1) | ||
679 | .globl exc_exit_restart_end | ||
680 | exc_exit_restart_end: | ||
681 | SYNC | ||
682 | RFI | ||
683 | |||
#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	REST_2GPRS(9, r1)	/* r9/r10 done; r11/r12 stay live below */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
exc_exit_start:
	mtspr	SPRN_SRR0,r11	/* from here SRR0/SRR1 must survive to rfi */
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.			/* prevent prefetch past rfi */
711 | |||
/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	REST_NVGPRS(r1)
	lwz	r3,_MSR(r1)
	andi.	r3,r3,MSR_PR		/* came from user mode? */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	bne	user_exc_return		/* yes: normal user-mode return path */

	/* kernel-mode return: restore everything by hand, no rescheduling */
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
	stwcx.	r0,0,r1			/* to clear the reservation */

	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
#ifdef CONFIG_40x
	/* avoid any possible TLB misses here by turning off MSR.DR, we
	 * assume the instructions here are mapped by a pinned TLB entry */
	li	r10,MSR_IR
	mtmsr	r10
	isync
	tophys(r1, r1)			/* data accesses now need phys addrs */
#endif
	/* restore SPRs the interrupted code may have been using */
	lwz	r9,_DEAR(r1)
	lwz	r10,_ESR(r1)
	mtspr	SPRN_DEAR,r9
	mtspr	SPRN_ESR,r10
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
	mtspr	SPRN_CSRR0,r11		/* critical-class save/restore regs */
	mtspr	SPRN_CSRR1,r12
	lwz	r9,GPR9(r1)
	lwz	r12,GPR12(r1)
	lwz	r10,GPR10(r1)
	lwz	r11,GPR11(r1)
	lwz	r1,GPR1(r1)
	PPC405_ERR77_SYNC
	rfci				/* return-from-critical-interrupt */
	b	.			/* prevent prefetch past rfci */
774 | |||
#ifdef CONFIG_BOOKE
/*
 * Return from a machine check interrupt, similar to a critical
 * interrupt.
 */
	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	REST_NVGPRS(r1)
	lwz	r3,_MSR(r1)
	andi.	r3,r3,MSR_PR		/* came from user mode? */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	bne	user_exc_return		/* yes: normal user-mode return path */

	/* kernel-mode return, mirroring ret_from_crit_exc above */
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	stwcx.	r0,0,r1			/* to clear the reservation */

	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	lwz	r9,_DEAR(r1)
	lwz	r10,_ESR(r1)
	mtspr	SPRN_DEAR,r9
	mtspr	SPRN_ESR,r10
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
	mtspr	SPRN_MCSRR0,r11		/* machine-check save/restore regs */
	mtspr	SPRN_MCSRR1,r12
	lwz	r9,GPR9(r1)
	lwz	r12,GPR12(r1)
	lwz	r10,GPR10(r1)
	lwz	r11,GPR11(r1)
	lwz	r1,GPR1(r1)
	RFMCI				/* return-from-machine-check */
#endif /* CONFIG_BOOKE */
819 | |||
/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * has the dbcr0 value to set upon entry to this.
 */
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
	stw	r10,0(r11)	/* word 0: previous (global) DBCR0 value */
	mtspr	SPRN_DBCR0,r0	/* install the task's DBCR0 */
	lwz	r10,4(r11)	/* word 1: count of times we've done this */
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr

	/* 8 bytes: [0] = saved DBCR0, [4] = use count (see above) */
	.comm	global_dbcr0,8
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
844 | |||
do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal	/* no resched needed: must be a signal */

do_resched:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	/* loop until neither resched nor signal work remains; flags must
	 * be re-read with interrupts disabled to avoid missing new work */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	rlwinm	r9,r1,0,0,18	/* mask r1 down to thread_info (8kB align) */
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_SIGPENDING
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,TRAP(r1)
	andi.	r0,r3,1		/* TRAP low bit set => NVGPRs not saved yet */
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30	/* clear the low bit: NVGPRs now in frame */
	stw	r3,TRAP(r1)
2:	li	r3,0		/* no oldset */
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	do_signal
	REST_NVGPRS(r1)
	b	recheck
880 | |||
/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
	/*
	 * If the interrupted NIP (r12) lies inside the
	 * [exc_exit_restart, exc_exit_restart_end) window, the exit
	 * sequence is simply restarted from exc_exit_restart instead
	 * of killing the process.
	 */
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f			/* at or past the end: not in window */
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f			/* before the start: not in window */
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1		/* keep a count of such restarts */
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
	/* make sure the NVGPRs are in the frame before calling out to C */
	lwz	r3,TRAP(r1)
	andi.	r0,r3,1
	beq	4f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,TRAP(r1)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
	b	4b

	.comm	ee_restarts,4	/* count of exception-exit restarts */
918 | |||
/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_OF
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)	/* save our return address */
	lis	r4,rtas_data@ha
	lwz	r4,rtas_data@l(r4)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys(r7,r1)		/* physical address of our exception frame */
	lis	r8,rtas_entry@ha
	lwz	r8,rtas_entry@l(r8)
	mfmsr	r9
	stw	r9,8(r1)	/* stash original MSR, restored below at 1: */
	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	MTMSRD(r0)		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)	/* RTAS runs untranslated */
	mtlr	r6
	CLR_TOP32(r7)
	/* a non-zero SPRG2 tells the machine check handler we're in RTAS */
	mtspr	SPRN_SPRG2,r7
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* jump into RTAS with the MMU off */
1:	tophys(r9,r1)		/* we return here untranslated: use phys r1 */
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	FIX_SRR1(r9,r0)
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	mtspr	SPRN_SPRG2,r0	/* clear the "in RTAS" marker */
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* return to caller */

	.globl	machine_check_in_rtas
machine_check_in_rtas:
	twi	31,0,0		/* unconditional trap: no recovery yet */
	/* XXX load up BATs and panic */

#endif /* CONFIG_PPC_OF */
diff --git a/arch/ppc/kernel/find_name.c b/arch/ppc/kernel/find_name.c new file mode 100644 index 000000000000..3c0fa8e0c077 --- /dev/null +++ b/arch/ppc/kernel/find_name.c | |||
@@ -0,0 +1,48 @@ | |||
1 | #include <stdio.h> | ||
2 | #include <asm/page.h> | ||
3 | #include <sys/mman.h> | ||
4 | #include <strings.h> | ||
5 | /* | ||
6 | * Finds a given address in the System.map and prints it out | ||
7 | * with its neighbors. -- Cort | ||
8 | */ | ||
9 | |||
10 | int main(int argc, char **argv) | ||
11 | { | ||
12 | unsigned long addr, cmp, i; | ||
13 | FILE *f; | ||
14 | char s[256], last[256]; | ||
15 | |||
16 | if ( argc < 2 ) | ||
17 | { | ||
18 | fprintf(stderr, "Usage: %s <address>\n", argv[0]); | ||
19 | return -1; | ||
20 | } | ||
21 | |||
22 | for ( i = 1 ; argv[i] ; i++ ) | ||
23 | { | ||
24 | sscanf( argv[i], "%0lx", &addr ); | ||
25 | /* adjust if addr is relative to kernelbase */ | ||
26 | if ( addr < PAGE_OFFSET ) | ||
27 | addr += PAGE_OFFSET; | ||
28 | |||
29 | if ( (f = fopen( "System.map", "r" )) == NULL ) | ||
30 | { | ||
31 | perror("fopen()\n"); | ||
32 | exit(-1); | ||
33 | } | ||
34 | |||
35 | while ( !feof(f) ) | ||
36 | { | ||
37 | fgets(s, 255 , f); | ||
38 | sscanf( s, "%0lx", &cmp ); | ||
39 | if ( addr < cmp ) | ||
40 | break; | ||
41 | strcpy( last, s); | ||
42 | } | ||
43 | |||
44 | printf( "%s%s", last, s ); | ||
45 | } | ||
46 | fclose(f); | ||
47 | return 0; | ||
48 | } | ||
diff --git a/arch/ppc/kernel/head.S b/arch/ppc/kernel/head.S new file mode 100644 index 000000000000..1a89a71e0acc --- /dev/null +++ b/arch/ppc/kernel/head.S | |||
@@ -0,0 +1,1710 @@ | |||
1 | /* | ||
2 | * PowerPC version | ||
3 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
4 | * | ||
5 | * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP | ||
6 | * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> | ||
7 | * Adapted for Power Macintosh by Paul Mackerras. | ||
8 | * Low-level exception handlers and MMU support | ||
9 | * rewritten by Paul Mackerras. | ||
10 | * Copyright (C) 1996 Paul Mackerras. | ||
11 | * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net). | ||
12 | * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk). | ||
13 | * | ||
14 | * This file contains the low-level support and setup for the | ||
15 | * PowerPC platform, including trap and interrupt dispatch. | ||
16 | * (The PPC 8xx embedded CPUs use head_8xx.S instead.) | ||
17 | * | ||
18 | * This program is free software; you can redistribute it and/or | ||
19 | * modify it under the terms of the GNU General Public License | ||
20 | * as published by the Free Software Foundation; either version | ||
21 | * 2 of the License, or (at your option) any later version. | ||
22 | * | ||
23 | */ | ||
24 | |||
25 | #include <linux/config.h> | ||
26 | #include <asm/processor.h> | ||
27 | #include <asm/page.h> | ||
28 | #include <asm/mmu.h> | ||
29 | #include <asm/pgtable.h> | ||
30 | #include <asm/cputable.h> | ||
31 | #include <asm/cache.h> | ||
32 | #include <asm/thread_info.h> | ||
33 | #include <asm/ppc_asm.h> | ||
34 | #include <asm/offsets.h> | ||
35 | |||
36 | #ifdef CONFIG_APUS | ||
37 | #include <asm/amigappc.h> | ||
38 | #endif | ||
39 | |||
#ifdef CONFIG_PPC64BRIDGE
/*
 * Load BAT pair n (I-side and D-side, upper and lower halves) from the
 * 64-bit-entry BAT table at 'reg'.  RA and RB are scratch registers.
 */
#define LOAD_BAT(n, reg, RA, RB)	\
	ld	RA,(n*32)+0(reg);	\
	ld	RB,(n*32)+8(reg);	\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_IBAT##n##L,RB;	\
	ld	RA,(n*32)+16(reg);	\
	ld	RB,(n*32)+24(reg);	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##L,RB;	\

#else /* CONFIG_PPC64BRIDGE */

/* 601 only have IBAT; cr0.eq is set on 601 when using this macro.
 * The BATs are first invalidated (upper word = 0) before being loaded
 * from the 32-bit-entry table at 'reg'; the DBAT load is skipped (beq)
 * on the 601. */
#define LOAD_BAT(n, reg, RA, RB)	\
	/* see the comment for clear_bats() -- Cort */ \
	li	RA,0;			\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	lwz	RA,(n*16)+0(reg);	\
	lwz	RB,(n*16)+4(reg);	\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_IBAT##n##L,RB;	\
	beq	1f;			\
	lwz	RA,(n*16)+8(reg);	\
	lwz	RB,(n*16)+12(reg);	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##L,RB;	\
1:
#endif /* CONFIG_PPC64BRIDGE */
70 | |||
	.text
	.stabs	"arch/ppc/kernel/",N_SO,0,0,0f
	.stabs	"head.S",N_SO,0,0,0f
0:
	.globl	_stext
_stext:

/*
 * _start is defined this way because the XCOFF loader in the OpenFirmware
 * on the powermac expects the entry point to be a procedure descriptor.
 */
	.text
	.globl	_start
_start:
	/*
	 * These are here for legacy reasons, the kernel used to
	 * need to look like a coff function entry for the pmac
	 * but we're always started by some kind of bootloader now.
	 * -- Cort
	 */
	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
	nop

/* Entry conventions per platform (register contents on arrival): */
/* PMAC
 * Enter here with the kernel text, data and bss loaded starting at
 * 0, running with virtual == physical mapping.
 * r5 points to the prom entry point (the client interface handler
 * address).  Address translation is turned on, with the prom
 * managing the hash table.  Interrupts are disabled.  The stack
 * pointer (r1) points to just below the end of the half-meg region
 * from 0x380000 - 0x400000, which is mapped in already.
 *
 * If we are booted from MacOS via BootX, we enter with the kernel
 * image loaded somewhere, and the following values in registers:
 *  r3: 'BooX' (0x426f6f58)
 *  r4: virtual address of boot_infos_t
 *  r5: 0
 *
 * APUS
 *   r3: 'APUS'
 *   r4: physical address of memory base
 *   Linux/m68k style BootInfo structure at &_end.
 *
 * PREP
 * This is jumped to on prep systems right after the kernel is relocated
 * to its proper place in memory by the boot loader.  The expected layout
 * of the regs is:
 *   r3: ptr to residual data
 *   r4: initrd_start or if no initrd then 0
 *   r5: initrd_end - unused if r4 is 0
 *   r6: Start of command line string
 *   r7: End of command line string
 *
 * This just gets a minimal mmu environment setup so we can call
 * start_here() to do the real work.
 * -- Cort
 */
129 | |||
	.globl	__start
__start:
/*
 * We have to do any OF calls before we map ourselves to KERNELBASE,
 * because OF may have I/O devices mapped into that area
 * (particularly on CHRP).
 */
	mr	r31,r3			/* save parameters */
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7
	li	r24,0			/* cpu # */

/*
 * early_init() does the early machine identification and does
 * the necessary low-level setup and clears the BSS
 *  -- Cort <cort@fsmlabs.com>
 */
	bl	early_init

/*
 * On POWER4, we first need to tweak some CPU configuration registers
 * like real mode cache inhibit or exception base
 */
#ifdef CONFIG_POWER4
	bl	__970_cpu_preinit
#endif /* CONFIG_POWER4 */

#ifdef CONFIG_APUS
/* On APUS the __va/__pa constants need to be set to the correct
 * values before continuing.
 */
	mr	r4,r30
	bl	fix_mem_constants
#endif /* CONFIG_APUS */

/* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
 * the physical address we are running at, returned by early_init()
 */
	bl	mmu_off
__after_mmu_off:
#ifndef CONFIG_POWER4
	bl	clear_bats
	bl	flush_tlbs

	bl	initial_bats
#if !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT)
	bl	setup_disp_bat
#endif
#else /* CONFIG_POWER4 */
	bl	reloc_offset
	bl	initial_mm_power4
#endif /* CONFIG_POWER4 */

/*
 * Call setup_cpu for CPU 0 and initialize 6xx Idle
 */
	bl	reloc_offset
	li	r24,0			/* cpu# */
	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
#ifdef CONFIG_6xx
	bl	reloc_offset
	bl	init_idle_6xx
#endif /* CONFIG_6xx */
#ifdef CONFIG_POWER4
	bl	reloc_offset
	bl	init_idle_power4
#endif /* CONFIG_POWER4 */


#ifndef CONFIG_APUS
/*
 * We need to run with _start at physical address 0.
 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
 * the exception vectors at 0 (and therefore this copy
 * overwrites OF's exception vectors with our own).
 * If the MMU is already turned on, we copy stuff to KERNELBASE,
 * otherwise we copy it to 0.
 */
	bl	reloc_offset
	mr	r26,r3
	addis	r4,r3,KERNELBASE@h	/* current address of _start */
	cmpwi	0,r4,0			/* are we already running at 0? */
	bne	relocate_kernel
#endif /* CONFIG_APUS */
/*
 * we now have the 1st 16M of ram mapped with the bats.
 * prep needs the mmu to be turned on here, but pmac already has it on.
 * this shouldn't bother the pmac since it just gets turned on again
 * as we jump to our code at KERNELBASE. -- Cort
 * Actually no, pmac doesn't have it on any more. BootX enters with MMU
 * off, and in other cases, we now turn it off before changing BATs above.
 */
turn_on_mmu:
	/* rfi with SRR1 = MSR|IR|DR and SRR0 = start_here: continues
	 * execution at start_here with address translation enabled */
	mfmsr	r0
	ori	r0,r0,MSR_DR|MSR_IR
	mtspr	SPRN_SRR1,r0
	lis	r0,start_here@h
	ori	r0,r0,start_here@l
	mtspr	SPRN_SRR0,r0
	SYNC
	RFI				/* enables MMU */

/*
 * We need __secondary_hold as a place to hold the other cpus on
 * an SMP machine, even when we are running a UP kernel.
 * Protocol (via low memory): we store our cpu # at address 4 as an
 * ack, then spin reading address 0 until the master writes our cpu #
 * there to release us.
 */
	. = 0xc0		/* for prep bootloader */
	li	r3,1		/* MTX only has 1 cpu */
	.globl	__secondary_hold
__secondary_hold:
	/* tell the master we're here */
	stw	r3,4(0)
#ifdef CONFIG_SMP
100:	lwz	r4,0(0)
	/* wait until we're told to start */
	cmpw	0,r4,r3
	bne	100b
	/* our cpu # was at addr 0 - go */
	mr	r24,r3			/* cpu # */
	b	__secondary_start
#else
	b	.
#endif /* CONFIG_SMP */
255 | |||
/*
 * Exception entry code.  This code runs with address translation
 * turned off, i.e. using physical addresses.
 * We assume sprg3 has the physical address of the current
 * task's thread_struct.
 */
#define EXCEPTION_PROLOG	\
	mtspr	SPRN_SPRG0,r10;	\
	mtspr	SPRN_SPRG1,r11;	\
	mfcr	r10;		\
	EXCEPTION_PROLOG_1;	\
	EXCEPTION_PROLOG_2

/* Pick the exception frame location: the current kernel stack (r1) if
 * we came from the kernel, else the task's kernel stack via SPRG3;
 * r11 ends up as the physical address of the new frame. */
#define EXCEPTION_PROLOG_1	\
	mfspr	r11,SPRN_SRR1;		/* check whether user or kernel */ \
	andi.	r11,r11,MSR_PR;	\
	tophys(r11,r1);			/* use tophys(r1) if kernel */ \
	beq	1f;		\
	mfspr	r11,SPRN_SPRG3;	\
	lwz	r11,THREAD_INFO-THREAD(r11);	\
	addi	r11,r11,THREAD_SIZE;	\
	tophys(r11,r11);	\
1:	subi	r11,r11,INT_FRAME_SIZE	/* alloc exc. frame */


/* Save the volatile registers into the frame at r11, switch r1 to the
 * (virtual) frame, and re-enable machine checks / translation window. */
#define EXCEPTION_PROLOG_2	\
	CLR_TOP32(r11);		\
	stw	r10,_CCR(r11);		/* save registers */ \
	stw	r12,GPR12(r11);	\
	stw	r9,GPR9(r11);	\
	mfspr	r10,SPRN_SPRG0;	\
	stw	r10,GPR10(r11);	\
	mfspr	r12,SPRN_SPRG1;	\
	stw	r12,GPR11(r11);	\
	mflr	r10;		\
	stw	r10,_LINK(r11);	\
	mfspr	r12,SPRN_SRR0;	\
	mfspr	r9,SPRN_SRR1;	\
	stw	r1,GPR1(r11);	\
	stw	r1,0(r11);	\
	tovirt(r1,r11);			/* set new kernel sp */	\
	li	r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \
	MTMSRD(r10);			/* (except for mach check in rtas) */ \
	stw	r0,GPR0(r11);	\
	SAVE_4GPRS(3, r11);	\
	SAVE_2GPRS(7, r11)

/*
 * Note: code which follows this uses cr0.eq (set if from kernel),
 * r11, r12 (SRR0), and r9 (SRR1).
 *
 * Note2: once we have set r1 we are in a position to take exceptions
 * again, and we could thus set MSR:RI at that point.
 */

/*
 * Exception vectors.
 */
#define EXCEPTION(n, label, hdlr, xfer)		\
	. = n;					\
label:						\
	EXCEPTION_PROLOG;			\
	addi	r3,r1,STACK_FRAME_OVERHEAD;	\
	xfer(n, hdlr)

/* NOTE(review): the two .long words following the bl appear to be
 * consumed by the transfer routine (found via LR) as the C handler
 * address and the return path -- confirm against transfer_to_handler
 * in entry.S. */
#define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret)	\
	li	r10,trap;					\
	stw	r10,TRAP(r11);					\
	li	r10,MSR_KERNEL;					\
	copyee(r10, r9);					\
	bl	tfer;						\
i##n:								\
	.long	hdlr;						\
	.long	ret

/* COPY_EE copies the EE bit (bit 16) from s (saved SRR1) into d */
#define COPY_EE(d, s)		rlwimi d,s,0,16,16
#define NOCOPY(d, s)

#define EXC_XFER_STD(n, hdlr)		\
	EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full,	\
			  ret_from_except_full)

/* LITE variants pass trap number n+1: the set low bit records that the
 * non-volatile GPRs are not saved in the frame (see do_user_signal) */
#define EXC_XFER_LITE(n, hdlr)		\
	EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \
			  ret_from_except)

#define EXC_XFER_EE(n, hdlr)		\
	EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \
			  ret_from_except_full)

#define EXC_XFER_EE_LITE(n, hdlr)	\
	EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \
			  ret_from_except)
349 | |||
/* System reset */
/* core99 pmac starts the seconary here by changing the vector, and
   putting it back to what it was (UnknownException) when done.  */
#if defined(CONFIG_GEMINI) && defined(CONFIG_SMP)
	. = 0x100
	b	__secondary_start_gemini
#else
	EXCEPTION(0x100, Reset, UnknownException, EXC_XFER_STD)
#endif

/* Machine check */
/*
 * On CHRP, this is complicated by the fact that we could get a
 * machine check inside RTAS, and we have no guarantee that certain
 * critical registers will have the values we expect.  The set of
 * registers that might have bad values includes all the GPRs
 * and all the BATs.  We indicate that we are in RTAS by putting
 * a non-zero value, the address of the exception frame to use,
 * in SPRG2.  The machine check handler checks SPRG2 and uses its
 * value if it is non-zero.  If we ever needed to free up SPRG2,
 * we could use a field in the thread_info or thread_struct instead.
 * (Other exception handlers assume that r1 is a valid kernel stack
 * pointer when we take an exception from supervisor mode.)
 *	-- paulus.
 */
	. = 0x200
	mtspr	SPRN_SPRG0,r10
	mtspr	SPRN_SPRG1,r11
	mfcr	r10
#ifdef CONFIG_PPC_CHRP
	/* SPRG2 != 0 means we machine-checked inside RTAS (see enter_rtas):
	 * skip the normal frame setup and branch to the RTAS path below */
	mfspr	r11,SPRN_SPRG2
	cmpwi	0,r11,0
	bne	7f
#endif /* CONFIG_PPC_CHRP */
	EXCEPTION_PROLOG_1
7:	EXCEPTION_PROLOG_2
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_CHRP
	mfspr	r4,SPRN_SPRG2
	cmpwi	cr1,r4,0
	bne	cr1,1f
#endif
	EXC_XFER_STD(0x200, MachineCheckException)
#ifdef CONFIG_PPC_CHRP
1:	b	machine_check_in_rtas
#endif

/* Data access exception. */
	. = 0x300
#ifdef CONFIG_PPC64BRIDGE
	b	DataAccess
DataAccessCont:
#else
DataAccess:
	EXCEPTION_PROLOG
#endif /* CONFIG_PPC64BRIDGE */
	mfspr	r10,SPRN_DSISR
	andis.	r0,r10,0xa470		/* weird error? */
	bne	1f			/* if not, try to put a PTE */
	mfspr	r4,SPRN_DAR		/* into the hash table */
	rlwinm	r3,r10,32-15,21,21	/* DSISR_STORE -> _PAGE_RW */
	bl	hash_page
1:	stw	r10,_DSISR(r11)
	mr	r5,r10
	mfspr	r4,SPRN_DAR
	EXC_XFER_EE_LITE(0x300, handle_page_fault)

#ifdef CONFIG_PPC64BRIDGE
/* SLB fault on data access. */
	. = 0x380
	b	DataSegment
#endif /* CONFIG_PPC64BRIDGE */

/* Instruction access exception. */
	. = 0x400
#ifdef CONFIG_PPC64BRIDGE
	b	InstructionAccess
InstructionAccessCont:
#else
InstructionAccess:
	EXCEPTION_PROLOG
#endif /* CONFIG_PPC64BRIDGE */
	andis.	r0,r9,0x4000		/* no pte found? */
	beq	1f			/* if so, try to put a PTE */
	li	r3,0			/* into the hash table */
	mr	r4,r12			/* SRR0 is fault address */
	bl	hash_page
1:	mr	r4,r12
	mr	r5,r9
	EXC_XFER_EE_LITE(0x400, handle_page_fault)

#ifdef CONFIG_PPC64BRIDGE
/* SLB fault on instruction access. */
	. = 0x480
	b	InstructionSegment
#endif /* CONFIG_PPC64BRIDGE */

/* External interrupt */
	EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)

/* Alignment exception */
	. = 0x600
Alignment:
	EXCEPTION_PROLOG
	mfspr	r4,SPRN_DAR
	stw	r4,_DAR(r11)
	mfspr	r5,SPRN_DSISR
	stw	r5,_DSISR(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE(0x600, AlignmentException)

/* Program check exception */
	EXCEPTION(0x700, ProgramCheck, ProgramCheckException, EXC_XFER_STD)

/* Floating-point unavailable */
	. = 0x800
FPUnavailable:
	EXCEPTION_PROLOG
	bne	load_up_fpu		/* if from user, just load it up */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE_LITE(0x800, KernelFP)

/* Decrementer */
	EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)

	EXCEPTION(0xa00, Trap_0a, UnknownException, EXC_XFER_EE)
	EXCEPTION(0xb00, Trap_0b, UnknownException, EXC_XFER_EE)

/* System call */
	. = 0xc00
SystemCall:
	EXCEPTION_PROLOG
	EXC_XFER_EE_LITE(0xc00, DoSyscall)

/* Single step - not used on 601 */
	EXCEPTION(0xd00, SingleStep, SingleStepException, EXC_XFER_STD)
	EXCEPTION(0xe00, Trap_0e, UnknownException, EXC_XFER_EE)

/*
 * The Altivec unavailable trap is at 0x0f20.  Foo.
 * We effectively remap it to 0x3000.
 * We include an altivec unavailable exception vector even if
 * not configured for Altivec, so that you can't panic a
 * non-altivec kernel running on a machine with altivec just
 * by executing an altivec instruction.
 */
	. = 0xf00
	b	Trap_0f

	. = 0xf20
	b	AltiVecUnavailable

Trap_0f:
	EXCEPTION_PROLOG
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE(0xf00, UnknownException)
506 | |||
/*
 * Handle TLB miss for instruction on 603/603e.
 * Note: we get an alternate set of r0 - r3 to use automatically.
 * Software walks the Linux page tables, converts the Linux PTE to a
 * hardware PTE in RPA, and reloads the TLB with tlbli; on failure it
 * synthesizes an ISI and falls through to InstructionAccess.
 */
	. = 0x1000
InstructionTLBMiss:
/*
 * r0:	stored ctr
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	mfctr	r0
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_IMISS
	lis	r1,KERNELBASE@h		/* check if kernel address */
	cmplw	0,r3,r1
	mfspr	r2,SPRN_SPRG3
	li	r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
	lwz	r2,PGDIR(r2)
	blt+	112f
	lis	r2,swapper_pg_dir@ha	/* if kernel address, use */
	addi	r2,r2,swapper_pg_dir@l	/* kernel page table */
	mfspr	r1,SPRN_SRR1		/* and MSR_PR bit from SRR1 */
	rlwinm	r1,r1,32-12,29,29	/* shift MSR_PR to _PAGE_USER posn */
112:	tophys(r2,r2)
	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	InstructionAddressInvalid /* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r3,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r3		/* check access & ~permission */
	bne-	InstructionAddressInvalid /* return if access not permitted */
	ori	r3,r3,_PAGE_ACCESSED	/* set _PAGE_ACCESSED in pte */
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	stw	r3,0(r2)		/* update PTE (accessed bit) */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwinm	r1,r3,32-10,31,31	/* _PAGE_RW -> PP lsb */
	rlwinm	r2,r3,32-7,31,31	/* _PAGE_DIRTY -> PP lsb */
	and	r1,r1,r2		/* writable if _RW and _DIRTY */
	rlwimi	r3,r3,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r3,r3,32-1,31,31	/* _PAGE_USER -> PP lsb */
	ori	r1,r1,0xe14		/* clear out reserved bits and M */
	andc	r1,r3,r1		/* PP = user? (rw&dirty? 2: 3): 0 */
	mtspr	SPRN_RPA,r1
	mfspr	r3,SPRN_IMISS
	tlbli	r3			/* load the ITLB entry */
	mfspr	r3,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r3
	rfi
InstructionAddressInvalid:
	/* No valid translation: fake up SRR1/DSISR/DAR for an ISI and
	 * re-enter via the normal InstructionAccess exception path. */
	mfspr	r3,SPRN_SRR1
	rlwinm	r1,r3,9,6,6	/* Get load/store bit */

	addis	r1,r1,0x2000
	mtspr	SPRN_DSISR,r1	/* (shouldn't be needed) */
	mtctr	r0		/* Restore CTR */
	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
	or	r2,r2,r1
	mtspr	SPRN_SRR1,r2
	mfspr	r1,SPRN_IMISS	/* Get failing address */
	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
	rlwimi	r2,r2,1,30,30	/* change 1 -> 3 */
	xor	r1,r1,r2
	mtspr	SPRN_DAR,r1	/* Set fault address */
	mfmsr	r0		/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16	/* leave the alternate GPR set */
	mtcrf	0x80,r3		/* Restore CR0 */
	mtmsr	r0
	b	InstructionAccess
581 | |||
582 | /* | ||
583 | * Handle TLB miss for DATA Load operation on 603/603e | ||
/*
 * Software TLB reload: walk the two-level Linux page table for the
 * address in DMISS, check _PAGE_USER/_PAGE_PRESENT permissions, mark
 * the PTE accessed, convert it to hardware format and load it with
 * tlbld.  Runs with MSR_TGPR set, so r0-r3 are the shadow registers.
 */
584 | */ | ||
585 | . = 0x1100 | ||
586 | DataLoadTLBMiss: | ||
587 | /* | ||
588 | * r0: stored ctr | ||
589 | * r1: linux style pte ( later becomes ppc hardware pte ) | ||
590 | * r2: ptr to linux-style pte | ||
591 | * r3: scratch | ||
592 | */ | ||
/* hardware clobbers CTR in the miss handler path; save it in r0 */
593 | mfctr r0 | ||
594 | /* Get PTE (linux-style) and check access */ | ||
595 | mfspr r3,SPRN_DMISS | ||
596 | lis r1,KERNELBASE@h /* check if kernel address */ | ||
597 | cmplw 0,r3,r1 | ||
/* SPRG3 holds the phys address of current's thread_struct */
598 | mfspr r2,SPRN_SPRG3 | ||
599 | li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */ | ||
600 | lwz r2,PGDIR(r2) | ||
601 | blt+ 112f | ||
602 | lis r2,swapper_pg_dir@ha /* if kernel address, use */ | ||
603 | addi r2,r2,swapper_pg_dir@l /* kernel page table */ | ||
604 | mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */ | ||
605 | rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ | ||
606 | 112: tophys(r2,r2) | ||
607 | rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ | ||
608 | lwz r2,0(r2) /* get pmd entry */ | ||
609 | rlwinm. r2,r2,0,0,19 /* extract address of pte page */ | ||
610 | beq- DataAddressInvalid /* return if no mapping */ | ||
611 | rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */ | ||
612 | lwz r3,0(r2) /* get linux-style pte */ | ||
613 | andc. r1,r1,r3 /* check access & ~permission */ | ||
614 | bne- DataAddressInvalid /* return if access not permitted */ | ||
615 | ori r3,r3,_PAGE_ACCESSED /* set _PAGE_ACCESSED in pte */ | ||
616 | /* | ||
617 | * NOTE! We are assuming this is not an SMP system, otherwise | ||
618 | * we would need to update the pte atomically with lwarx/stwcx. | ||
619 | */ | ||
620 | stw r3,0(r2) /* update PTE (accessed bit) */ | ||
621 | /* Convert linux-style PTE to low word of PPC-style PTE */ | ||
622 | rlwinm r1,r3,32-10,31,31 /* _PAGE_RW -> PP lsb */ | ||
623 | rlwinm r2,r3,32-7,31,31 /* _PAGE_DIRTY -> PP lsb */ | ||
624 | and r1,r1,r2 /* writable if _RW and _DIRTY */ | ||
625 | rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */ | ||
626 | rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */ | ||
627 | ori r1,r1,0xe14 /* clear out reserved bits and M */ | ||
628 | andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */ | ||
/* RPA holds the hardware PTE word that tlbld will install */
629 | mtspr SPRN_RPA,r1 | ||
630 | mfspr r3,SPRN_DMISS | ||
631 | tlbld r3 | ||
632 | mfspr r3,SPRN_SRR1 /* Need to restore CR0 */ | ||
633 | mtcrf 0x80,r3 | ||
634 | rfi | ||
/*
 * DataAddressInvalid: reached from the 603 data TLB-miss handlers when
 * the page table walk fails or access is not permitted.  Synthesizes a
 * DSISR/DAR as a real DSI would, restores CTR/CR0 and the normal GPRs
 * (clears MSR_TGPR), then branches to the generic DataAccess handler.
 */
635 | DataAddressInvalid: | ||
636 | mfspr r3,SPRN_SRR1 | ||
637 | rlwinm r1,r3,9,6,6 /* Get load/store bit */ | ||
638 | addis r1,r1,0x2000 | ||
639 | mtspr SPRN_DSISR,r1 | ||
640 | mtctr r0 /* Restore CTR */ | ||
641 | andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */ | ||
642 | mtspr SPRN_SRR1,r2 | ||
643 | mfspr r1,SPRN_DMISS /* Get failing address */ | ||
644 | rlwinm. r2,r2,0,31,31 /* Check for little endian access */ | ||
645 | beq 20f /* Jump if big endian */ | ||
/* little-endian mode: un-munge the low address bits */
646 | xori r1,r1,3 | ||
647 | 20: mtspr SPRN_DAR,r1 /* Set fault address */ | ||
648 | mfmsr r0 /* Restore "normal" registers */ | ||
/* clear MSR_TGPR: switch back from shadow GPRs to the real ones */
649 | xoris r0,r0,MSR_TGPR>>16 | ||
650 | mtcrf 0x80,r3 /* Restore CR0 */ | ||
651 | mtmsr r0 | ||
652 | b DataAccess | ||
653 | |||
654 | /* | ||
655 | * Handle TLB miss for DATA Store on 603/603e | ||
/*
 * Same structure as DataLoadTLBMiss, but additionally requires
 * _PAGE_RW and sets _PAGE_DIRTY, so the installed hardware PTE can be
 * made writable immediately (PP = user? 2 : 0).
 */
656 | */ | ||
657 | . = 0x1200 | ||
658 | DataStoreTLBMiss: | ||
659 | /* | ||
660 | * r0: stored ctr | ||
661 | * r1: linux style pte ( later becomes ppc hardware pte ) | ||
662 | * r2: ptr to linux-style pte | ||
663 | * r3: scratch | ||
664 | */ | ||
665 | mfctr r0 | ||
666 | /* Get PTE (linux-style) and check access */ | ||
667 | mfspr r3,SPRN_DMISS | ||
668 | lis r1,KERNELBASE@h /* check if kernel address */ | ||
669 | cmplw 0,r3,r1 | ||
670 | mfspr r2,SPRN_SPRG3 | ||
/* a store must also pass the _PAGE_RW check */
671 | li r1,_PAGE_RW|_PAGE_USER|_PAGE_PRESENT /* access flags */ | ||
672 | lwz r2,PGDIR(r2) | ||
673 | blt+ 112f | ||
674 | lis r2,swapper_pg_dir@ha /* if kernel address, use */ | ||
675 | addi r2,r2,swapper_pg_dir@l /* kernel page table */ | ||
676 | mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */ | ||
677 | rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ | ||
678 | 112: tophys(r2,r2) | ||
679 | rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ | ||
680 | lwz r2,0(r2) /* get pmd entry */ | ||
681 | rlwinm. r2,r2,0,0,19 /* extract address of pte page */ | ||
682 | beq- DataAddressInvalid /* return if no mapping */ | ||
683 | rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */ | ||
684 | lwz r3,0(r2) /* get linux-style pte */ | ||
685 | andc. r1,r1,r3 /* check access & ~permission */ | ||
686 | bne- DataAddressInvalid /* return if access not permitted */ | ||
/* stores dirty the page, so set DIRTY as well as ACCESSED up front */
687 | ori r3,r3,_PAGE_ACCESSED|_PAGE_DIRTY | ||
688 | /* | ||
689 | * NOTE! We are assuming this is not an SMP system, otherwise | ||
690 | * we would need to update the pte atomically with lwarx/stwcx. | ||
691 | */ | ||
692 | stw r3,0(r2) /* update PTE (accessed/dirty bits) */ | ||
693 | /* Convert linux-style PTE to low word of PPC-style PTE */ | ||
694 | rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */ | ||
695 | li r1,0xe15 /* clear out reserved bits and M */ | ||
696 | andc r1,r3,r1 /* PP = user? 2: 0 */ | ||
697 | mtspr SPRN_RPA,r1 | ||
698 | mfspr r3,SPRN_DMISS | ||
699 | tlbld r3 | ||
700 | mfspr r3,SPRN_SRR1 /* Need to restore CR0 */ | ||
701 | mtcrf 0x80,r3 | ||
702 | rfi | ||
703 | |||
/*
 * Exception vectors 0x1300-0x2f00.  The EXCEPTION() macro emits a stub
 * at the given fixed offset that saves state and transfers to the named
 * C handler.  Without AltiVec support the assist exception falls back
 * to UnknownException.
 */
704 | #ifndef CONFIG_ALTIVEC | ||
705 | #define AltivecAssistException UnknownException | ||
706 | #endif | ||
707 | |||
708 | EXCEPTION(0x1300, Trap_13, InstructionBreakpoint, EXC_XFER_EE) | ||
709 | EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_EE) | ||
710 | EXCEPTION(0x1500, Trap_15, UnknownException, EXC_XFER_EE) | ||
/* POWER4 and 6xx place AltiVec-assist and thermal (TAU) at different vectors */
711 | #ifdef CONFIG_POWER4 | ||
712 | EXCEPTION(0x1600, Trap_16, UnknownException, EXC_XFER_EE) | ||
713 | EXCEPTION(0x1700, Trap_17, AltivecAssistException, EXC_XFER_EE) | ||
714 | EXCEPTION(0x1800, Trap_18, TAUException, EXC_XFER_STD) | ||
715 | #else /* !CONFIG_POWER4 */ | ||
716 | EXCEPTION(0x1600, Trap_16, AltivecAssistException, EXC_XFER_EE) | ||
717 | EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD) | ||
718 | EXCEPTION(0x1800, Trap_18, UnknownException, EXC_XFER_EE) | ||
719 | #endif /* CONFIG_POWER4 */ | ||
720 | EXCEPTION(0x1900, Trap_19, UnknownException, EXC_XFER_EE) | ||
721 | EXCEPTION(0x1a00, Trap_1a, UnknownException, EXC_XFER_EE) | ||
722 | EXCEPTION(0x1b00, Trap_1b, UnknownException, EXC_XFER_EE) | ||
723 | EXCEPTION(0x1c00, Trap_1c, UnknownException, EXC_XFER_EE) | ||
724 | EXCEPTION(0x1d00, Trap_1d, UnknownException, EXC_XFER_EE) | ||
725 | EXCEPTION(0x1e00, Trap_1e, UnknownException, EXC_XFER_EE) | ||
726 | EXCEPTION(0x1f00, Trap_1f, UnknownException, EXC_XFER_EE) | ||
727 | EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_EE) | ||
728 | EXCEPTION(0x2100, Trap_21, UnknownException, EXC_XFER_EE) | ||
729 | EXCEPTION(0x2200, Trap_22, UnknownException, EXC_XFER_EE) | ||
730 | EXCEPTION(0x2300, Trap_23, UnknownException, EXC_XFER_EE) | ||
731 | EXCEPTION(0x2400, Trap_24, UnknownException, EXC_XFER_EE) | ||
732 | EXCEPTION(0x2500, Trap_25, UnknownException, EXC_XFER_EE) | ||
733 | EXCEPTION(0x2600, Trap_26, UnknownException, EXC_XFER_EE) | ||
734 | EXCEPTION(0x2700, Trap_27, UnknownException, EXC_XFER_EE) | ||
735 | EXCEPTION(0x2800, Trap_28, UnknownException, EXC_XFER_EE) | ||
736 | EXCEPTION(0x2900, Trap_29, UnknownException, EXC_XFER_EE) | ||
737 | EXCEPTION(0x2a00, Trap_2a, UnknownException, EXC_XFER_EE) | ||
738 | EXCEPTION(0x2b00, Trap_2b, UnknownException, EXC_XFER_EE) | ||
739 | EXCEPTION(0x2c00, Trap_2c, UnknownException, EXC_XFER_EE) | ||
740 | EXCEPTION(0x2d00, Trap_2d, UnknownException, EXC_XFER_EE) | ||
741 | EXCEPTION(0x2e00, Trap_2e, UnknownException, EXC_XFER_EE) | ||
742 | EXCEPTION(0x2f00, MOLTrampoline, UnknownException, EXC_XFER_EE_LITE) | ||
743 | |||
/* export the 0x2f00 stub (label i0x2f00, emitted by EXCEPTION) for Mac-on-Linux */
744 | .globl mol_trampoline | ||
745 | .set mol_trampoline, i0x2f00 | ||
746 | |||
/*
 * AltiVec-unavailable exception (placed at 0x3000).  If taken from user
 * mode (CR0 set up by EXCEPTION_PROLOG), lazily load the AltiVec state;
 * otherwise hand off to the generic handler at trap number 0xf20.
 */
747 | . = 0x3000 | ||
748 | |||
749 | AltiVecUnavailable: | ||
750 | EXCEPTION_PROLOG | ||
751 | #ifdef CONFIG_ALTIVEC | ||
752 | bne load_up_altivec /* if from user, just load it up */ | ||
753 | #endif /* CONFIG_ALTIVEC */ | ||
754 | EXC_XFER_EE_LITE(0xf20, AltivecUnavailException) | ||
755 | |||
/*
 * PPC64BRIDGE variants: on 64-bit-bridge CPUs the data/instruction
 * access handlers and the 64-bit-only segment exceptions (0x380/0x480)
 * are defined here; they save state then continue in common code.
 */
756 | #ifdef CONFIG_PPC64BRIDGE | ||
757 | DataAccess: | ||
758 | EXCEPTION_PROLOG | ||
759 | b DataAccessCont | ||
760 | |||
761 | InstructionAccess: | ||
762 | EXCEPTION_PROLOG | ||
763 | b InstructionAccessCont | ||
764 | |||
/* data segment fault: record DAR in the pt_regs before calling out */
765 | DataSegment: | ||
766 | EXCEPTION_PROLOG | ||
767 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
768 | mfspr r4,SPRN_DAR | ||
769 | stw r4,_DAR(r11) | ||
770 | EXC_XFER_STD(0x380, UnknownException) | ||
771 | |||
772 | InstructionSegment: | ||
773 | EXCEPTION_PROLOG | ||
774 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
775 | EXC_XFER_STD(0x480, UnknownException) | ||
776 | #endif /* CONFIG_PPC64BRIDGE */ | ||
777 | |||
778 | /* | ||
779 | * This task wants to use the FPU now. | ||
780 | * On UP, disable FP for the task which had the FPU previously, | ||
781 | * and save its floating-point registers in its thread_struct. | ||
782 | * Load up this task's FP registers from its thread_struct, | ||
783 | * enable the FPU for the current task and return to the task. | ||
/*
 * Entered from the FP-unavailable exception with the exception frame
 * registers live (r9 = saved MSR, r11 = frame); falls through into
 * fast_exception_return.  Runs with translation off (uses tophys).
 */
784 | */ | ||
785 | load_up_fpu: | ||
786 | mfmsr r5 | ||
787 | ori r5,r5,MSR_FP | ||
788 | #ifdef CONFIG_PPC64BRIDGE | ||
789 | clrldi r5,r5,1 /* turn off 64-bit mode */ | ||
790 | #endif /* CONFIG_PPC64BRIDGE */ | ||
791 | SYNC | ||
792 | MTMSRD(r5) /* enable use of fpu now */ | ||
793 | isync | ||
794 | /* | ||
795 | * For SMP, we don't do lazy FPU switching because it just gets too | ||
796 | * horrendously complex, especially when a task switches from one CPU | ||
797 | * to another. Instead we call giveup_fpu in switch_to. | ||
798 | */ | ||
799 | #ifndef CONFIG_SMP | ||
800 | tophys(r6,0) /* get __pa constant */ | ||
801 | addis r3,r6,last_task_used_math@ha | ||
802 | lwz r4,last_task_used_math@l(r3) | ||
803 | cmpwi 0,r4,0 | ||
804 | beq 1f | ||
/* save previous owner's FP state into its thread_struct (phys addr) */
805 | add r4,r4,r6 | ||
806 | addi r4,r4,THREAD /* want last_task_used_math->thread */ | ||
807 | SAVE_32FPRS(0, r4) | ||
808 | mffs fr0 | ||
809 | stfd fr0,THREAD_FPSCR-4(r4) | ||
810 | lwz r5,PT_REGS(r4) | ||
811 | add r5,r5,r6 | ||
812 | lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
813 | li r10,MSR_FP|MSR_FE0|MSR_FE1 | ||
814 | andc r4,r4,r10 /* disable FP for previous task */ | ||
815 | stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
816 | 1: | ||
817 | #endif /* CONFIG_SMP */ | ||
818 | /* enable use of FP after return */ | ||
819 | mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */ | ||
820 | lwz r4,THREAD_FPEXC_MODE(r5) | ||
/* r9 is the saved MSR that fast_exception_return will put in SRR1 */
821 | ori r9,r9,MSR_FP /* enable FP for current */ | ||
822 | or r9,r9,r4 | ||
823 | lfd fr0,THREAD_FPSCR-4(r5) | ||
824 | mtfsf 0xff,fr0 | ||
825 | REST_32FPRS(0, r5) | ||
826 | #ifndef CONFIG_SMP | ||
/* record current as the new lazy-FPU owner (virtual address) */
827 | subi r4,r5,THREAD | ||
828 | sub r4,r4,r6 | ||
829 | stw r4,last_task_used_math@l(r3) | ||
830 | #endif /* CONFIG_SMP */ | ||
831 | /* restore registers and return */ | ||
832 | /* we haven't used ctr or xer or lr */ | ||
833 | /* fall through to fast_exception_return */ | ||
834 | |||
/*
 * fast_exception_return: return from an exception without going through
 * the full ret_from_except path.  Expects r9 = saved SRR1, r11 = frame
 * pointer, r12 = saved SRR0.  If the interrupt is not recoverable
 * (MSR_RI clear), either restart a known-restartable exit sequence or
 * declare the trap nonrecoverable.
 */
835 | .globl fast_exception_return | ||
836 | fast_exception_return: | ||
837 | andi. r10,r9,MSR_RI /* check for recoverable interrupt */ | ||
838 | beq 1f /* if not, we've got problems */ | ||
839 | 2: REST_4GPRS(3, r11) | ||
840 | lwz r10,_CCR(r11) | ||
841 | REST_GPR(1, r11) | ||
842 | mtcr r10 | ||
843 | lwz r10,_LINK(r11) | ||
844 | mtlr r10 | ||
845 | REST_GPR(10, r11) | ||
846 | mtspr SPRN_SRR1,r9 | ||
847 | mtspr SPRN_SRR0,r12 | ||
848 | REST_GPR(9, r11) | ||
849 | REST_GPR(12, r11) | ||
/* r11 last: it is the frame pointer we are restoring from */
850 | lwz r11,GPR11(r11) | ||
851 | SYNC | ||
852 | RFI | ||
853 | |||
854 | /* check if the exception happened in a restartable section */ | ||
855 | 1: lis r3,exc_exit_restart_end@ha | ||
856 | addi r3,r3,exc_exit_restart_end@l | ||
857 | cmplw r12,r3 | ||
858 | bge 3f | ||
859 | lis r4,exc_exit_restart@ha | ||
860 | addi r4,r4,exc_exit_restart@l | ||
861 | cmplw r12,r4 | ||
862 | blt 3f | ||
/* bump the fee_restarts counter (accessed via its physical address) */
863 | lis r3,fee_restarts@ha | ||
864 | tophys(r3,r3) | ||
865 | lwz r5,fee_restarts@l(r3) | ||
866 | addi r5,r5,1 | ||
867 | stw r5,fee_restarts@l(r3) | ||
868 | mr r12,r4 /* restart at exc_exit_restart */ | ||
869 | b 2b | ||
870 | |||
871 | .comm fee_restarts,4 | ||
872 | |||
873 | /* aargh, a nonrecoverable interrupt, panic */ | ||
874 | /* aargh, we don't know which trap this is */ | ||
875 | /* but the 601 doesn't implement the RI bit, so assume it's OK */ | ||
876 | 3: | ||
877 | BEGIN_FTR_SECTION | ||
878 | b 2b | ||
879 | END_FTR_SECTION_IFSET(CPU_FTR_601) | ||
/* mark the frame with trap -1 and hand off to the C panic path */
880 | li r10,-1 | ||
881 | stw r10,TRAP(r11) | ||
882 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
883 | li r10,MSR_KERNEL | ||
884 | bl transfer_to_handler_full | ||
885 | .long nonrecoverable_exception | ||
886 | .long ret_from_except | ||
887 | |||
888 | /* | ||
889 | * FP unavailable trap from kernel - print a message, but let | ||
890 | * the task use FP in the kernel until it returns to user mode. | ||
/*
 * Sets MSR_FP in the saved MSR on the exception frame (r1 points at
 * pt_regs here), logs the offending PC via printk, then resumes.
 */
891 | */ | ||
892 | KernelFP: | ||
893 | lwz r3,_MSR(r1) | ||
894 | ori r3,r3,MSR_FP | ||
895 | stw r3,_MSR(r1) /* enable use of FP after return */ | ||
/* printk(fmt, current, regs->nip) */
896 | lis r3,86f@h | ||
897 | ori r3,r3,86f@l | ||
898 | mr r4,r2 /* current */ | ||
899 | lwz r5,_NIP(r1) | ||
900 | bl printk | ||
901 | b ret_from_except | ||
902 | 86: .string "floating point used in kernel (task=%p, pc=%x)\n" | ||
903 | .align 4,0 | ||
904 | |||
905 | #ifdef CONFIG_ALTIVEC | ||
906 | /* Note that the AltiVec support is closely modeled after the FP | ||
907 | * support. Changes to one are likely to be applicable to the | ||
908 | * other! */ | ||
/*
 * Lazy AltiVec context switch, mirror of load_up_fpu: save the previous
 * owner's vector state (UP only), restore current's VSCR and vector
 * registers, set MSR_VEC in the saved MSR (r9) and return via
 * fast_exception_return.
 */
909 | load_up_altivec: | ||
910 | /* | ||
911 | * Disable AltiVec for the task which had AltiVec previously, | ||
912 | * and save its AltiVec registers in its thread_struct. | ||
913 | * Enables AltiVec for use in the kernel on return. | ||
914 | * On SMP we know the AltiVec units are free, since we give it up every | ||
915 | * switch. -- Kumar | ||
916 | */ | ||
917 | mfmsr r5 | ||
918 | oris r5,r5,MSR_VEC@h | ||
919 | MTMSRD(r5) /* enable use of AltiVec now */ | ||
920 | isync | ||
921 | /* | ||
922 | * For SMP, we don't do lazy AltiVec switching because it just gets too | ||
923 | * horrendously complex, especially when a task switches from one CPU | ||
924 | * to another. Instead we call giveup_altivec in switch_to. | ||
925 | */ | ||
926 | #ifndef CONFIG_SMP | ||
927 | tophys(r6,0) | ||
928 | addis r3,r6,last_task_used_altivec@ha | ||
929 | lwz r4,last_task_used_altivec@l(r3) | ||
930 | cmpwi 0,r4,0 | ||
931 | beq 1f | ||
/* save previous owner's vector registers and VSCR (phys addressing) */
932 | add r4,r4,r6 | ||
933 | addi r4,r4,THREAD /* want THREAD of last_task_used_altivec */ | ||
934 | SAVE_32VR(0,r10,r4) | ||
935 | mfvscr vr0 | ||
936 | li r10,THREAD_VSCR | ||
937 | stvx vr0,r10,r4 | ||
938 | lwz r5,PT_REGS(r4) | ||
939 | add r5,r5,r6 | ||
940 | lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
941 | lis r10,MSR_VEC@h | ||
942 | andc r4,r4,r10 /* disable altivec for previous task */ | ||
943 | stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
944 | 1: | ||
945 | #endif /* CONFIG_SMP */ | ||
946 | /* enable use of AltiVec after return */ | ||
947 | oris r9,r9,MSR_VEC@h | ||
948 | mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */ | ||
/* mark that this task has used vector registers at least once */
949 | li r4,1 | ||
950 | li r10,THREAD_VSCR | ||
951 | stw r4,THREAD_USED_VR(r5) | ||
952 | lvx vr0,r10,r5 | ||
953 | mtvscr vr0 | ||
954 | REST_32VR(0,r10,r5) | ||
955 | #ifndef CONFIG_SMP | ||
/* record current as the new lazy-AltiVec owner (virtual address) */
956 | subi r4,r5,THREAD | ||
957 | sub r4,r4,r6 | ||
958 | stw r4,last_task_used_altivec@l(r3) | ||
959 | #endif /* CONFIG_SMP */ | ||
960 | /* restore registers and return */ | ||
961 | /* we haven't used ctr or xer or lr */ | ||
962 | b fast_exception_return | ||
963 | |||
964 | /* | ||
965 | * AltiVec unavailable trap from kernel - print a message, but let | ||
966 | * the task use AltiVec in the kernel until it returns to user mode. | ||
/*
 * AltiVec counterpart of KernelFP: set MSR_VEC in the saved MSR on the
 * frame, log task and PC via printk, then resume.
 */
967 | */ | ||
968 | KernelAltiVec: | ||
969 | lwz r3,_MSR(r1) | ||
970 | oris r3,r3,MSR_VEC@h | ||
971 | stw r3,_MSR(r1) /* enable use of AltiVec after return */ | ||
972 | lis r3,87f@h | ||
973 | ori r3,r3,87f@l | ||
974 | mr r4,r2 /* current */ | ||
975 | lwz r5,_NIP(r1) | ||
976 | bl printk | ||
977 | b ret_from_except | ||
978 | 87: .string "AltiVec used in kernel (task=%p, pc=%x) \n" | ||
979 | .align 4,0 | ||
980 | |||
981 | /* | ||
982 | * giveup_altivec(tsk) | ||
983 | * Disable AltiVec for the task given as the argument, | ||
984 | * and save the AltiVec registers in its thread_struct. | ||
985 | * Enables AltiVec for use in the kernel on return. | ||
/*
 * r3 = task_struct pointer (may be NULL -> nothing to save).
 * Called with translation on (virtual addresses, unlike load_up_altivec).
 */
986 | */ | ||
987 | |||
988 | .globl giveup_altivec | ||
989 | giveup_altivec: | ||
990 | mfmsr r5 | ||
991 | oris r5,r5,MSR_VEC@h | ||
992 | SYNC | ||
993 | MTMSRD(r5) /* enable use of AltiVec now */ | ||
994 | isync | ||
995 | cmpwi 0,r3,0 | ||
996 | beqlr- /* if no previous owner, done */ | ||
997 | addi r3,r3,THREAD /* want THREAD of task */ | ||
/* the cmpwi result (task has pt_regs?) is consumed by the beq below */
998 | lwz r5,PT_REGS(r3) | ||
999 | cmpwi 0,r5,0 | ||
1000 | SAVE_32VR(0, r4, r3) | ||
1001 | mfvscr vr0 | ||
1002 | li r4,THREAD_VSCR | ||
1003 | stvx vr0,r4,r3 | ||
1004 | beq 1f | ||
1005 | lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
1006 | lis r3,MSR_VEC@h | ||
1007 | andc r4,r4,r3 /* disable AltiVec for previous task */ | ||
1008 | stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
1009 | 1: | ||
1010 | #ifndef CONFIG_SMP | ||
/* UP lazy switching: no task owns the AltiVec unit any more */
1011 | li r5,0 | ||
1012 | lis r4,last_task_used_altivec@ha | ||
1013 | stw r5,last_task_used_altivec@l(r4) | ||
1014 | #endif /* CONFIG_SMP */ | ||
1015 | blr | ||
1016 | #endif /* CONFIG_ALTIVEC */ | ||
1017 | |||
1018 | /* | ||
1019 | * giveup_fpu(tsk) | ||
1020 | * Disable FP for the task given as the argument, | ||
1021 | * and save the floating-point registers in its thread_struct. | ||
1022 | * Enables the FPU for use in the kernel on return. | ||
/*
 * r3 = task_struct pointer (may be NULL -> nothing to save).
 * SYNC_601/ISYNC_601 are extra barriers needed only on the 601.
 */
1023 | */ | ||
1024 | .globl giveup_fpu | ||
1025 | giveup_fpu: | ||
1026 | mfmsr r5 | ||
1027 | ori r5,r5,MSR_FP | ||
1028 | SYNC_601 | ||
1029 | ISYNC_601 | ||
1030 | MTMSRD(r5) /* enable use of fpu now */ | ||
1031 | SYNC_601 | ||
1032 | isync | ||
1033 | cmpwi 0,r3,0 | ||
1034 | beqlr- /* if no previous owner, done */ | ||
1035 | addi r3,r3,THREAD /* want THREAD of task */ | ||
/* the cmpwi result (task has pt_regs?) is consumed by the beq below */
1036 | lwz r5,PT_REGS(r3) | ||
1037 | cmpwi 0,r5,0 | ||
1038 | SAVE_32FPRS(0, r3) | ||
1039 | mffs fr0 | ||
1040 | stfd fr0,THREAD_FPSCR-4(r3) | ||
1041 | beq 1f | ||
1042 | lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
1043 | li r3,MSR_FP|MSR_FE0|MSR_FE1 | ||
1044 | andc r4,r4,r3 /* disable FP for previous task */ | ||
1045 | stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
1046 | 1: | ||
1047 | #ifndef CONFIG_SMP | ||
/* UP lazy switching: no task owns the FPU any more */
1048 | li r5,0 | ||
1049 | lis r4,last_task_used_math@ha | ||
1050 | stw r5,last_task_used_math@l(r4) | ||
1051 | #endif /* CONFIG_SMP */ | ||
1052 | blr | ||
1053 | |||
1054 | /* | ||
1055 | * This code is jumped to from the startup code to copy | ||
1056 | * the kernel image to physical address 0. | ||
/*
 * Copies the first 0x4000 bytes, jumps into the already-copied region
 * (label 4 below, addressed relative to the destination in r3), then
 * copies the rest of the image up to klimit and enters turn_on_mmu.
 * NOTE(review): r4 (source) and r26 appear to be set up by the caller
 * before this view — confirm against the earlier part of head.S.
 */
1057 | */ | ||
1058 | relocate_kernel: | ||
1059 | addis r9,r26,klimit@ha /* fetch klimit */ | ||
1060 | lwz r25,klimit@l(r9) | ||
1061 | addis r25,r25,-KERNELBASE@h | ||
1062 | li r3,0 /* Destination base address */ | ||
1063 | li r6,0 /* Destination offset */ | ||
1064 | li r5,0x4000 /* # bytes of memory to copy */ | ||
1065 | bl copy_and_flush /* copy the first 0x4000 bytes */ | ||
1066 | addi r0,r3,4f@l /* jump to the address of 4f */ | ||
1067 | mtctr r0 /* in copy and do the rest. */ | ||
1068 | bctr /* jump to the copy */ | ||
1069 | 4: mr r5,r25 | ||
1070 | bl copy_and_flush /* copy the rest */ | ||
1071 | b turn_on_mmu | ||
1072 | |||
1073 | /* | ||
1074 | * Copy routine used to copy the kernel to start at physical address 0 | ||
1075 | * and flush and invalidate the caches as needed. | ||
1076 | * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset | ||
1077 | * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5. | ||
/*
 * Copies one cache line per outer iteration, then dcbst+icbi so the
 * copied code is coherent in the instruction cache before being run.
 */
1078 | */ | ||
1079 | copy_and_flush: | ||
/* pre-bias r5/r6 by -4 so the inner loop can pre-increment r6 */
1080 | addi r5,r5,-4 | ||
1081 | addi r6,r6,-4 | ||
1082 | 4: li r0,L1_CACHE_LINE_SIZE/4 | ||
1083 | mtctr r0 | ||
1084 | 3: addi r6,r6,4 /* copy a cache line */ | ||
1085 | lwzx r0,r6,r4 | ||
1086 | stwx r0,r6,r3 | ||
1087 | bdnz 3b | ||
1088 | dcbst r6,r3 /* write it to memory */ | ||
1089 | sync | ||
1090 | icbi r6,r3 /* flush the icache line */ | ||
1091 | cmplw 0,r6,r5 | ||
1092 | blt 4b | ||
1093 | sync /* additional sync needed on g4 */ | ||
1094 | isync | ||
/* undo the bias so r5 is unchanged and r6 ends up >= r5 */
1095 | addi r5,r5,4 | ||
1096 | addi r6,r6,4 | ||
1097 | blr | ||
1098 | |||
1099 | #ifdef CONFIG_APUS | ||
1100 | /* | ||
1101 | * On APUS the physical base address of the kernel is not known at compile | ||
1102 | * time, which means the __pa/__va constants used are incorrect. In the | ||
1103 | * __init section is recorded the virtual addresses of instructions using | ||
1104 | * these constants, so all that has to be done is fix these before | ||
1105 | * continuing the kernel boot. | ||
1106 | * | ||
1107 | * r4 = The physical address of the kernel base. | ||
/*
 * Self-patching: walks the __vtop and __ptov tables of recorded
 * instruction addresses and rewrites the low 16 bits of each
 * instruction with the real virt->phys / phys->virt offset, flushing
 * each patched line out of the d-cache and invalidating the i-cache.
 */
1108 | */ | ||
1109 | fix_mem_constants: | ||
1110 | mr r10,r4 | ||
1111 | addis r10,r10,-KERNELBASE@h /* virt_to_phys constant */ | ||
1112 | neg r11,r10 /* phys_to_virt constant */ | ||
1113 | |||
1114 | lis r12,__vtop_table_begin@h | ||
1115 | ori r12,r12,__vtop_table_begin@l | ||
1116 | add r12,r12,r10 /* table begin phys address */ | ||
1117 | lis r13,__vtop_table_end@h | ||
1118 | ori r13,r13,__vtop_table_end@l | ||
1119 | add r13,r13,r10 /* table end phys address */ | ||
/* bias by -4 so the lwzu pre-increment starts at the first entry */
1120 | subi r12,r12,4 | ||
1121 | subi r13,r13,4 | ||
1122 | 1: lwzu r14,4(r12) /* virt address of instruction */ | ||
1123 | add r14,r14,r10 /* phys address of instruction */ | ||
1124 | lwz r15,0(r14) /* instruction, now insert top */ | ||
1125 | rlwimi r15,r10,16,16,31 /* half of vp const in low half */ | ||
1126 | stw r15,0(r14) /* of instruction and restore. */ | ||
1127 | dcbst r0,r14 /* write it to memory */ | ||
1128 | sync | ||
1129 | icbi r0,r14 /* flush the icache line */ | ||
1130 | cmpw r12,r13 | ||
1131 | bne 1b | ||
1132 | sync /* additional sync needed on g4 */ | ||
1133 | isync | ||
1134 | |||
1135 | /* | ||
1136 | * Map the memory where the exception handlers will | ||
1137 | * be copied to when hash constants have been patched. | ||
1138 | */ | ||
1139 | #ifdef CONFIG_APUS_FAST_EXCEPT | ||
1140 | lis r8,0xfff0 | ||
1141 | #else | ||
1142 | lis r8,0 | ||
1143 | #endif | ||
1144 | ori r8,r8,0x2 /* 128KB, supervisor */ | ||
1145 | mtspr SPRN_DBAT3U,r8 | ||
1146 | mtspr SPRN_DBAT3L,r8 | ||
1147 | |||
/* second pass: same patching loop for the phys->virt table, using r11 */
1148 | lis r12,__ptov_table_begin@h | ||
1149 | ori r12,r12,__ptov_table_begin@l | ||
1150 | add r12,r12,r10 /* table begin phys address */ | ||
1151 | lis r13,__ptov_table_end@h | ||
1152 | ori r13,r13,__ptov_table_end@l | ||
1153 | add r13,r13,r10 /* table end phys address */ | ||
1154 | subi r12,r12,4 | ||
1155 | subi r13,r13,4 | ||
1156 | 1: lwzu r14,4(r12) /* virt address of instruction */ | ||
1157 | add r14,r14,r10 /* phys address of instruction */ | ||
1158 | lwz r15,0(r14) /* instruction, now insert top */ | ||
1159 | rlwimi r15,r11,16,16,31 /* half of pv const in low half*/ | ||
1160 | stw r15,0(r14) /* of instruction and restore. */ | ||
1161 | dcbst r0,r14 /* write it to memory */ | ||
1162 | sync | ||
1163 | icbi r0,r14 /* flush the icache line */ | ||
1164 | cmpw r12,r13 | ||
1165 | bne 1b | ||
1166 | |||
1167 | sync /* additional sync needed on g4 */ | ||
1168 | isync /* No speculative loading until now */ | ||
1169 | blr | ||
1170 | |||
1171 | /*********************************************************************** | ||
1172 | * Please note that on APUS the exception handlers are located at the | ||
1173 | * physical address 0xfff0000. For this reason, the exception handlers | ||
1174 | * cannot use relative branches to access the code below. | ||
1175 | ***********************************************************************/ | ||
1176 | #endif /* CONFIG_APUS */ | ||
1177 | |||
/*
 * SMP secondary-CPU entry points.  Platform-specific stubs (Gemini,
 * PowerSurge) set the CPU number in r24 and funnel into
 * __secondary_start, which runs CPU setup, installs a stack, loads the
 * MMU and RFIs into start_secondary with the MMU on.
 */
1178 | #ifdef CONFIG_SMP | ||
1179 | #ifdef CONFIG_GEMINI | ||
/* Gemini: invalidate and disable the i-cache before PROM init */
1180 | .globl __secondary_start_gemini | ||
1181 | __secondary_start_gemini: | ||
1182 | mfspr r4,SPRN_HID0 | ||
1183 | ori r4,r4,HID0_ICFI | ||
1184 | li r3,0 | ||
1185 | ori r3,r3,HID0_ICE | ||
1186 | andc r4,r4,r3 | ||
1187 | mtspr SPRN_HID0,r4 | ||
1188 | sync | ||
1189 | bl gemini_prom_init | ||
1190 | b __secondary_start | ||
1191 | #endif /* CONFIG_GEMINI */ | ||
1192 | .globl __secondary_start_psurge | ||
1193 | __secondary_start_psurge: | ||
1194 | li r24,1 /* cpu # */ | ||
1195 | b __secondary_start_psurge99 | ||
1196 | .globl __secondary_start_psurge2 | ||
1197 | __secondary_start_psurge2: | ||
1198 | li r24,2 /* cpu # */ | ||
1199 | b __secondary_start_psurge99 | ||
1200 | .globl __secondary_start_psurge3 | ||
1201 | __secondary_start_psurge3: | ||
1202 | li r24,3 /* cpu # */ | ||
1203 | b __secondary_start_psurge99 | ||
1204 | __secondary_start_psurge99: | ||
1205 | /* we come in here with IR=0 and DR=1, and DBAT 0 | ||
1206 | set to map the 0xf0000000 - 0xffffffff region */ | ||
1207 | mfmsr r0 | ||
1208 | rlwinm r0,r0,0,28,26 /* clear DR (0x10) */ | ||
1209 | SYNC | ||
1210 | mtmsr r0 | ||
1211 | isync | ||
1212 | |||
1213 | .globl __secondary_start | ||
1214 | __secondary_start: | ||
1215 | #ifdef CONFIG_PPC64BRIDGE | ||
1216 | mfmsr r0 | ||
1217 | clrldi r0,r0,1 /* make sure it's in 32-bit mode */ | ||
1218 | SYNC | ||
1219 | MTMSRD(r0) | ||
1220 | isync | ||
1221 | #endif | ||
1222 | /* Copy some CPU settings from CPU 0 */ | ||
1223 | bl __restore_cpu_setup | ||
1224 | |||
/* r3 = phys-virt offset, r4 = CPU number for identify_cpu */
1225 | lis r3,-KERNELBASE@h | ||
1226 | mr r4,r24 | ||
1227 | bl identify_cpu | ||
1228 | bl call_setup_cpu /* Call setup_cpu for this CPU */ | ||
1229 | #ifdef CONFIG_6xx | ||
1230 | lis r3,-KERNELBASE@h | ||
1231 | bl init_idle_6xx | ||
1232 | #endif /* CONFIG_6xx */ | ||
1233 | #ifdef CONFIG_POWER4 | ||
1234 | lis r3,-KERNELBASE@h | ||
1235 | bl init_idle_power4 | ||
1236 | #endif /* CONFIG_POWER4 */ | ||
1237 | |||
1238 | /* get current_thread_info and current */ | ||
1239 | lis r1,secondary_ti@ha | ||
1240 | tophys(r1,r1) | ||
1241 | lwz r1,secondary_ti@l(r1) | ||
1242 | tophys(r2,r1) | ||
1243 | lwz r2,TI_TASK(r2) | ||
1244 | |||
1245 | /* stack */ | ||
/* stack grows down from the top of the thread_info page; write a
   zero back-chain word at the base */
1246 | addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD | ||
1247 | li r0,0 | ||
1248 | tophys(r3,r1) | ||
1249 | stw r0,0(r3) | ||
1250 | |||
1251 | /* load up the MMU */ | ||
1252 | bl load_up_mmu | ||
1253 | |||
1254 | /* ptr to phys current thread */ | ||
1255 | tophys(r4,r2) | ||
1256 | addi r4,r4,THREAD /* phys address of our thread_struct */ | ||
1257 | CLR_TOP32(r4) | ||
1258 | mtspr SPRN_SPRG3,r4 | ||
1259 | li r3,0 | ||
1260 | mtspr SPRN_SPRG2,r3 /* 0 => not in RTAS */ | ||
1261 | |||
1262 | /* enable MMU and jump to start_secondary */ | ||
1263 | li r4,MSR_KERNEL | ||
1264 | FIX_SRR1(r4,r5) | ||
1265 | lis r3,start_secondary@h | ||
1266 | ori r3,r3,start_secondary@l | ||
1267 | mtspr SPRN_SRR0,r3 | ||
1268 | mtspr SPRN_SRR1,r4 | ||
1269 | SYNC | ||
1270 | RFI | ||
1271 | #endif /* CONFIG_SMP */ | ||
1272 | |||
1273 | /* | ||
1274 | * Those generic dummy functions are kept for CPUs not | ||
1275 | * included in CONFIG_6xx | ||
/* No-op stubs so the cputable setup/save/restore hooks always resolve. */
1276 | */ | ||
1277 | _GLOBAL(__setup_cpu_power3) | ||
1278 | blr | ||
1279 | _GLOBAL(__setup_cpu_generic) | ||
1280 | blr | ||
1281 | |||
/* real implementations live in cpu_setup_6xx.S / cpu_setup_power4.S */
1282 | #if !defined(CONFIG_6xx) && !defined(CONFIG_POWER4) | ||
1283 | _GLOBAL(__save_cpu_setup) | ||
1284 | blr | ||
1285 | _GLOBAL(__restore_cpu_setup) | ||
1286 | blr | ||
1287 | #endif /* !defined(CONFIG_6xx) && !defined(CONFIG_POWER4) */ | ||
1288 | |||
1289 | |||
1290 | /* | ||
1291 | * Load stuff into the MMU. Intended to be called with | ||
1292 | * IR=0 and DR=0. | ||
/*
 * Flushes the TLB, programs SDR1 from the _SDR1 variable, installs
 * the 16 segment registers for context 0 and (non-POWER4) loads the
 * BAT registers prepared by MMU_init.  Clobbers r0,r3-r6 and CTR.
 */
1293 | */ | ||
1294 | load_up_mmu: | ||
1295 | sync /* Force all PTE updates to finish */ | ||
1296 | isync | ||
1297 | tlbia /* Clear all TLB entries */ | ||
1298 | sync /* wait for tlbia/tlbie to finish */ | ||
1299 | TLBSYNC /* ... on all CPUs */ | ||
1300 | /* Load the SDR1 register (hash table base & size) */ | ||
1301 | lis r6,_SDR1@ha | ||
1302 | tophys(r6,r6) | ||
1303 | lwz r6,_SDR1@l(r6) | ||
1304 | mtspr SPRN_SDR1,r6 | ||
1305 | #ifdef CONFIG_PPC64BRIDGE | ||
1306 | /* clear the ASR so we only use the pseudo-segment registers. */ | ||
1307 | li r6,0 | ||
1308 | mtasr r6 | ||
1309 | #endif /* CONFIG_PPC64BRIDGE */ | ||
1310 | li r0,16 /* load up segment register values */ | ||
1311 | mtctr r0 /* for context 0 */ | ||
1312 | lis r3,0x2000 /* Ku = 1, VSID = 0 */ | ||
1313 | li r4,0 | ||
1314 | 3: mtsrin r3,r4 | ||
1315 | addi r3,r3,0x111 /* increment VSID */ | ||
1316 | addis r4,r4,0x1000 /* address of next segment */ | ||
1317 | bdnz 3b | ||
1318 | #ifndef CONFIG_POWER4 | ||
1319 | /* Load the BAT registers with the values set up by MMU_init. | ||
1320 | MMU_init takes care of whether we're on a 601 or not. */ | ||
/* NOTE(review): the mfpvr/cmpwi result appears unused here —
   presumably LOAD_BAT consumes CR0 for the 601 case; confirm in the
   macro definition earlier in this file */
1321 | mfpvr r3 | ||
1322 | srwi r3,r3,16 | ||
1323 | cmpwi r3,1 | ||
1324 | lis r3,BATS@ha | ||
1325 | addi r3,r3,BATS@l | ||
1326 | tophys(r3,r3) | ||
1327 | LOAD_BAT(0,r3,r4,r5) | ||
1328 | LOAD_BAT(1,r3,r4,r5) | ||
1329 | LOAD_BAT(2,r3,r4,r5) | ||
1330 | LOAD_BAT(3,r3,r4,r5) | ||
1331 | #endif /* CONFIG_POWER4 */ | ||
1332 | blr | ||
1333 | |||
1334 | /* | ||
1335 | * This is where the main kernel code starts. | ||
/*
 * Final boot-CPU setup: install current/stack pointers, call
 * machine_init and MMU_init, drop to real mode to run load_up_mmu,
 * then RFI into start_kernel with the MMU enabled.
 * r27-r31 carry the boot parameters saved by earlier startup code.
 */
1336 | */ | ||
1337 | start_here: | ||
1338 | /* ptr to current */ | ||
1339 | lis r2,init_task@h | ||
1340 | ori r2,r2,init_task@l | ||
1341 | /* Set up for using our exception vectors */ | ||
1342 | /* ptr to phys current thread */ | ||
1343 | tophys(r4,r2) | ||
1344 | addi r4,r4,THREAD /* init task's THREAD */ | ||
1345 | CLR_TOP32(r4) | ||
1346 | mtspr SPRN_SPRG3,r4 | ||
1347 | li r3,0 | ||
1348 | mtspr SPRN_SPRG2,r3 /* 0 => not in RTAS */ | ||
1349 | |||
1350 | /* stack */ | ||
1351 | lis r1,init_thread_union@ha | ||
1352 | addi r1,r1,init_thread_union@l | ||
1353 | li r0,0 | ||
/* zero back-chain word at the stack base */
1354 | stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) | ||
1355 | /* | ||
1356 | * Do early bootinfo parsing, platform-specific initialization, | ||
1357 | * and set up the MMU. | ||
1358 | */ | ||
1359 | mr r3,r31 | ||
1360 | mr r4,r30 | ||
1361 | mr r5,r29 | ||
1362 | mr r6,r28 | ||
1363 | mr r7,r27 | ||
1364 | bl machine_init | ||
1365 | bl MMU_init | ||
1366 | |||
1367 | #ifdef CONFIG_APUS | ||
1368 | /* Copy exception code to exception vector base on APUS. */ | ||
1369 | lis r4,KERNELBASE@h | ||
1370 | #ifdef CONFIG_APUS_FAST_EXCEPT | ||
1371 | lis r3,0xfff0 /* Copy to 0xfff00000 */ | ||
1372 | #else | ||
1373 | lis r3,0 /* Copy to 0x00000000 */ | ||
1374 | #endif | ||
1375 | li r5,0x4000 /* # bytes of memory to copy */ | ||
1376 | li r6,0 | ||
1377 | bl copy_and_flush /* copy the first 0x4000 bytes */ | ||
1378 | #endif /* CONFIG_APUS */ | ||
1379 | |||
1380 | /* | ||
1381 | * Go back to running unmapped so we can load up new values | ||
1382 | * for SDR1 (hash table pointer) and the segment registers | ||
1383 | * and change to using our exception vectors. | ||
1384 | */ | ||
/* RFI to the physical address of label 2 with IR/DR off */
1385 | lis r4,2f@h | ||
1386 | ori r4,r4,2f@l | ||
1387 | tophys(r4,r4) | ||
1388 | li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR) | ||
1389 | FIX_SRR1(r3,r5) | ||
1390 | mtspr SPRN_SRR0,r4 | ||
1391 | mtspr SPRN_SRR1,r3 | ||
1392 | SYNC | ||
1393 | RFI | ||
1394 | /* Load up the kernel context */ | ||
1395 | 2: bl load_up_mmu | ||
1396 | |||
1397 | #ifdef CONFIG_BDI_SWITCH | ||
1398 | /* Add helper information for the Abatron bdiGDB debugger. | ||
1399 | * We do this here because we know the mmu is disabled, and | ||
1400 | * will be enabled for real in just a few instructions. | ||
1401 | */ | ||
1402 | lis r5, abatron_pteptrs@h | ||
1403 | ori r5, r5, abatron_pteptrs@l | ||
1404 | stw r5, 0xf0(r0) /* This much match your Abatron config */ | ||
1405 | lis r6, swapper_pg_dir@h | ||
1406 | ori r6, r6, swapper_pg_dir@l | ||
1407 | tophys(r5, r5) | ||
1408 | stw r6, 0(r5) | ||
1409 | #endif /* CONFIG_BDI_SWITCH */ | ||
1410 | |||
1411 | /* Now turn on the MMU for real! */ | ||
1412 | li r4,MSR_KERNEL | ||
1413 | FIX_SRR1(r4,r5) | ||
1414 | lis r3,start_kernel@h | ||
1415 | ori r3,r3,start_kernel@l | ||
1416 | mtspr SPRN_SRR0,r3 | ||
1417 | mtspr SPRN_SRR1,r4 | ||
1418 | SYNC | ||
1419 | RFI | ||
1420 | |||
1421 | /* | ||
1422 | * Set up the segment registers for a new context. | ||
1423 | */ | ||
/* NOTE(review): r3 = new context number, r4 = new pgdir pointer (r4 is only
 * consumed by the CONFIG_BDI_SWITCH hook below). Clobbers r0, r3, r4, CTR.
 * Writes one segment register per loop iteration, NUM_USER_SEGMENTS times. */
1424 | _GLOBAL(set_context) | ||
1425 | mulli r3,r3,897 /* multiply context by skew factor */ | ||
1426 | rlwinm r3,r3,4,8,27 /* VSID = (context & 0xfffff) << 4 */ | ||
1427 | addis r3,r3,0x6000 /* Set Ks, Ku bits */ | ||
1428 | li r0,NUM_USER_SEGMENTS | ||
1429 | mtctr r0 | ||
1430 | |||
1431 | #ifdef CONFIG_BDI_SWITCH | ||
1432 | /* Context switch the PTE pointer for the Abatron BDI2000. | ||
1433 | * The PGDIR is passed as second argument. | ||
1434 | */ | ||
/* The word at KERNELBASE+0xf0 holds the address of abatron_pteptrs (stored
 * during boot); slot 1 of that table is the current user pgdir. */
1435 | lis r5, KERNELBASE@h | ||
1436 | lwz r5, 0xf0(r5) | ||
1437 | stw r4, 0x4(r5) | ||
1438 | #endif | ||
1439 | li r4,0 | ||
1440 | isync | ||
1441 | 3: | ||
1442 | #ifdef CONFIG_PPC64BRIDGE | ||
1443 | slbie r4 | ||
1444 | #endif /* CONFIG_PPC64BRIDGE */ | ||
1445 | mtsrin r3,r4 | ||
1446 | addi r3,r3,0x111 /* next VSID */ | ||
1447 | rlwinm r3,r3,0,8,3 /* clear out any overflow from VSID field */ | ||
1448 | addis r4,r4,0x1000 /* address of next segment */ | ||
1449 | bdnz 3b | ||
1450 | sync | ||
1451 | isync | ||
1452 | blr | ||
1453 | |||
1454 | /* | ||
1455 | * An undocumented "feature" of 604e requires that the v bit | ||
1456 | * be cleared before changing BAT values. | ||
1457 | * | ||
1458 | * Also, newer IBM firmware does not clear bat3 and 4 so | ||
1459 | * this makes sure it's done. | ||
1460 | * -- Cort | ||
1461 | */ | ||
/* NOTE(review): clobbers r9 and r10 (r10 stays 0 throughout). The DBAT
 * writes are skipped when the PVR family field is 1 (601). */
1462 | clear_bats: | ||
1463 | li r10,0 | ||
1464 | mfspr r9,SPRN_PVR | ||
1465 | rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */ | ||
1466 | cmpwi r9, 1 | ||
1467 | beq 1f | ||
1468 | |||
1469 | mtspr SPRN_DBAT0U,r10 | ||
1470 | mtspr SPRN_DBAT0L,r10 | ||
1471 | mtspr SPRN_DBAT1U,r10 | ||
1472 | mtspr SPRN_DBAT1L,r10 | ||
1473 | mtspr SPRN_DBAT2U,r10 | ||
1474 | mtspr SPRN_DBAT2L,r10 | ||
1475 | mtspr SPRN_DBAT3U,r10 | ||
1476 | mtspr SPRN_DBAT3L,r10 | ||
1477 | 1: | ||
1478 | mtspr SPRN_IBAT0U,r10 | ||
1479 | mtspr SPRN_IBAT0L,r10 | ||
1480 | mtspr SPRN_IBAT1U,r10 | ||
1481 | mtspr SPRN_IBAT1L,r10 | ||
1482 | mtspr SPRN_IBAT2U,r10 | ||
1483 | mtspr SPRN_IBAT2L,r10 | ||
1484 | mtspr SPRN_IBAT3U,r10 | ||
1485 | mtspr SPRN_IBAT3L,r10 | ||
1486 | BEGIN_FTR_SECTION | ||
1487 | /* Here's a tweak: at this point, CPU setup have | ||
1488 | * not been called yet, so HIGH_BAT_EN may not be | ||
1489 | * set in HID0 for the 745x processors. However, it | ||
1490 | * seems that doesn't affect our ability to actually | ||
1491 | * write to these SPRs. | ||
1492 | */ | ||
1493 | mtspr SPRN_DBAT4U,r10 | ||
1494 | mtspr SPRN_DBAT4L,r10 | ||
1495 | mtspr SPRN_DBAT5U,r10 | ||
1496 | mtspr SPRN_DBAT5L,r10 | ||
1497 | mtspr SPRN_DBAT6U,r10 | ||
1498 | mtspr SPRN_DBAT6L,r10 | ||
1499 | mtspr SPRN_DBAT7U,r10 | ||
1500 | mtspr SPRN_DBAT7L,r10 | ||
1501 | mtspr SPRN_IBAT4U,r10 | ||
1502 | mtspr SPRN_IBAT4L,r10 | ||
1503 | mtspr SPRN_IBAT5U,r10 | ||
1504 | mtspr SPRN_IBAT5L,r10 | ||
1505 | mtspr SPRN_IBAT6U,r10 | ||
1506 | mtspr SPRN_IBAT6L,r10 | ||
1507 | mtspr SPRN_IBAT7U,r10 | ||
1508 | mtspr SPRN_IBAT7L,r10 | ||
1509 | END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS) | ||
1510 | blr | ||
1511 | |||
/* Invalidate the TLB for effective addresses 0 .. 0x3ff000, one 4K page per
 * tlbie. Clobbers r10 and CR0.
 *
 * BUGFIX(review): the loop branch was "blt 1b". addic. sets CR0 from the
 * result, and 0x400000 - 0x1000 = 0x3ff000 is positive, so "blt" was never
 * taken and only a single page (EA 0x3ff000) was ever invalidated. The loop
 * must continue while the counter is still greater than zero, i.e. "bgt 1b"
 * (this matches the mainline powerpc flush_tlbs). */
1512 | flush_tlbs: | ||
1513 | lis r10, 0x40 | ||
1514 | 1: addic. r10, r10, -0x1000 | ||
1515 | tlbie r10 | ||
1516 | bgt 1b | ||
1517 | sync | ||
1518 | blr | ||
1519 | |||
/* Turn address translation off. Expects r3 = physical address of _start;
 * computes the physical address of __after_mmu_off and rfi's there with
 * MSR[IR,DR] cleared. If translation is already off (andi. result zero),
 * returns immediately via beqlr. Clobbers r0, r3, r4, SRR0, SRR1. */
1520 | mmu_off: | ||
1521 | addi r4, r3, __after_mmu_off - _start | ||
1522 | mfmsr r3 | ||
1523 | andi. r0,r3,MSR_DR|MSR_IR /* MMU enabled? */ | ||
1524 | beqlr | ||
1525 | andc r3,r3,r0 | ||
1526 | mtspr SPRN_SRR0,r4 | ||
1527 | mtspr SPRN_SRR1,r3 | ||
1528 | sync | ||
1529 | RFI | ||
1530 | |||
1531 | #ifndef CONFIG_POWER4 | ||
1532 | /* | ||
1533 | * Use the first pair of BAT registers to map the 1st 16MB | ||
1534 | * of RAM to KERNELBASE. From this point on we can't safely | ||
1535 | * call OF any more. | ||
1536 | */ | ||
/* NOTE(review): on a 601 (PVR family == 1) the map is built as two 8MB
 * IBATs, since the 601 keeps the valid bit in the LOWER BAT word; all
 * other 6xx parts fall through to label 4 and get a single 256MB (or 8MB
 * on APUS) IBAT0/DBAT0 pair with the valid bit in the upper word.
 * Clobbers r8-r11. */
1537 | initial_bats: | ||
1538 | lis r11,KERNELBASE@h | ||
1539 | #ifndef CONFIG_PPC64BRIDGE | ||
1540 | mfspr r9,SPRN_PVR | ||
1541 | rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */ | ||
1542 | cmpwi 0,r9,1 | ||
1543 | bne 4f | ||
1544 | ori r11,r11,4 /* set up BAT registers for 601 */ | ||
1545 | li r8,0x7f /* valid, block length = 8MB */ | ||
1546 | oris r9,r11,0x800000@h /* set up BAT reg for 2nd 8M */ | ||
1547 | oris r10,r8,0x800000@h /* set up BAT reg for 2nd 8M */ | ||
1548 | mtspr SPRN_IBAT0U,r11 /* N.B. 601 has valid bit in */ | ||
1549 | mtspr SPRN_IBAT0L,r8 /* lower BAT register */ | ||
1550 | mtspr SPRN_IBAT1U,r9 | ||
1551 | mtspr SPRN_IBAT1L,r10 | ||
1552 | isync | ||
1553 | blr | ||
1554 | #endif /* CONFIG_PPC64BRIDGE */ | ||
1555 | |||
1556 | 4: tophys(r8,r11) | ||
1557 | #ifdef CONFIG_SMP | ||
1558 | ori r8,r8,0x12 /* R/W access, M=1 */ | ||
1559 | #else | ||
1560 | ori r8,r8,2 /* R/W access */ | ||
1561 | #endif /* CONFIG_SMP */ | ||
1562 | #ifdef CONFIG_APUS | ||
1563 | ori r11,r11,BL_8M<<2|0x2 /* set up 8MB BAT registers for 604 */ | ||
1564 | #else | ||
1565 | ori r11,r11,BL_256M<<2|0x2 /* set up BAT registers for 604 */ | ||
1566 | #endif /* CONFIG_APUS */ | ||
1567 | |||
1568 | #ifdef CONFIG_PPC64BRIDGE | ||
1569 | /* clear out the high 32 bits in the BAT */ | ||
1570 | clrldi r11,r11,32 | ||
1571 | clrldi r8,r8,32 | ||
1572 | #endif /* CONFIG_PPC64BRIDGE */ | ||
1573 | mtspr SPRN_DBAT0L,r8 /* N.B. 6xx (not 601) have valid */ | ||
1574 | mtspr SPRN_DBAT0U,r11 /* bit in upper BAT register */ | ||
1575 | mtspr SPRN_IBAT0L,r8 | ||
1576 | mtspr SPRN_IBAT0U,r11 | ||
1577 | isync | ||
1578 | blr | ||
1579 | |||
1580 | #if !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) | ||
/* Install the display-framebuffer BAT precomputed by prom.c in disp_BAT
 * (word 0 = upper BAT value, word 1 = lower BAT value). Uses BAT3: DBAT3
 * on most 6xx parts, IBAT3 on the 601 (PVR family == 1). Expects r3 = load
 * offset consumed by reloc_offset; clobbers r8, r9, r11. */
1581 | setup_disp_bat: | ||
1582 | /* | ||
1583 | * setup the display bat prepared for us in prom.c | ||
1584 | */ | ||
1585 | mflr r8 | ||
1586 | bl reloc_offset | ||
1587 | mtlr r8 | ||
1588 | addis r8,r3,disp_BAT@ha | ||
1589 | addi r8,r8,disp_BAT@l | ||
1590 | lwz r11,0(r8) | ||
1591 | lwz r8,4(r8) | ||
1592 | mfspr r9,SPRN_PVR | ||
1593 | rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */ | ||
1594 | cmpwi 0,r9,1 | ||
1595 | beq 1f | ||
1596 | mtspr SPRN_DBAT3L,r8 | ||
1597 | mtspr SPRN_DBAT3U,r11 | ||
1598 | blr | ||
1599 | 1: mtspr SPRN_IBAT3L,r8 | ||
1600 | mtspr SPRN_IBAT3U,r11 | ||
1601 | blr | ||
1602 | |||
1603 | #endif /* !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) */ | ||
1604 | |||
1605 | #else /* CONFIG_POWER4 */ | ||
1606 | /* | ||
1607 | * Load up the SDR1 and segment register values now | ||
1608 | * since we don't have the BATs. | ||
1609 | * Also make sure we are running in 32-bit mode. | ||
1610 | */ | ||
1611 | |||
/* NOTE(review): expects r3 = load offset used to reach _SDR1. Clears all
 * SLB entries (slbia), sets up pseudo-segment 12 (and 9 for BOOTX text),
 * then drops MSR[SF] (clrldi of the top bit) to force 32-bit mode.
 * Clobbers r0, r4, r5, r14. */
1612 | initial_mm_power4: | ||
1613 | addis r14,r3,_SDR1@ha /* get the value from _SDR1 */ | ||
1614 | lwz r14,_SDR1@l(r14) /* assume hash table below 4GB */ | ||
1615 | mtspr SPRN_SDR1,r14 | ||
1616 | slbia | ||
1617 | lis r4,0x2000 /* set pseudo-segment reg 12 */ | ||
1618 | ori r5,r4,0x0ccc | ||
1619 | mtsr 12,r5 | ||
1620 | #if 0 | ||
1621 | ori r5,r4,0x0888 /* set pseudo-segment reg 8 */ | ||
1622 | mtsr 8,r5 /* (for access to serial port) */ | ||
1623 | #endif | ||
1624 | #ifdef CONFIG_BOOTX_TEXT | ||
1625 | ori r5,r4,0x0999 /* set pseudo-segment reg 9 */ | ||
1626 | mtsr 9,r5 /* (for access to screen) */ | ||
1627 | #endif | ||
1628 | mfmsr r0 | ||
1629 | clrldi r0,r0,1 | ||
1630 | sync | ||
1631 | mtmsr r0 | ||
1632 | isync | ||
1633 | blr | ||
1634 | |||
1635 | #endif /* CONFIG_POWER4 */ | ||
1636 | |||
1637 | #ifdef CONFIG_8260 | ||
1638 | /* Jump into the system reset for the rom. | ||
1639 | * We first disable the MMU, and then jump to the ROM reset address. | ||
1640 | * | ||
1641 | * r3 is the board info structure, r4 is the location for starting. | ||
1642 | * I use this for building a small kernel that can load other kernels, | ||
1643 | * rather than trying to write or rely on a rom monitor that can tftp load. | ||
1644 | */ | ||
/* NOTE(review): disables external interrupts and the I/D caches, then
 * rfi's to local label 2 at its physical address (virtual minus
 * KERNELBASE) with MSR = ME|RI (translation off), and finally branches
 * to the ROM entry point in r4. Does not return. */
1645 | .globl m8260_gorom | ||
1646 | m8260_gorom: | ||
1647 | mfmsr r0 | ||
1648 | rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */ | ||
1649 | sync | ||
1650 | mtmsr r0 | ||
1651 | sync | ||
1652 | mfspr r11, SPRN_HID0 | ||
1653 | lis r10, 0 | ||
1654 | ori r10,r10,HID0_ICE|HID0_DCE | ||
1655 | andc r11, r11, r10 | ||
1656 | mtspr SPRN_HID0, r11 | ||
1657 | isync | ||
1658 | li r5, MSR_ME|MSR_RI | ||
1659 | lis r6,2f@h | ||
1660 | addis r6,r6,-KERNELBASE@h | ||
1661 | ori r6,r6,2f@l | ||
1662 | mtspr SPRN_SRR0,r6 | ||
1663 | mtspr SPRN_SRR1,r5 | ||
1664 | isync | ||
1665 | sync | ||
1666 | rfi | ||
1667 | 2: | ||
1668 | mtlr r4 | ||
1669 | blr | ||
1670 | #endif | ||
1671 | |||
1672 | |||
1673 | /* | ||
1674 | * We put a few things here that have to be page-aligned. | ||
1675 | * This stuff goes at the beginning of the data segment, | ||
1676 | * which is page-aligned. | ||
1677 | */ | ||
1678 | .data | ||
1679 | .globl sdata | ||
1680 | sdata: | ||
/* One zeroed page handed out for zero-fill mappings. */
1681 | .globl empty_zero_page | ||
1682 | empty_zero_page: | ||
1683 | .space 4096 | ||
1684 | |||
/* The kernel's initial page directory (one 4K page). */
1685 | .globl swapper_pg_dir | ||
1686 | swapper_pg_dir: | ||
1687 | .space 4096 | ||
1688 | |||
1689 | /* | ||
1690 | * This space gets a copy of optional info passed to us by the bootstrap | ||
1691 | * Used to pass parameters into the kernel like root=/dev/sda1, etc. | ||
1692 | */ | ||
1693 | .globl cmd_line | ||
1694 | cmd_line: | ||
1695 | .space 512 | ||
1696 | |||
/* Per-vector intercept hooks, indexed by exception vector / 0x100;
 * zero entries mean "no intercept" for that vector. */
1697 | .globl intercept_table | ||
1698 | intercept_table: | ||
1699 | .long 0, 0, i0x200, i0x300, i0x400, 0, i0x600, i0x700 | ||
1700 | .long i0x800, 0, 0, 0, 0, i0xd00, 0, 0 | ||
1701 | .long 0, 0, 0, i0x1300, 0, 0, 0, 0 | ||
1702 | .long 0, 0, 0, 0, 0, 0, 0, 0 | ||
1703 | .long 0, 0, 0, 0, 0, 0, 0, 0 | ||
1704 | .long 0, 0, 0, 0, 0, 0, 0, 0 | ||
1705 | |||
1706 | /* Room for two PTE pointers, usually the kernel and current user pointers | ||
1707 | * to their respective root page table. | ||
1708 | */ | ||
1709 | abatron_pteptrs: | ||
1710 | .space 8 | ||
diff --git a/arch/ppc/kernel/head_44x.S b/arch/ppc/kernel/head_44x.S new file mode 100644 index 000000000000..9ed8165a3d6c --- /dev/null +++ b/arch/ppc/kernel/head_44x.S | |||
@@ -0,0 +1,753 @@ | |||
1 | /* | ||
2 | * arch/ppc/kernel/head_44x.S | ||
3 | * | ||
4 | * Kernel execution entry point code. | ||
5 | * | ||
6 | * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org> | ||
7 | * Initial PowerPC version. | ||
8 | * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu> | ||
9 | * Rewritten for PReP | ||
10 | * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au> | ||
11 | * Low-level exception handlers, MMU support, and rewrite. | ||
12 | * Copyright (c) 1997 Dan Malek <dmalek@jlc.net> | ||
13 | * PowerPC 8xx modifications. | ||
14 | * Copyright (c) 1998-1999 TiVo, Inc. | ||
15 | * PowerPC 403GCX modifications. | ||
16 | * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu> | ||
17 | * PowerPC 403GCX/405GP modifications. | ||
18 | * Copyright 2000 MontaVista Software Inc. | ||
19 | * PPC405 modifications | ||
20 | * PowerPC 403GCX/405GP modifications. | ||
21 | * Author: MontaVista Software, Inc. | ||
22 | * frank_rowand@mvista.com or source@mvista.com | ||
23 | * debbie_chu@mvista.com | ||
24 | * Copyright 2002-2005 MontaVista Software, Inc. | ||
25 | * PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org> | ||
26 | * | ||
27 | * This program is free software; you can redistribute it and/or modify it | ||
28 | * under the terms of the GNU General Public License as published by the | ||
29 | * Free Software Foundation; either version 2 of the License, or (at your | ||
30 | * option) any later version. | ||
31 | */ | ||
32 | |||
33 | #include <linux/config.h> | ||
34 | #include <asm/processor.h> | ||
35 | #include <asm/page.h> | ||
36 | #include <asm/mmu.h> | ||
37 | #include <asm/pgtable.h> | ||
38 | #include <asm/ibm4xx.h> | ||
39 | #include <asm/ibm44x.h> | ||
40 | #include <asm/cputable.h> | ||
41 | #include <asm/thread_info.h> | ||
42 | #include <asm/ppc_asm.h> | ||
43 | #include <asm/offsets.h> | ||
44 | #include "head_booke.h" | ||
45 | |||
46 | |||
47 | /* As with the other PowerPC ports, it is expected that when code | ||
48 | * execution begins here, the following registers contain valid, yet | ||
49 | * optional, information: | ||
50 | * | ||
51 | * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.) | ||
52 | * r4 - Starting address of the init RAM disk | ||
53 | * r5 - Ending address of the init RAM disk | ||
54 | * r6 - Start of kernel command line string (e.g. "mem=128") | ||
55 | * r7 - End of kernel command line string | ||
56 | * | ||
57 | */ | ||
58 | .text | ||
/* NOTE(review): Book E entry point — translation is already ON here; the
 * firmware-provided mapping covers us until we pin our own 256MB TLB
 * entry for KERNELBASE below. Entry registers r3-r7 are described in the
 * header comment above. */
59 | _GLOBAL(_stext) | ||
60 | _GLOBAL(_start) | ||
61 | /* | ||
62 | * Reserve a word at a fixed location to store the address | ||
63 | * of abatron_pteptrs | ||
64 | */ | ||
65 | nop | ||
66 | /* | ||
67 | * Save parameters we are passed | ||
68 | */ | ||
69 | mr r31,r3 | ||
70 | mr r30,r4 | ||
71 | mr r29,r5 | ||
72 | mr r28,r6 | ||
73 | mr r27,r7 | ||
74 | li r24,0 /* CPU number */ | ||
75 | |||
76 | /* | ||
77 | * Set up the initial MMU state | ||
78 | * | ||
79 | * We are still executing code at the virtual address | ||
80 | * mappings set by the firmware for the base of RAM. | ||
81 | * | ||
82 | * We first invalidate all TLB entries but the one | ||
83 | * we are running from. We then load the KERNELBASE | ||
84 | * mappings so we can begin to use kernel addresses | ||
85 | * natively and so the interrupt vector locations are | ||
86 | * permanently pinned (necessary since Book E | ||
87 | * implementations always have translation enabled). | ||
88 | * | ||
89 | * TODO: Use the known TLB entry we are running from to | ||
90 | * determine which physical region we are located | ||
91 | * in. This can be used to determine where in RAM | ||
92 | * (on a shared CPU system) or PCI memory space | ||
93 | * (on a DRAMless system) we are located. | ||
94 | * For now, we assume a perfect world which means | ||
95 | * we are located at the base of DRAM (physical 0). | ||
96 | */ | ||
97 | |||
98 | /* | ||
99 | * Search TLB for entry that we are currently using. | ||
100 | * Invalidate all entries but the one we are using. | ||
101 | */ | ||
102 | /* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */ | ||
103 | mfspr r3,SPRN_PID /* Get PID */ | ||
104 | mfmsr r4 /* Get MSR */ | ||
105 | andi. r4,r4,MSR_IS@l /* TS=1? */ | ||
106 | beq wmmucr /* If not, leave STS=0 */ | ||
107 | oris r3,r3,PPC44x_MMUCR_STS@h /* Set STS=1 */ | ||
108 | wmmucr: mtspr SPRN_MMUCR,r3 /* Put MMUCR */ | ||
109 | sync | ||
110 | |||
/* bl/mflr pair gives us our own runtime address so tlbsx can find the
 * TLB entry we are executing from (saved in r23). */
111 | bl invstr /* Find our address */ | ||
112 | invstr: mflr r5 /* Make it accessible */ | ||
113 | tlbsx r23,0,r5 /* Find entry we are in */ | ||
114 | li r4,0 /* Start at TLB entry 0 */ | ||
115 | li r3,0 /* Set PAGEID inval value */ | ||
116 | 1: cmpw r23,r4 /* Is this our entry? */ | ||
117 | beq skpinv /* If so, skip the inval */ | ||
118 | tlbwe r3,r4,PPC44x_TLB_PAGEID /* If not, inval the entry */ | ||
119 | skpinv: addi r4,r4,1 /* Increment */ | ||
120 | cmpwi r4,64 /* Are we done? */ | ||
121 | bne 1b /* If not, repeat */ | ||
122 | isync /* If so, context change */ | ||
123 | |||
124 | /* | ||
125 | * Configure and load pinned entry into TLB slot 63. | ||
126 | */ | ||
127 | |||
128 | lis r3,KERNELBASE@h /* Load the kernel virtual address */ | ||
129 | ori r3,r3,KERNELBASE@l | ||
130 | |||
131 | /* Kernel is at the base of RAM */ | ||
132 | li r4, 0 /* Load the kernel physical address */ | ||
133 | |||
134 | /* Load the kernel PID = 0 */ | ||
135 | li r0,0 | ||
136 | mtspr SPRN_PID,r0 | ||
137 | sync | ||
138 | |||
139 | /* Initialize MMUCR */ | ||
140 | li r5,0 | ||
141 | mtspr SPRN_MMUCR,r5 | ||
142 | sync | ||
143 | |||
144 | /* pageid fields */ | ||
145 | clrrwi r3,r3,10 /* Mask off the effective page number */ | ||
146 | ori r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M | ||
147 | |||
148 | /* xlat fields */ | ||
149 | clrrwi r4,r4,10 /* Mask off the real page number */ | ||
150 | /* ERPN is 0 for first 4GB page */ | ||
151 | |||
152 | /* attrib fields */ | ||
153 | /* Added guarded bit to protect against speculative loads/stores */ | ||
154 | li r5,0 | ||
155 | ori r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G) | ||
156 | |||
157 | li r0,63 /* TLB slot 63 */ | ||
158 | |||
159 | tlbwe r3,r0,PPC44x_TLB_PAGEID /* Load the pageid fields */ | ||
160 | tlbwe r4,r0,PPC44x_TLB_XLAT /* Load the translation fields */ | ||
161 | tlbwe r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */ | ||
162 | |||
/* rfi to local label 3 re-fetches through the new pinned mapping with
 * the current MSR, completing the switch to kernel addresses. */
163 | /* Force context change */ | ||
164 | mfmsr r0 | ||
165 | mtspr SPRN_SRR1, r0 | ||
166 | lis r0,3f@h | ||
167 | ori r0,r0,3f@l | ||
168 | mtspr SPRN_SRR0,r0 | ||
169 | sync | ||
170 | rfi | ||
171 | |||
172 | /* If necessary, invalidate original entry we used */ | ||
173 | 3: cmpwi r23,63 | ||
174 | beq 4f | ||
175 | li r6,0 | ||
176 | tlbwe r6,r23,PPC44x_TLB_PAGEID | ||
177 | isync | ||
178 | |||
179 | 4: | ||
180 | #ifdef CONFIG_SERIAL_TEXT_DEBUG | ||
181 | /* | ||
182 | * Add temporary UART mapping for early debug. This | ||
183 | * mapping must be identical to that used by the early | ||
184 | * bootloader code since the same asm/serial.h parameters | ||
185 | * are used for polled operation. | ||
186 | */ | ||
187 | /* pageid fields */ | ||
188 | lis r3,UART0_IO_BASE@h | ||
189 | ori r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M | ||
190 | |||
191 | /* xlat fields */ | ||
192 | lis r4,UART0_PHYS_IO_BASE@h /* RPN depends on SoC */ | ||
193 | ori r4,r4,0x0001 /* ERPN is 1 for second 4GB page */ | ||
194 | |||
195 | /* attrib fields */ | ||
196 | li r5,0 | ||
197 | ori r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_I | PPC44x_TLB_G) | ||
198 | |||
199 | li r0,1 /* TLB slot 1 */ | ||
200 | |||
201 | tlbwe r3,r0,PPC44x_TLB_PAGEID /* Load the pageid fields */ | ||
202 | tlbwe r4,r0,PPC44x_TLB_XLAT /* Load the translation fields */ | ||
203 | tlbwe r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */ | ||
204 | |||
205 | /* Force context change */ | ||
206 | isync | ||
207 | #endif /* CONFIG_SERIAL_TEXT_DEBUG */ | ||
208 | |||
209 | /* Establish the interrupt vector offsets */ | ||
210 | SET_IVOR(0, CriticalInput); | ||
211 | SET_IVOR(1, MachineCheck); | ||
212 | SET_IVOR(2, DataStorage); | ||
213 | SET_IVOR(3, InstructionStorage); | ||
214 | SET_IVOR(4, ExternalInput); | ||
215 | SET_IVOR(5, Alignment); | ||
216 | SET_IVOR(6, Program); | ||
217 | SET_IVOR(7, FloatingPointUnavailable); | ||
218 | SET_IVOR(8, SystemCall); | ||
219 | SET_IVOR(9, AuxillaryProcessorUnavailable); | ||
220 | SET_IVOR(10, Decrementer); | ||
221 | SET_IVOR(11, FixedIntervalTimer); | ||
222 | SET_IVOR(12, WatchdogTimer); | ||
223 | SET_IVOR(13, DataTLBError); | ||
224 | SET_IVOR(14, InstructionTLBError); | ||
225 | SET_IVOR(15, Debug); | ||
226 | |||
227 | /* Establish the interrupt vector base */ | ||
228 | lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */ | ||
229 | mtspr SPRN_IVPR,r4 | ||
230 | |||
231 | /* | ||
232 | * This is where the main kernel code starts. | ||
233 | */ | ||
234 | |||
235 | /* ptr to current */ | ||
236 | lis r2,init_task@h | ||
237 | ori r2,r2,init_task@l | ||
238 | |||
239 | /* ptr to current thread */ | ||
240 | addi r4,r2,THREAD /* init task's THREAD */ | ||
241 | mtspr SPRN_SPRG3,r4 | ||
242 | |||
243 | /* stack */ | ||
244 | lis r1,init_thread_union@h | ||
245 | ori r1,r1,init_thread_union@l | ||
246 | li r0,0 | ||
247 | stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) | ||
248 | |||
249 | bl early_init | ||
250 | |||
251 | /* | ||
252 | * Decide what sort of machine this is and initialize the MMU. | ||
253 | */ | ||
254 | mr r3,r31 | ||
255 | mr r4,r30 | ||
256 | mr r5,r29 | ||
257 | mr r6,r28 | ||
258 | mr r7,r27 | ||
259 | bl machine_init | ||
260 | bl MMU_init | ||
261 | |||
/* The address of abatron_pteptrs is stored at KERNELBASE (the word
 * reserved by the nop at _start) so the BDI2000 config can find it;
 * slot 0 of the table gets the kernel pgdir. */
262 | /* Setup PTE pointers for the Abatron bdiGDB */ | ||
263 | lis r6, swapper_pg_dir@h | ||
264 | ori r6, r6, swapper_pg_dir@l | ||
265 | lis r5, abatron_pteptrs@h | ||
266 | ori r5, r5, abatron_pteptrs@l | ||
267 | lis r4, KERNELBASE@h | ||
268 | ori r4, r4, KERNELBASE@l | ||
269 | stw r5, 0(r4) /* Save abatron_pteptrs at a fixed location */ | ||
270 | stw r6, 0(r5) | ||
271 | |||
272 | /* Let's move on */ | ||
273 | lis r4,start_kernel@h | ||
274 | ori r4,r4,start_kernel@l | ||
275 | lis r3,MSR_KERNEL@h | ||
276 | ori r3,r3,MSR_KERNEL@l | ||
277 | mtspr SPRN_SRR0,r4 | ||
278 | mtspr SPRN_SRR1,r3 | ||
279 | rfi /* change context and jump to start_kernel */ | ||
280 | |||
281 | /* | ||
282 | * Interrupt vector entry code | ||
283 | * | ||
284 | * The Book E MMUs are always on so we don't need to handle | ||
285 | * interrupts in real mode as with previous PPC processors. In | ||
286 | * this case we handle interrupts in the kernel virtual address | ||
287 | * space. | ||
288 | * | ||
289 | * Interrupt vectors are dynamically placed relative to the | ||
290 | * interrupt prefix as determined by the address of interrupt_base. | ||
291 | * The interrupt vector offsets are programmed using the labels | ||
292 | * for each interrupt vector entry. | ||
293 | * | ||
294 | * Interrupt vectors must be aligned on a 16 byte boundary. | ||
295 | * We align on a 32 byte cache line boundary for good measure. | ||
296 | */ | ||
297 | |||
/* Each macro below emits one vector at its label; IVPR points at
 * interrupt_base and the per-vector IVORs carry the label offsets. */
298 | interrupt_base: | ||
299 | /* Critical Input Interrupt */ | ||
300 | CRITICAL_EXCEPTION(0x0100, CriticalInput, UnknownException) | ||
301 | |||
302 | /* Machine Check Interrupt */ | ||
303 | #ifdef CONFIG_440A | ||
304 | MCHECK_EXCEPTION(0x0200, MachineCheck, MachineCheckException) | ||
305 | #else | ||
306 | CRITICAL_EXCEPTION(0x0200, MachineCheck, MachineCheckException) | ||
307 | #endif | ||
308 | |||
309 | /* Data Storage Interrupt */ | ||
/* Fast-path handler: r10-r13 and CR are parked in SPRG0/1/4W/5W/7W so the
 * common case (write to a present, writable page whose TLB entry lacks
 * write permission) can set the dirty bits and rfi without building a
 * full exception frame. Anything else bails to data_access below. */
310 | START_EXCEPTION(DataStorage) | ||
311 | mtspr SPRN_SPRG0, r10 /* Save some working registers */ | ||
312 | mtspr SPRN_SPRG1, r11 | ||
313 | mtspr SPRN_SPRG4W, r12 | ||
314 | mtspr SPRN_SPRG5W, r13 | ||
315 | mfcr r11 | ||
316 | mtspr SPRN_SPRG7W, r11 | ||
317 | |||
318 | /* | ||
319 | * Check if it was a store fault, if not then bail | ||
320 | * because a user tried to access a kernel or | ||
321 | * read-protected page. Otherwise, get the | ||
322 | * offending address and handle it. | ||
323 | */ | ||
324 | mfspr r10, SPRN_ESR | ||
325 | andis. r10, r10, ESR_ST@h | ||
326 | beq 2f | ||
327 | |||
328 | mfspr r10, SPRN_DEAR /* Get faulting address */ | ||
329 | |||
330 | /* If we are faulting a kernel address, we have to use the | ||
331 | * kernel page tables. | ||
332 | */ | ||
333 | andis. r11, r10, 0x8000 | ||
334 | beq 3f | ||
335 | lis r11, swapper_pg_dir@h | ||
336 | ori r11, r11, swapper_pg_dir@l | ||
337 | |||
338 | mfspr r12,SPRN_MMUCR | ||
339 | rlwinm r12,r12,0,0,23 /* Clear TID */ | ||
340 | |||
341 | b 4f | ||
342 | |||
343 | /* Get the PGD for the current thread */ | ||
344 | 3: | ||
345 | mfspr r11,SPRN_SPRG3 | ||
346 | lwz r11,PGDIR(r11) | ||
347 | |||
348 | /* Load PID into MMUCR TID */ | ||
349 | mfspr r12,SPRN_MMUCR /* Get MMUCR */ | ||
350 | mfspr r13,SPRN_PID /* Get PID */ | ||
351 | rlwimi r12,r13,0,24,31 /* Set TID */ | ||
352 | |||
353 | 4: | ||
354 | mtspr SPRN_MMUCR,r12 | ||
355 | |||
356 | rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */ | ||
357 | lwzx r11, r12, r11 /* Get pgd/pmd entry */ | ||
358 | rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */ | ||
359 | beq 2f /* Bail if no table */ | ||
360 | |||
361 | rlwimi r12, r10, 23, 20, 28 /* Compute pte address */ | ||
362 | lwz r11, 4(r12) /* Get pte entry */ | ||
363 | |||
364 | andi. r13, r11, _PAGE_RW /* Is it writeable? */ | ||
365 | beq 2f /* Bail if not */ | ||
366 | |||
367 | /* Update 'changed'. | ||
368 | */ | ||
369 | ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE | ||
370 | stw r11, 4(r12) /* Update Linux page table */ | ||
371 | |||
/* Translate Linux PTE permission bits into the 44x TLB ATTRIB
 * permission field (SR/SW/SX always for kernel; UR/UW/UX only when
 * _PAGE_USER is set, by ANDing HWEXEC/RW with USER). */
372 | li r13, PPC44x_TLB_SR@l /* Set SR */ | ||
373 | rlwimi r13, r11, 29, 29, 29 /* SX = _PAGE_HWEXEC */ | ||
374 | rlwimi r13, r11, 0, 30, 30 /* SW = _PAGE_RW */ | ||
375 | rlwimi r13, r11, 29, 28, 28 /* UR = _PAGE_USER */ | ||
376 | rlwimi r12, r11, 31, 26, 26 /* (_PAGE_USER>>1)->r12 */ | ||
377 | rlwimi r12, r11, 29, 30, 30 /* (_PAGE_USER>>3)->r12 */ | ||
378 | and r12, r12, r11 /* HWEXEC/RW & USER */ | ||
379 | rlwimi r13, r12, 0, 26, 26 /* UX = HWEXEC & USER */ | ||
380 | rlwimi r13, r12, 3, 27, 27 /* UW = RW & USER */ | ||
381 | |||
382 | rlwimi r11,r13,0,26,31 /* Insert static perms */ | ||
383 | |||
384 | rlwinm r11,r11,0,20,15 /* Clear U0-U3 */ | ||
385 | |||
386 | /* find the TLB index that caused the fault. It has to be here. */ | ||
387 | tlbsx r10, 0, r10 | ||
388 | |||
389 | tlbwe r11, r10, PPC44x_TLB_ATTRIB /* Write ATTRIB */ | ||
390 | |||
391 | /* Done...restore registers and get out of here. | ||
392 | */ | ||
393 | mfspr r11, SPRN_SPRG7R | ||
394 | mtcr r11 | ||
395 | mfspr r13, SPRN_SPRG5R | ||
396 | mfspr r12, SPRN_SPRG4R | ||
397 | |||
398 | mfspr r11, SPRN_SPRG1 | ||
399 | mfspr r10, SPRN_SPRG0 | ||
400 | rfi /* Force context change */ | ||
401 | |||
402 | 2: | ||
403 | /* | ||
404 | * The bailout. Restore registers to pre-exception conditions | ||
405 | * and call the heavyweights to help us out. | ||
406 | */ | ||
407 | mfspr r11, SPRN_SPRG7R | ||
408 | mtcr r11 | ||
409 | mfspr r13, SPRN_SPRG5R | ||
410 | mfspr r12, SPRN_SPRG4R | ||
411 | |||
412 | mfspr r11, SPRN_SPRG1 | ||
413 | mfspr r10, SPRN_SPRG0 | ||
414 | b data_access | ||
415 | |||
416 | /* Instruction Storage Interrupt */ | ||
417 | INSTRUCTION_STORAGE_EXCEPTION | ||
418 | |||
419 | /* External Input Interrupt */ | ||
420 | EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE) | ||
421 | |||
422 | /* Alignment Interrupt */ | ||
423 | ALIGNMENT_EXCEPTION | ||
424 | |||
425 | /* Program Interrupt */ | ||
426 | PROGRAM_EXCEPTION | ||
427 | |||
428 | /* Floating Point Unavailable Interrupt */ | ||
429 | EXCEPTION(0x2010, FloatingPointUnavailable, UnknownException, EXC_XFER_EE) | ||
430 | |||
431 | /* System Call Interrupt */ | ||
432 | START_EXCEPTION(SystemCall) | ||
433 | NORMAL_EXCEPTION_PROLOG | ||
434 | EXC_XFER_EE_LITE(0x0c00, DoSyscall) | ||
435 | |||
/* "Auxillary" [sic] spelling is kept — it is part of the label name used
 * by SET_IVOR(9, ...) above. */
436 | /* Auxillary Processor Unavailable Interrupt */ | ||
437 | EXCEPTION(0x2020, AuxillaryProcessorUnavailable, UnknownException, EXC_XFER_EE) | ||
438 | |||
439 | /* Decrementer Interrupt */ | ||
440 | DECREMENTER_EXCEPTION | ||
441 | |||
442 | /* Fixed Interval Timer Interrupt */ | ||
443 | /* TODO: Add FIT support */ | ||
444 | EXCEPTION(0x1010, FixedIntervalTimer, UnknownException, EXC_XFER_EE) | ||
445 | |||
446 | /* Watchdog Timer Interrupt */ | ||
447 | /* TODO: Add watchdog support */ | ||
448 | CRITICAL_EXCEPTION(0x1020, WatchdogTimer, UnknownException) | ||
449 | |||
450 | /* Data TLB Error Interrupt */ | ||
/* TLB miss on a data access: walk the (kernel or per-thread) page table
 * for the DEAR address; if the PTE is present, mark it accessed and fall
 * into finish_tlb_load to install a TLB entry. Otherwise bail to
 * data_access for the full page-fault path. */
451 | START_EXCEPTION(DataTLBError) | ||
452 | mtspr SPRN_SPRG0, r10 /* Save some working registers */ | ||
453 | mtspr SPRN_SPRG1, r11 | ||
454 | mtspr SPRN_SPRG4W, r12 | ||
455 | mtspr SPRN_SPRG5W, r13 | ||
456 | mfcr r11 | ||
457 | mtspr SPRN_SPRG7W, r11 | ||
458 | mfspr r10, SPRN_DEAR /* Get faulting address */ | ||
459 | |||
460 | /* If we are faulting a kernel address, we have to use the | ||
461 | * kernel page tables. | ||
462 | */ | ||
463 | andis. r11, r10, 0x8000 | ||
464 | beq 3f | ||
465 | lis r11, swapper_pg_dir@h | ||
466 | ori r11, r11, swapper_pg_dir@l | ||
467 | |||
468 | mfspr r12,SPRN_MMUCR | ||
469 | rlwinm r12,r12,0,0,23 /* Clear TID */ | ||
470 | |||
471 | b 4f | ||
472 | |||
473 | /* Get the PGD for the current thread */ | ||
474 | 3: | ||
475 | mfspr r11,SPRN_SPRG3 | ||
476 | lwz r11,PGDIR(r11) | ||
477 | |||
478 | /* Load PID into MMUCR TID */ | ||
479 | mfspr r12,SPRN_MMUCR | ||
480 | mfspr r13,SPRN_PID /* Get PID */ | ||
481 | rlwimi r12,r13,0,24,31 /* Set TID */ | ||
482 | |||
483 | 4: | ||
484 | mtspr SPRN_MMUCR,r12 | ||
485 | |||
486 | rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */ | ||
487 | lwzx r11, r12, r11 /* Get pgd/pmd entry */ | ||
488 | rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */ | ||
489 | beq 2f /* Bail if no table */ | ||
490 | |||
491 | rlwimi r12, r10, 23, 20, 28 /* Compute pte address */ | ||
492 | lwz r11, 4(r12) /* Get pte entry */ | ||
493 | andi. r13, r11, _PAGE_PRESENT /* Is the page present? */ | ||
494 | beq 2f /* Bail if not present */ | ||
495 | |||
496 | ori r11, r11, _PAGE_ACCESSED | ||
497 | stw r11, 4(r12) | ||
498 | |||
499 | /* Jump to common tlb load */ | ||
500 | b finish_tlb_load | ||
501 | |||
502 | 2: | ||
503 | /* The bailout. Restore registers to pre-exception conditions | ||
504 | * and call the heavyweights to help us out. | ||
505 | */ | ||
506 | mfspr r11, SPRN_SPRG7R | ||
507 | mtcr r11 | ||
508 | mfspr r13, SPRN_SPRG5R | ||
509 | mfspr r12, SPRN_SPRG4R | ||
510 | mfspr r11, SPRN_SPRG1 | ||
511 | mfspr r10, SPRN_SPRG0 | ||
512 | b data_access | ||
513 | |||
514 | /* Instruction TLB Error Interrupt */ | ||
515 | /* | ||
516 | * Nearly the same as above, except we get our | ||
517 | * information from different registers and bailout | ||
518 | * to a different point. | ||
519 | */ | ||
/* The faulting address is SRR0 (the instruction address) rather than
 * DEAR, and the bailout target is the InstructionStorage vector rather
 * than data_access. */
520 | START_EXCEPTION(InstructionTLBError) | ||
521 | mtspr SPRN_SPRG0, r10 /* Save some working registers */ | ||
522 | mtspr SPRN_SPRG1, r11 | ||
523 | mtspr SPRN_SPRG4W, r12 | ||
524 | mtspr SPRN_SPRG5W, r13 | ||
525 | mfcr r11 | ||
526 | mtspr SPRN_SPRG7W, r11 | ||
527 | mfspr r10, SPRN_SRR0 /* Get faulting address */ | ||
528 | |||
529 | /* If we are faulting a kernel address, we have to use the | ||
530 | * kernel page tables. | ||
531 | */ | ||
532 | andis. r11, r10, 0x8000 | ||
533 | beq 3f | ||
534 | lis r11, swapper_pg_dir@h | ||
535 | ori r11, r11, swapper_pg_dir@l | ||
536 | |||
537 | mfspr r12,SPRN_MMUCR | ||
538 | rlwinm r12,r12,0,0,23 /* Clear TID */ | ||
539 | |||
540 | b 4f | ||
541 | |||
542 | /* Get the PGD for the current thread */ | ||
543 | 3: | ||
544 | mfspr r11,SPRN_SPRG3 | ||
545 | lwz r11,PGDIR(r11) | ||
546 | |||
547 | /* Load PID into MMUCR TID */ | ||
548 | mfspr r12,SPRN_MMUCR | ||
549 | mfspr r13,SPRN_PID /* Get PID */ | ||
550 | rlwimi r12,r13,0,24,31 /* Set TID */ | ||
551 | |||
552 | 4: | ||
553 | mtspr SPRN_MMUCR,r12 | ||
554 | |||
555 | rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */ | ||
556 | lwzx r11, r12, r11 /* Get pgd/pmd entry */ | ||
557 | rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */ | ||
558 | beq 2f /* Bail if no table */ | ||
559 | |||
560 | rlwimi r12, r10, 23, 20, 28 /* Compute pte address */ | ||
561 | lwz r11, 4(r12) /* Get pte entry */ | ||
562 | andi. r13, r11, _PAGE_PRESENT /* Is the page present? */ | ||
563 | beq 2f /* Bail if not present */ | ||
564 | |||
565 | ori r11, r11, _PAGE_ACCESSED | ||
566 | stw r11, 4(r12) | ||
567 | |||
568 | /* Jump to common TLB load point */ | ||
569 | b finish_tlb_load | ||
570 | |||
571 | 2: | ||
572 | /* The bailout. Restore registers to pre-exception conditions | ||
573 | * and call the heavyweights to help us out. | ||
574 | */ | ||
575 | mfspr r11, SPRN_SPRG7R | ||
576 | mtcr r11 | ||
577 | mfspr r13, SPRN_SPRG5R | ||
578 | mfspr r12, SPRN_SPRG4R | ||
579 | mfspr r11, SPRN_SPRG1 | ||
580 | mfspr r10, SPRN_SPRG0 | ||
581 | b InstructionStorage | ||
582 | |||
583 | /* Debug Interrupt */ | ||
584 | DEBUG_EXCEPTION | ||
585 | |||
586 | /* | ||
587 | * Local functions | ||
588 | */ | ||
589 | /* | ||
590 | * Data TLB exceptions will bail out to this point | ||
591 | * if they can't resolve the lightweight TLB fault. | ||
592 | */ | ||
/* Builds a normal exception frame, saves ESR and passes DEAR as the
 * faulting address, then transfers to the C page-fault handler. */
593 | data_access: | ||
594 | NORMAL_EXCEPTION_PROLOG | ||
595 | mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */ | ||
596 | stw r5,_ESR(r11) | ||
597 | mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */ | ||
598 | EXC_XFER_EE_LITE(0x0300, handle_page_fault) | ||
599 | |||
600 | /* | ||
602 | * Both the instruction and data TLB miss get to this | ||
603 | * point to load the TLB. | ||
604 | * r10 - EA of fault | ||
605 | * r11 - available to use | ||
606 | * r12 - Pointer to the 64-bit PTE | ||
607 | * r13 - available to use | ||
608 | * MMUCR - loaded with proper value when we get here | ||
609 | * Upon exit, we reload everything and RFI. | ||
610 | */ | ||
611 | finish_tlb_load: | ||
612 | /* | ||
613 | * We set execute, because we don't have the granularity to | ||
614 | * properly set this at the page level (Linux problem). | ||
615 | * If shared is set, we cause a zero PID->TID load. | ||
616 | * Many of these bits are software only. Bits we don't set | ||
617 | * here we (properly should) assume have the appropriate value. | ||
618 | */ | ||
619 | |||
620 | /* Load the next available TLB index */ | ||
621 | lis r13, tlb_44x_index@ha | ||
622 | lwz r13, tlb_44x_index@l(r13) | ||
623 | /* Load the TLB high watermark */ | ||
624 | lis r11, tlb_44x_hwater@ha | ||
625 | lwz r11, tlb_44x_hwater@l(r11) | ||
626 | |||
627 | /* Increment, rollover, and store TLB index */ | ||
628 | addi r13, r13, 1 | ||
629 | cmpw 0, r13, r11 /* reserve entries */ | ||
630 | ble 7f | ||
631 | li r13, 0 /* wrap back to entry 0 past the watermark */ | ||
632 | 7: | ||
633 | /* Store the next available TLB index */ | ||
634 | lis r11, tlb_44x_index@ha | ||
635 | stw r13, tlb_44x_index@l(r11) | ||
636 | |||
637 | lwz r11, 0(r12) /* Get MS word of PTE */ | ||
638 | lwz r12, 4(r12) /* Get LS word of PTE */ | ||
639 | rlwimi r11, r12, 0, 0 , 19 /* Insert RPN */ | ||
640 | tlbwe r11, r13, PPC44x_TLB_XLAT /* Write XLAT */ | ||
641 | |||
642 | /* | ||
643 | * Create PAGEID. This is the faulting address, | ||
644 | * page size, and valid flag. | ||
645 | */ | ||
646 | li r11, PPC44x_TLB_VALID | PPC44x_TLB_4K | ||
647 | rlwimi r10, r11, 0, 20, 31 /* Insert valid and page size */ | ||
648 | tlbwe r10, r13, PPC44x_TLB_PAGEID /* Write PAGEID */ | ||
649 | |||
650 | li r10, PPC44x_TLB_SR@l /* Set SR */ | ||
651 | rlwimi r10, r12, 0, 30, 30 /* Set SW = _PAGE_RW */ | ||
652 | rlwimi r10, r12, 29, 29, 29 /* SX = _PAGE_HWEXEC */ | ||
653 | rlwimi r10, r12, 29, 28, 28 /* UR = _PAGE_USER */ | ||
654 | rlwimi r11, r12, 31, 26, 26 /* (_PAGE_USER>>1)->r12 */ | ||
655 | and r11, r12, r11 /* HWEXEC & USER */ | ||
656 | rlwimi r10, r11, 0, 26, 26 /* UX = HWEXEC & USER */ | ||
657 | |||
658 | rlwimi r12, r10, 0, 26, 31 /* Insert static perms */ | ||
659 | rlwinm r12, r12, 0, 20, 15 /* Clear U0-U3 */ | ||
660 | tlbwe r12, r13, PPC44x_TLB_ATTRIB /* Write ATTRIB */ | ||
661 | |||
662 | /* Done...restore registers and get out of here. | ||
663 | */ | ||
664 | mfspr r11, SPRN_SPRG7R /* restore CR (saved by the miss prolog) */ | ||
665 | mtcr r11 | ||
666 | mfspr r13, SPRN_SPRG5R | ||
667 | mfspr r12, SPRN_SPRG4R | ||
668 | mfspr r11, SPRN_SPRG1 | ||
669 | mfspr r10, SPRN_SPRG0 | ||
670 | rfi /* Force context change */ | ||
671 | |||
672 | /* | ||
673 | * Global functions | ||
674 | */ | ||
675 | |||
676 | /* | ||
677 | * extern void giveup_altivec(struct task_struct *prev) | ||
678 | * | ||
679 | * The 44x core does not have an AltiVec unit. | ||
680 | */ | ||
681 | _GLOBAL(giveup_altivec) | ||
682 | blr /* no AltiVec state exists, so nothing to save */ | ||
683 | |||
684 | /* | ||
685 | * extern void giveup_fpu(struct task_struct *prev) | ||
686 | * | ||
687 | * The 44x core does not have an FPU. | ||
688 | */ | ||
689 | _GLOBAL(giveup_fpu) | ||
690 | blr /* no FP state exists, so nothing to save */ | ||
691 | |||
692 | /* | ||
693 | * extern void abort(void) | ||
694 | * | ||
695 | * At present, this routine just applies a system reset. | ||
696 | */ | ||
697 | _GLOBAL(abort) | ||
698 | mfspr r13,SPRN_DBCR0 | ||
699 | oris r13,r13,DBCR0_RST_SYSTEM@h | ||
700 | mtspr SPRN_DBCR0,r13 /* request system reset; NOTE(review): no blr -- presumably the reset fires immediately, otherwise control falls into set_context below. Confirm intended. */ | ||
701 | |||
702 | _GLOBAL(set_context) /* set_context(r3 = new PID value, r4 = pgdir): switch the MMU context */ | ||
703 | |||
704 | #ifdef CONFIG_BDI_SWITCH | ||
705 | /* Context switch the PTE pointer for the Abatron BDI2000. | ||
706 | * The PGDIR is the second parameter. | ||
707 | */ | ||
708 | lis r5, abatron_pteptrs@h | ||
709 | ori r5, r5, abatron_pteptrs@l | ||
710 | stw r4, 0x4(r5) /* second slot of abatron_pteptrs = user pgdir */ | ||
711 | #endif | ||
712 | mtspr SPRN_PID,r3 /* new address-space ID for TLB matching */ | ||
713 | isync /* Force context change */ | ||
714 | blr | ||
715 | |||
716 | /* | ||
717 | * We put a few things here that have to be page-aligned. This stuff | ||
718 | * goes at the beginning of the data segment, which is page-aligned. | ||
719 | */ | ||
720 | .data | ||
721 | _GLOBAL(sdata) | ||
722 | _GLOBAL(empty_zero_page) | ||
723 | .space 4096 /* one zero-filled 4KB page */ | ||
724 | |||
725 | /* | ||
726 | * To support >32-bit physical addresses, we use an 8KB pgdir. | ||
727 | */ | ||
728 | _GLOBAL(swapper_pg_dir) | ||
729 | .space 8192 | ||
730 | |||
731 | /* Reserved 4k for the critical exception stack & 4k for the machine | ||
732 | * check stack per CPU for kernel mode exceptions */ | ||
733 | .section .bss | ||
734 | .align 12 /* 2^12 = 4KB alignment */ | ||
735 | exception_stack_bottom: | ||
736 | .space BOOKE_EXCEPTION_STACK_SIZE | ||
737 | _GLOBAL(exception_stack_top) | ||
738 | |||
739 | /* | ||
740 | * This space gets a copy of optional info passed to us by the bootstrap | ||
741 | * which is used to pass parameters into the kernel like root=/dev/sda1, etc. | ||
742 | */ | ||
743 | _GLOBAL(cmd_line) | ||
744 | .space 512 | ||
745 | |||
746 | /* | ||
747 | * Room for two PTE pointers, usually the kernel and current user pointers | ||
748 | * to their respective root page table. | ||
749 | */ | ||
750 | abatron_pteptrs: | ||
751 | .space 8 | ||
752 | |||
753 | |||
753 | |||
diff --git a/arch/ppc/kernel/head_4xx.S b/arch/ppc/kernel/head_4xx.S new file mode 100644 index 000000000000..6f5d380e2345 --- /dev/null +++ b/arch/ppc/kernel/head_4xx.S | |||
@@ -0,0 +1,1010 @@ | |||
1 | /* | ||
2 | * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org> | ||
3 | * Initial PowerPC version. | ||
4 | * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu> | ||
5 | * Rewritten for PReP | ||
6 | * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au> | ||
7 | * Low-level exception handlers, MMU support, and rewrite. | ||
8 | * Copyright (c) 1997 Dan Malek <dmalek@jlc.net> | ||
9 | * PowerPC 8xx modifications. | ||
10 | * Copyright (c) 1998-1999 TiVo, Inc. | ||
11 | * PowerPC 403GCX modifications. | ||
12 | * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu> | ||
13 | * PowerPC 403GCX/405GP modifications. | ||
14 | * Copyright 2000 MontaVista Software Inc. | ||
15 | * PPC405 modifications | ||
16 | * PowerPC 403GCX/405GP modifications. | ||
17 | * Author: MontaVista Software, Inc. | ||
18 | * frank_rowand@mvista.com or source@mvista.com | ||
19 | * debbie_chu@mvista.com | ||
20 | * | ||
21 | * | ||
22 | * Module name: head_4xx.S | ||
23 | * | ||
24 | * Description: | ||
25 | * Kernel execution entry point code. | ||
26 | * | ||
27 | * This program is free software; you can redistribute it and/or | ||
28 | * modify it under the terms of the GNU General Public License | ||
29 | * as published by the Free Software Foundation; either version | ||
30 | * 2 of the License, or (at your option) any later version. | ||
31 | * | ||
32 | */ | ||
33 | |||
34 | #include <linux/config.h> | ||
35 | #include <asm/processor.h> | ||
36 | #include <asm/page.h> | ||
37 | #include <asm/mmu.h> | ||
38 | #include <asm/pgtable.h> | ||
39 | #include <asm/ibm4xx.h> | ||
40 | #include <asm/cputable.h> | ||
41 | #include <asm/thread_info.h> | ||
42 | #include <asm/ppc_asm.h> | ||
43 | #include <asm/offsets.h> | ||
44 | |||
45 | /* As with the other PowerPC ports, it is expected that when code | ||
46 | * execution begins here, the following registers contain valid, yet | ||
47 | * optional, information: | ||
48 | * | ||
49 | * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.) | ||
50 | * r4 - Starting address of the init RAM disk | ||
51 | * r5 - Ending address of the init RAM disk | ||
52 | * r6 - Start of kernel command line string (e.g. "mem=96m") | ||
53 | * r7 - End of kernel command line string | ||
54 | * | ||
55 | * This is all going to change RSN when we add bi_recs....... -- Dan | ||
56 | */ | ||
57 | .text | ||
58 | _GLOBAL(_stext) | ||
59 | _GLOBAL(_start) | ||
60 | |||
61 | /* Save parameters we are passed. | ||
62 | */ | ||
63 | mr r31,r3 /* preserve boot params in non-volatile regs */ | ||
64 | mr r30,r4 | ||
65 | mr r29,r5 | ||
66 | mr r28,r6 | ||
67 | mr r27,r7 | ||
68 | |||
69 | /* We have to turn on the MMU right away so we get cache modes | ||
70 | * set correctly. | ||
71 | */ | ||
72 | bl initial_mmu | ||
73 | |||
74 | /* We now have the lower 16 Meg mapped into TLB entries, and the caches | ||
75 | * ready to work. | ||
76 | */ | ||
77 | turn_on_mmu: | ||
78 | lis r0,MSR_KERNEL@h | ||
79 | ori r0,r0,MSR_KERNEL@l | ||
80 | mtspr SPRN_SRR1,r0 /* MSR that rfi will install */ | ||
81 | lis r0,start_here@h | ||
82 | ori r0,r0,start_here@l | ||
83 | mtspr SPRN_SRR0,r0 /* address that rfi will jump to */ | ||
84 | SYNC | ||
85 | rfi /* enables MMU */ | ||
86 | b . /* prevent prefetch past rfi */ | ||
87 | |||
88 | /* | ||
89 | * This area is used for temporarily saving registers during the | ||
90 | * critical exception prolog. | ||
91 | */ | ||
92 | . = 0xc0 /* fixed low-memory slot, below the first vector at 0x100 */ | ||
93 | crit_save: | ||
94 | _GLOBAL(crit_r10) | ||
95 | .space 4 | ||
96 | _GLOBAL(crit_r11) | ||
97 | .space 4 | ||
98 | |||
99 | /* | ||
100 | * Exception vector entry code. This code runs with address translation | ||
101 | * turned off (i.e. using physical addresses). We assume SPRG3 has the | ||
102 | * physical address of the current task thread_struct. | ||
103 | * Note that we have to have decremented r1 before we write to any fields | ||
104 | * of the exception frame, since a critical interrupt could occur at any | ||
105 | * time, and it will write to the area immediately below the current r1. | ||
106 | */ | ||
107 | #define NORMAL_EXCEPTION_PROLOG \ | ||
108 | mtspr SPRN_SPRG0,r10; /* save two registers to work with */\ | ||
109 | mtspr SPRN_SPRG1,r11; \ | ||
110 | mtspr SPRN_SPRG2,r1; \ | ||
111 | mfcr r10; /* save CR in r10 for now */\ | ||
112 | mfspr r11,SPRN_SRR1; /* check whether user or kernel */\ | ||
113 | andi. r11,r11,MSR_PR; \ | ||
114 | beq 1f; \ | ||
115 | mfspr r1,SPRN_SPRG3; /* if from user, start at top of */\ | ||
116 | lwz r1,THREAD_INFO-THREAD(r1); /* this thread's kernel stack */\ | ||
117 | addi r1,r1,THREAD_SIZE; \ | ||
118 | 1: subi r1,r1,INT_FRAME_SIZE; /* Allocate an exception frame */\ | ||
119 | tophys(r11,r1); \ | ||
120 | stw r10,_CCR(r11); /* save various registers */\ | ||
121 | stw r12,GPR12(r11); \ | ||
122 | stw r9,GPR9(r11); \ | ||
123 | mfspr r10,SPRN_SPRG0; /* recover original r10 */\ | ||
124 | stw r10,GPR10(r11); \ | ||
125 | mfspr r12,SPRN_SPRG1; /* recover original r11 */\ | ||
126 | stw r12,GPR11(r11); \ | ||
127 | mflr r10; \ | ||
128 | stw r10,_LINK(r11); \ | ||
129 | mfspr r10,SPRN_SPRG2; /* recover original r1 */\ | ||
130 | mfspr r12,SPRN_SRR0; /* r12 = return address */\ | ||
131 | stw r10,GPR1(r11); \ | ||
132 | mfspr r9,SPRN_SRR1; /* r9 = saved MSR */\ | ||
133 | stw r10,0(r11); /* frame back-chain word */\ | ||
134 | rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\ | ||
135 | stw r0,GPR0(r11); \ | ||
136 | SAVE_4GPRS(3, r11); \ | ||
137 | SAVE_2GPRS(7, r11) | ||
138 | |||
139 | /* | ||
140 | * Exception prolog for critical exceptions. This is a little different | ||
141 | * from the normal exception prolog above since a critical exception | ||
142 | * can potentially occur at any point during normal exception processing. | ||
143 | * Thus we cannot use the same SPRG registers as the normal prolog above. | ||
144 | * Instead we use a couple of words of memory at low physical addresses. | ||
145 | * This is OK since we don't support SMP on these processors. | ||
146 | */ | ||
147 | #define CRITICAL_EXCEPTION_PROLOG \ | ||
148 | stw r10,crit_r10@l(0); /* save two registers to work with */\ | ||
149 | stw r11,crit_r11@l(0); \ | ||
150 | mfcr r10; /* save CR in r10 for now */\ | ||
151 | mfspr r11,SPRN_SRR3; /* check whether user or kernel */\ | ||
152 | andi. r11,r11,MSR_PR; \ | ||
153 | lis r11,critical_stack_top@h; \ | ||
154 | ori r11,r11,critical_stack_top@l; \ | ||
155 | beq 1f; \ | ||
156 | /* COMING FROM USER MODE */ \ | ||
157 | mfspr r11,SPRN_SPRG3; /* if from user, start at top of */\ | ||
158 | lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\ | ||
159 | addi r11,r11,THREAD_SIZE; \ | ||
160 | 1: subi r11,r11,INT_FRAME_SIZE; /* Allocate an exception frame */\ | ||
161 | tophys(r11,r11); \ | ||
162 | stw r10,_CCR(r11); /* save various registers */\ | ||
163 | stw r12,GPR12(r11); \ | ||
164 | stw r9,GPR9(r11); \ | ||
165 | mflr r10; \ | ||
166 | stw r10,_LINK(r11); \ | ||
167 | mfspr r12,SPRN_DEAR; /* save DEAR and ESR in the frame */\ | ||
168 | stw r12,_DEAR(r11); /* since they may have had stuff */\ | ||
169 | mfspr r9,SPRN_ESR; /* in them at the point where the */\ | ||
170 | stw r9,_ESR(r11); /* exception was taken */\ | ||
171 | mfspr r12,SPRN_SRR2; /* critical-class return address */\ | ||
172 | stw r1,GPR1(r11); \ | ||
173 | mfspr r9,SPRN_SRR3; /* critical-class saved MSR */\ | ||
174 | stw r1,0(r11); \ | ||
175 | tovirt(r1,r11); \ | ||
176 | rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\ | ||
177 | stw r0,GPR0(r11); \ | ||
178 | SAVE_4GPRS(3, r11); \ | ||
179 | SAVE_2GPRS(7, r11) | ||
180 | |||
181 | /* | ||
182 | * State at this point: | ||
183 | * r9 saved in stack frame, now saved SRR3 & ~MSR_WE | ||
184 | * r10 saved in crit_r10 and in stack frame, trashed | ||
185 | * r11 saved in crit_r11 and in stack frame, | ||
186 | * now phys stack/exception frame pointer | ||
187 | * r12 saved in stack frame, now saved SRR2 | ||
188 | * CR saved in stack frame, CR0.EQ = !SRR3.PR | ||
189 | * LR, DEAR, ESR in stack frame | ||
190 | * r1 saved in stack frame, now virt stack/excframe pointer | ||
191 | * r0, r3-r8 saved in stack frame | ||
192 | */ | ||
193 | |||
194 | /* | ||
195 | * Exception vectors. | ||
196 | */ | ||
197 | #define START_EXCEPTION(n, label) \ | ||
198 | . = n; \ | ||
199 | label: | ||
200 | |||
201 | #define EXCEPTION(n, label, hdlr, xfer) \ | ||
202 | START_EXCEPTION(n, label); \ | ||
203 | NORMAL_EXCEPTION_PROLOG; \ | ||
204 | addi r3,r1,STACK_FRAME_OVERHEAD; /* arg1 = pt_regs pointer */\ | ||
205 | xfer(n, hdlr) | ||
206 | |||
207 | #define CRITICAL_EXCEPTION(n, label, hdlr) \ | ||
208 | START_EXCEPTION(n, label); \ | ||
209 | CRITICAL_EXCEPTION_PROLOG; \ | ||
210 | addi r3,r1,STACK_FRAME_OVERHEAD; \ | ||
211 | EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \ | ||
212 | NOCOPY, crit_transfer_to_handler, \ | ||
213 | ret_from_crit_exc) | ||
214 | |||
215 | #define EXC_XFER_TEMPLATE(hdlr, trap, msr, copyee, tfer, ret) \ | ||
216 | li r10,trap; \ | ||
217 | stw r10,TRAP(r11); /* record trap number in the frame */\ | ||
218 | lis r10,msr@h; \ | ||
219 | ori r10,r10,msr@l; \ | ||
220 | copyee(r10, r9); \ | ||
221 | bl tfer; /* handler/return addrs follow inline */\ | ||
222 | .long hdlr; \ | ||
223 | .long ret | ||
224 | |||
225 | #define COPY_EE(d, s) rlwimi d,s,0,16,16 /* copy the EE bit from s into d */ | ||
226 | #define NOCOPY(d, s) | ||
227 | |||
228 | #define EXC_XFER_STD(n, hdlr) \ | ||
229 | EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, NOCOPY, transfer_to_handler_full, \ | ||
230 | ret_from_except_full) | ||
231 | |||
232 | #define EXC_XFER_LITE(n, hdlr) \ | ||
233 | EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, NOCOPY, transfer_to_handler, \ | ||
234 | ret_from_except) | ||
235 | |||
236 | #define EXC_XFER_EE(n, hdlr) \ | ||
237 | EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, COPY_EE, transfer_to_handler_full, \ | ||
238 | ret_from_except_full) | ||
239 | |||
240 | #define EXC_XFER_EE_LITE(n, hdlr) \ | ||
241 | EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, COPY_EE, transfer_to_handler, \ | ||
242 | ret_from_except) | ||
243 | |||
244 | |||
245 | /* | ||
246 | * 0x0100 - Critical Interrupt Exception | ||
247 | */ | ||
248 | CRITICAL_EXCEPTION(0x0100, CriticalInterrupt, UnknownException) | ||
249 | |||
250 | /* | ||
251 | * 0x0200 - Machine Check Exception | ||
252 | */ | ||
253 | CRITICAL_EXCEPTION(0x0200, MachineCheck, MachineCheckException) | ||
254 | |||
255 | /* | ||
256 | * 0x0300 - Data Storage Exception | ||
257 | * This happens for just a few reasons. U0 set (but we don't do that), | ||
258 | * or zone protection fault (user violation, write to protected page). | ||
259 | * If this is just an update of modified status, we do that quickly | ||
260 | * and exit. Otherwise, we call heavyweight functions to do the work. | ||
261 | */ | ||
262 | START_EXCEPTION(0x0300, DataStorage) | ||
263 | mtspr SPRN_SPRG0, r10 /* Save some working registers */ | ||
264 | mtspr SPRN_SPRG1, r11 | ||
265 | #ifdef CONFIG_403GCX | ||
266 | stw r12, 0(r0) /* 403GCX has fewer SPRGs: spill to low memory */ | ||
267 | stw r9, 4(r0) | ||
268 | mfcr r11 | ||
269 | mfspr r12, SPRN_PID | ||
270 | stw r11, 8(r0) | ||
271 | stw r12, 12(r0) | ||
272 | #else | ||
273 | mtspr SPRN_SPRG4, r12 | ||
274 | mtspr SPRN_SPRG5, r9 | ||
275 | mfcr r11 | ||
276 | mfspr r12, SPRN_PID | ||
277 | mtspr SPRN_SPRG7, r11 | ||
278 | mtspr SPRN_SPRG6, r12 | ||
279 | #endif | ||
280 | |||
281 | /* First, check if it was a zone fault (which means a user | ||
282 | * tried to access a kernel or read-protected page - always | ||
283 | * a SEGV). All other faults here must be stores, so no | ||
284 | * need to check ESR_DST as well. */ | ||
285 | mfspr r10, SPRN_ESR | ||
286 | andis. r10, r10, ESR_DIZ@h | ||
287 | bne 2f | ||
288 | |||
289 | mfspr r10, SPRN_DEAR /* Get faulting address */ | ||
290 | |||
291 | /* If we are faulting a kernel address, we have to use the | ||
292 | * kernel page tables. | ||
293 | */ | ||
294 | andis. r11, r10, 0x8000 /* kernel space starts at 0x8000_0000 here */ | ||
295 | beq 3f | ||
296 | lis r11, swapper_pg_dir@h | ||
297 | ori r11, r11, swapper_pg_dir@l | ||
298 | li r9, 0 | ||
299 | mtspr SPRN_PID, r9 /* TLB will have 0 TID */ | ||
300 | b 4f | ||
301 | |||
302 | /* Get the PGD for the current thread. | ||
303 | */ | ||
304 | 3: | ||
305 | mfspr r11,SPRN_SPRG3 | ||
306 | lwz r11,PGDIR(r11) | ||
307 | 4: | ||
308 | tophys(r11, r11) | ||
309 | rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ | ||
310 | lwz r11, 0(r11) /* Get L1 entry */ | ||
311 | rlwinm. r12, r11, 0, 0, 19 /* Extract L2 (pte) base address */ | ||
312 | beq 2f /* Bail if no table */ | ||
313 | |||
314 | rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */ | ||
315 | lwz r11, 0(r12) /* Get Linux PTE */ | ||
316 | |||
317 | andi. r9, r11, _PAGE_RW /* Is it writeable? */ | ||
318 | beq 2f /* Bail if not */ | ||
319 | |||
320 | /* Update 'changed'. | ||
321 | */ | ||
322 | ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE | ||
323 | stw r11, 0(r12) /* Update Linux page table */ | ||
324 | |||
325 | /* Most of the Linux PTE is ready to load into the TLB LO. | ||
326 | * We set ZSEL, where only the LS-bit determines user access. | ||
327 | * We set execute, because we don't have the granularity to | ||
328 | * properly set this at the page level (Linux problem). | ||
329 | * If shared is set, we cause a zero PID->TID load. | ||
330 | * Many of these bits are software only. Bits we don't set | ||
331 | * here we (properly should) assume have the appropriate value. | ||
332 | */ | ||
333 | li r12, 0x0ce2 /* mask of PTE bits that must not reach the TLB */ | ||
334 | andc r11, r11, r12 /* Make sure 20, 21 are zero */ | ||
335 | |||
336 | /* find the TLB index that caused the fault. It has to be here. | ||
337 | */ | ||
338 | tlbsx r9, 0, r10 | ||
339 | |||
340 | tlbwe r11, r9, TLB_DATA /* Load TLB LO */ | ||
341 | |||
342 | /* Done...restore registers and get out of here. | ||
343 | */ | ||
344 | #ifdef CONFIG_403GCX | ||
345 | lwz r12, 12(r0) | ||
346 | lwz r11, 8(r0) | ||
347 | mtspr SPRN_PID, r12 /* restore the faulting context's PID */ | ||
348 | mtcr r11 | ||
349 | lwz r9, 4(r0) | ||
350 | lwz r12, 0(r0) | ||
351 | #else | ||
352 | mfspr r12, SPRN_SPRG6 | ||
353 | mfspr r11, SPRN_SPRG7 | ||
354 | mtspr SPRN_PID, r12 | ||
355 | mtcr r11 | ||
356 | mfspr r9, SPRN_SPRG5 | ||
357 | mfspr r12, SPRN_SPRG4 | ||
358 | #endif | ||
359 | mfspr r11, SPRN_SPRG1 | ||
360 | mfspr r10, SPRN_SPRG0 | ||
361 | PPC405_ERR77_SYNC | ||
362 | rfi /* Should sync shadow TLBs */ | ||
363 | b . /* prevent prefetch past rfi */ | ||
364 | |||
365 | 2: | ||
366 | /* The bailout. Restore registers to pre-exception conditions | ||
367 | * and call the heavyweights to help us out. | ||
368 | */ | ||
369 | #ifdef CONFIG_403GCX | ||
370 | lwz r12, 12(r0) | ||
371 | lwz r11, 8(r0) | ||
372 | mtspr SPRN_PID, r12 | ||
373 | mtcr r11 | ||
374 | lwz r9, 4(r0) | ||
375 | lwz r12, 0(r0) | ||
376 | #else | ||
377 | mfspr r12, SPRN_SPRG6 | ||
378 | mfspr r11, SPRN_SPRG7 | ||
379 | mtspr SPRN_PID, r12 | ||
380 | mtcr r11 | ||
381 | mfspr r9, SPRN_SPRG5 | ||
382 | mfspr r12, SPRN_SPRG4 | ||
383 | #endif | ||
384 | mfspr r11, SPRN_SPRG1 | ||
385 | mfspr r10, SPRN_SPRG0 | ||
386 | b DataAccess /* full C page-fault path */ | ||
387 | |||
388 | /* | ||
389 | * 0x0400 - Instruction Storage Exception | ||
390 | * This is caused by a fetch from non-execute or guarded pages. | ||
391 | */ | ||
392 | START_EXCEPTION(0x0400, InstructionAccess) | ||
393 | NORMAL_EXCEPTION_PROLOG | ||
394 | mr r4,r12 /* Pass SRR0 as arg2 */ | ||
395 | li r5,0 /* Pass zero as arg3 */ | ||
396 | EXC_XFER_EE_LITE(0x400, handle_page_fault) | ||
397 | |||
398 | /* 0x0500 - External Interrupt Exception */ | ||
399 | EXCEPTION(0x0500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE) | ||
400 | |||
401 | /* 0x0600 - Alignment Exception */ | ||
402 | START_EXCEPTION(0x0600, Alignment) | ||
403 | NORMAL_EXCEPTION_PROLOG | ||
404 | mfspr r4,SPRN_DEAR /* Grab the DEAR and save it */ | ||
405 | stw r4,_DEAR(r11) | ||
406 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
407 | EXC_XFER_EE(0x600, AlignmentException) | ||
408 | |||
409 | /* 0x0700 - Program Exception */ | ||
410 | START_EXCEPTION(0x0700, ProgramCheck) | ||
411 | NORMAL_EXCEPTION_PROLOG | ||
412 | mfspr r4,SPRN_ESR /* Grab the ESR and save it */ | ||
413 | stw r4,_ESR(r11) | ||
414 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
415 | EXC_XFER_STD(0x700, ProgramCheckException) | ||
416 | |||
417 | EXCEPTION(0x0800, Trap_08, UnknownException, EXC_XFER_EE) | ||
418 | EXCEPTION(0x0900, Trap_09, UnknownException, EXC_XFER_EE) | ||
419 | EXCEPTION(0x0A00, Trap_0A, UnknownException, EXC_XFER_EE) | ||
420 | EXCEPTION(0x0B00, Trap_0B, UnknownException, EXC_XFER_EE) | ||
421 | |||
422 | /* 0x0C00 - System Call Exception */ | ||
423 | START_EXCEPTION(0x0C00, SystemCall) | ||
424 | NORMAL_EXCEPTION_PROLOG | ||
425 | EXC_XFER_EE_LITE(0xc00, DoSyscall) | ||
426 | |||
427 | EXCEPTION(0x0D00, Trap_0D, UnknownException, EXC_XFER_EE) | ||
428 | EXCEPTION(0x0E00, Trap_0E, UnknownException, EXC_XFER_EE) | ||
429 | EXCEPTION(0x0F00, Trap_0F, UnknownException, EXC_XFER_EE) | ||
430 | |||
431 | /* 0x1000 - Programmable Interval Timer (PIT) Exception */ | ||
432 | START_EXCEPTION(0x1000, Decrementer) | ||
433 | NORMAL_EXCEPTION_PROLOG | ||
434 | lis r0,TSR_PIS@h | ||
435 | mtspr SPRN_TSR,r0 /* Clear the PIT exception */ | ||
436 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
437 | EXC_XFER_LITE(0x1000, timer_interrupt) | ||
438 | |||
439 | #if 0 | ||
440 | /* NOTE: | ||
441 | * FIT and WDT handlers are not implemented yet. | ||
442 | */ | ||
443 | |||
444 | /* 0x1010 - Fixed Interval Timer (FIT) Exception | ||
445 | */ | ||
446 | STND_EXCEPTION(0x1010, FITException, UnknownException) | ||
447 | |||
448 | /* 0x1020 - Watchdog Timer (WDT) Exception | ||
449 | */ | ||
450 | |||
451 | CRITICAL_EXCEPTION(0x1020, WDTException, UnknownException) | ||
452 | #endif | ||
453 | |||
454 | /* 0x1100 - Data TLB Miss Exception | ||
455 | * As the name implies, translation is not in the MMU, so search the | ||
456 | * page tables and fix it. The only purpose of this function is to | ||
457 | * load TLB entries from the page table if they exist. | ||
458 | */ | ||
459 | START_EXCEPTION(0x1100, DTLBMiss) | ||
460 | mtspr SPRN_SPRG0, r10 /* Save some working registers */ | ||
461 | mtspr SPRN_SPRG1, r11 | ||
462 | #ifdef CONFIG_403GCX | ||
463 | stw r12, 0(r0) /* 403GCX has fewer SPRGs: spill to low memory */ | ||
464 | stw r9, 4(r0) | ||
465 | mfcr r11 | ||
466 | mfspr r12, SPRN_PID | ||
467 | stw r11, 8(r0) | ||
468 | stw r12, 12(r0) | ||
469 | #else | ||
470 | mtspr SPRN_SPRG4, r12 | ||
471 | mtspr SPRN_SPRG5, r9 | ||
472 | mfcr r11 | ||
473 | mfspr r12, SPRN_PID | ||
474 | mtspr SPRN_SPRG7, r11 | ||
475 | mtspr SPRN_SPRG6, r12 | ||
476 | #endif | ||
477 | mfspr r10, SPRN_DEAR /* Get faulting address */ | ||
478 | |||
479 | /* If we are faulting a kernel address, we have to use the | ||
480 | * kernel page tables. | ||
481 | */ | ||
482 | andis. r11, r10, 0x8000 | ||
483 | beq 3f | ||
484 | lis r11, swapper_pg_dir@h | ||
485 | ori r11, r11, swapper_pg_dir@l | ||
486 | li r9, 0 | ||
487 | mtspr SPRN_PID, r9 /* TLB will have 0 TID */ | ||
488 | b 4f | ||
489 | |||
490 | /* Get the PGD for the current thread. | ||
491 | */ | ||
492 | 3: | ||
493 | mfspr r11,SPRN_SPRG3 | ||
494 | lwz r11,PGDIR(r11) | ||
495 | 4: | ||
496 | tophys(r11, r11) | ||
497 | rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ | ||
498 | lwz r12, 0(r11) /* Get L1 entry */ | ||
499 | andi. r9, r12, _PMD_PRESENT /* Check if it points to a PTE page */ | ||
500 | beq 2f /* Bail if no table */ | ||
501 | |||
502 | rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */ | ||
503 | lwz r11, 0(r12) /* Get Linux PTE */ | ||
504 | andi. r9, r11, _PAGE_PRESENT | ||
505 | beq 5f /* not present: take the full fault path */ | ||
506 | |||
507 | ori r11, r11, _PAGE_ACCESSED /* mark referenced while we are here */ | ||
508 | stw r11, 0(r12) | ||
509 | |||
510 | /* Create TLB tag. This is the faulting address plus a static | ||
511 | * set of bits. These are size, valid, E, U0. | ||
512 | */ | ||
513 | li r12, 0x00c0 | ||
514 | rlwimi r10, r12, 0, 20, 31 | ||
515 | |||
516 | b finish_tlb_load | ||
517 | |||
518 | 2: /* Check for possible large-page pmd entry */ | ||
519 | rlwinm. r9, r12, 2, 22, 24 | ||
520 | beq 5f | ||
521 | |||
522 | /* Create TLB tag. This is the faulting address, plus a static | ||
523 | * set of bits (valid, E, U0) plus the size from the PMD. | ||
524 | */ | ||
525 | ori r9, r9, 0x40 | ||
526 | rlwimi r10, r9, 0, 20, 31 | ||
527 | mr r11, r12 /* the pmd entry itself is the "PTE" here */ | ||
528 | |||
529 | b finish_tlb_load | ||
530 | |||
531 | 5: | ||
532 | /* The bailout. Restore registers to pre-exception conditions | ||
533 | * and call the heavyweights to help us out. | ||
534 | */ | ||
535 | #ifdef CONFIG_403GCX | ||
536 | lwz r12, 12(r0) | ||
537 | lwz r11, 8(r0) | ||
538 | mtspr SPRN_PID, r12 | ||
539 | mtcr r11 | ||
540 | lwz r9, 4(r0) | ||
541 | lwz r12, 0(r0) | ||
542 | #else | ||
543 | mfspr r12, SPRN_SPRG6 | ||
544 | mfspr r11, SPRN_SPRG7 | ||
545 | mtspr SPRN_PID, r12 | ||
546 | mtcr r11 | ||
547 | mfspr r9, SPRN_SPRG5 | ||
548 | mfspr r12, SPRN_SPRG4 | ||
549 | #endif | ||
550 | mfspr r11, SPRN_SPRG1 | ||
551 | mfspr r10, SPRN_SPRG0 | ||
552 | b DataAccess | ||
553 | |||
554 | /* 0x1200 - Instruction TLB Miss Exception | ||
555 | * Nearly the same as above, except we get our information from different | ||
556 | * registers and bailout to a different point. | ||
557 | */ | ||
558 | START_EXCEPTION(0x1200, ITLBMiss) | ||
559 | mtspr SPRN_SPRG0, r10 /* Save some working registers */ | ||
560 | mtspr SPRN_SPRG1, r11 | ||
561 | #ifdef CONFIG_403GCX | ||
562 | stw r12, 0(r0) /* 403GCX has fewer SPRGs: spill to low memory */ | ||
563 | stw r9, 4(r0) | ||
564 | mfcr r11 | ||
565 | mfspr r12, SPRN_PID | ||
566 | stw r11, 8(r0) | ||
567 | stw r12, 12(r0) | ||
568 | #else | ||
569 | mtspr SPRN_SPRG4, r12 | ||
570 | mtspr SPRN_SPRG5, r9 | ||
571 | mfcr r11 | ||
572 | mfspr r12, SPRN_PID | ||
573 | mtspr SPRN_SPRG7, r11 | ||
574 | mtspr SPRN_SPRG6, r12 | ||
575 | #endif | ||
576 | mfspr r10, SPRN_SRR0 /* Get faulting address */ | ||
577 | |||
578 | /* If we are faulting a kernel address, we have to use the | ||
579 | * kernel page tables. | ||
580 | */ | ||
581 | andis. r11, r10, 0x8000 | ||
582 | beq 3f | ||
583 | lis r11, swapper_pg_dir@h | ||
584 | ori r11, r11, swapper_pg_dir@l | ||
585 | li r9, 0 | ||
586 | mtspr SPRN_PID, r9 /* TLB will have 0 TID */ | ||
587 | b 4f | ||
588 | |||
589 | /* Get the PGD for the current thread. | ||
590 | */ | ||
591 | 3: | ||
592 | mfspr r11,SPRN_SPRG3 | ||
593 | lwz r11,PGDIR(r11) | ||
594 | 4: | ||
595 | tophys(r11, r11) | ||
596 | rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ | ||
597 | lwz r12, 0(r11) /* Get L1 entry */ | ||
598 | andi. r9, r12, _PMD_PRESENT /* Check if it points to a PTE page */ | ||
599 | beq 2f /* Bail if no table */ | ||
600 | |||
601 | rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */ | ||
602 | lwz r11, 0(r12) /* Get Linux PTE */ | ||
603 | andi. r9, r11, _PAGE_PRESENT | ||
604 | beq 5f /* not present: take the full fault path */ | ||
605 | |||
606 | ori r11, r11, _PAGE_ACCESSED /* mark referenced while we are here */ | ||
607 | stw r11, 0(r12) | ||
608 | |||
609 | /* Create TLB tag. This is the faulting address plus a static | ||
610 | * set of bits. These are size, valid, E, U0. | ||
611 | */ | ||
612 | li r12, 0x00c0 | ||
613 | rlwimi r10, r12, 0, 20, 31 | ||
614 | |||
615 | b finish_tlb_load | ||
616 | |||
617 | 2: /* Check for possible large-page pmd entry */ | ||
618 | rlwinm. r9, r12, 2, 22, 24 | ||
619 | beq 5f | ||
620 | |||
621 | /* Create TLB tag. This is the faulting address, plus a static | ||
622 | * set of bits (valid, E, U0) plus the size from the PMD. | ||
623 | */ | ||
624 | ori r9, r9, 0x40 | ||
625 | rlwimi r10, r9, 0, 20, 31 | ||
626 | mr r11, r12 /* the pmd entry itself is the "PTE" here */ | ||
627 | |||
628 | b finish_tlb_load | ||
629 | |||
630 | 5: | ||
631 | /* The bailout. Restore registers to pre-exception conditions | ||
632 | * and call the heavyweights to help us out. | ||
633 | */ | ||
634 | #ifdef CONFIG_403GCX | ||
635 | lwz r12, 12(r0) | ||
636 | lwz r11, 8(r0) | ||
637 | mtspr SPRN_PID, r12 | ||
638 | mtcr r11 | ||
639 | lwz r9, 4(r0) | ||
640 | lwz r12, 0(r0) | ||
641 | #else | ||
642 | mfspr r12, SPRN_SPRG6 | ||
643 | mfspr r11, SPRN_SPRG7 | ||
644 | mtspr SPRN_PID, r12 | ||
645 | mtcr r11 | ||
646 | mfspr r9, SPRN_SPRG5 | ||
647 | mfspr r12, SPRN_SPRG4 | ||
648 | #endif | ||
649 | mfspr r11, SPRN_SPRG1 | ||
650 | mfspr r10, SPRN_SPRG0 | ||
651 | b InstructionAccess | ||
652 | |||
653 | EXCEPTION(0x1300, Trap_13, UnknownException, EXC_XFER_EE) | ||
654 | EXCEPTION(0x1400, Trap_14, UnknownException, EXC_XFER_EE) | ||
655 | EXCEPTION(0x1500, Trap_15, UnknownException, EXC_XFER_EE) | ||
656 | EXCEPTION(0x1600, Trap_16, UnknownException, EXC_XFER_EE) | ||
657 | #ifdef CONFIG_IBM405_ERR51 | ||
658 | /* 405GP errata 51: redirect this vector to the data TLB miss handler */ | ||
659 | START_EXCEPTION(0x1700, Trap_17) | ||
660 | b DTLBMiss | ||
661 | #else | ||
662 | EXCEPTION(0x1700, Trap_17, UnknownException, EXC_XFER_EE) | ||
663 | #endif | ||
664 | EXCEPTION(0x1800, Trap_18, UnknownException, EXC_XFER_EE) | ||
665 | EXCEPTION(0x1900, Trap_19, UnknownException, EXC_XFER_EE) | ||
666 | EXCEPTION(0x1A00, Trap_1A, UnknownException, EXC_XFER_EE) | ||
667 | EXCEPTION(0x1B00, Trap_1B, UnknownException, EXC_XFER_EE) | ||
668 | EXCEPTION(0x1C00, Trap_1C, UnknownException, EXC_XFER_EE) | ||
669 | EXCEPTION(0x1D00, Trap_1D, UnknownException, EXC_XFER_EE) | ||
670 | EXCEPTION(0x1E00, Trap_1E, UnknownException, EXC_XFER_EE) | ||
671 | EXCEPTION(0x1F00, Trap_1F, UnknownException, EXC_XFER_EE) | ||
672 | |||
673 | /* Check for a single step debug exception while in an exception | ||
674 | * handler before state has been saved. This is to catch the case | ||
675 | * where an instruction that we are trying to single step causes | ||
676 | * an exception (eg ITLB/DTLB miss) and thus the first instruction of | ||
677 | * the exception handler generates a single step debug exception. | ||
678 | * | ||
679 | * If we get a debug trap on the first instruction of an exception handler, | ||
680 | * we reset the MSR_DE in the _exception handler's_ MSR (the debug trap is | ||
681 | * a critical exception, so we are using SPRN_CSRR1 to manipulate the MSR). | ||
682 | * The exception handler was handling a non-critical interrupt, so it will | ||
683 | * save (and later restore) the MSR via SPRN_SRR1, which will still have | ||
684 | * the MSR_DE bit set. | ||
685 | */ | ||
686 | /* 0x2000 - Debug Exception */ | ||
687 | START_EXCEPTION(0x2000, DebugTrap) | ||
688 | CRITICAL_EXCEPTION_PROLOG | ||
689 | |||
690 | /* | ||
691 | * If this is a single step or branch-taken exception in an | ||
692 | * exception entry sequence, it was probably meant to apply to | ||
693 | * the code where the exception occurred (since exception entry | ||
694 | * doesn't turn off DE automatically). We simulate the effect | ||
695 | * of turning off DE on entry to an exception handler by turning | ||
696 | * off DE in the SRR3 value and clearing the debug status. | ||
697 | */ | ||
698 | mfspr r10,SPRN_DBSR /* check single-step/branch taken */ | ||
699 | andis. r10,r10,DBSR_IC@h | ||
700 | beq+ 2f | ||
701 | |||
702 | andi. r10,r9,MSR_IR|MSR_PR /* check supervisor + MMU off */ | ||
703 | beq 1f /* branch and fix it up */ | ||
704 | |||
705 | mfspr r10,SPRN_SRR2 /* Faulting instruction address */ | ||
706 | cmplwi r10,0x2100 | ||
707 | bgt+ 2f /* address above exception vectors */ | ||
708 | |||
709 | /* here it looks like we got an inappropriate debug exception. */ | ||
710 | 1: rlwinm r9,r9,0,~MSR_DE /* clear DE in the SRR3 value */ | ||
711 | lis r10,DBSR_IC@h /* clear the IC event */ | ||
712 | mtspr SPRN_DBSR,r10 | ||
713 | /* restore state and get out */ | ||
714 | lwz r10,_CCR(r11) | ||
715 | lwz r0,GPR0(r11) | ||
716 | lwz r1,GPR1(r11) | ||
717 | mtcrf 0x80,r10 /* restore CR0 only; other fields untouched here */ | ||
718 | mtspr SPRN_SRR2,r12 | ||
719 | mtspr SPRN_SRR3,r9 | ||
720 | lwz r9,GPR9(r11) | ||
721 | lwz r12,GPR12(r11) | ||
722 | lwz r10,crit_r10@l(0) | ||
723 | lwz r11,crit_r11@l(0) | ||
724 | PPC405_ERR77_SYNC | ||
725 | rfci /* return from critical interrupt */ | ||
726 | b . | ||
727 | |||
728 | /* continue normal handling for a critical exception... */ | ||
729 | 2: mfspr r4,SPRN_DBSR /* pass debug status as arg2 */ | ||
730 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
731 | EXC_XFER_TEMPLATE(DebugException, 0x2002, \ | ||
732 | (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \ | ||
733 | NOCOPY, crit_transfer_to_handler, ret_from_crit_exc) | ||
734 | |||
735 | /* | ||
736 | * The other Data TLB exceptions bail out to this point | ||
737 | * if they can't resolve the lightweight TLB fault. | ||
738 | * Argument protocol for handle_page_fault: r4 = faulting address | ||
739 | * (DEAR), r5 = ESR; both are also saved into the exception frame. | ||
740 | */ | ||
739 | DataAccess: | ||
740 | NORMAL_EXCEPTION_PROLOG | ||
741 | mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */ | ||
742 | stw r5,_ESR(r11) | ||
743 | mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */ | ||
744 | EXC_XFER_EE_LITE(0x300, handle_page_fault) | ||
745 | |||
746 | /* Other PowerPC processors, namely those derived from the 6xx-series | ||
747 | * have vectors from 0x2100 through 0x2F00 defined, but marked as reserved. | ||
748 | * However, for the 4xx-series processors these are neither defined nor | ||
749 | * reserved. | ||
750 | */ | ||
751 | |||
752 | /* Damn, I came up one instruction too many to fit into the | ||
753 | * exception space :-). Both the instruction and data TLB | ||
754 | * miss get to this point to load the TLB. | ||
755 | * r10 - TLB_TAG value | ||
756 | * r11 - Linux PTE | ||
757 | * r12, r9 - available to use | ||
758 | * PID - loaded with proper value when we get here | ||
759 | * Upon exit, we reload everything and RFI. | ||
760 | * Actually, it will fit now, but oh well.....a common place | ||
761 | * to load the TLB. | ||
762 | */ | ||
763 | tlb_4xx_index: | ||
764 | .long 0 | ||
765 | finish_tlb_load: | ||
766 | /* load the next available TLB index. | ||
767 | */ | ||
768 | lwz r9, tlb_4xx_index@l(0) | ||
769 | addi r9, r9, 1 | ||
770 | andi. r9, r9, (PPC4XX_TLB_SIZE-1) | ||
771 | stw r9, tlb_4xx_index@l(0) | ||
772 | |||
773 | 6: | ||
774 | /* | ||
775 | * Clear out the software-only bits in the PTE to generate the | ||
776 | * TLB_DATA value. These are the bottom 2 bits of the RPN, the | ||
777 | * top 3 bits of the zone field, and M. | ||
778 | */ | ||
779 | li r12, 0x0ce2 | ||
780 | andc r11, r11, r12 | ||
781 | |||
782 | tlbwe r11, r9, TLB_DATA /* Load TLB LO */ | ||
783 | tlbwe r10, r9, TLB_TAG /* Load TLB HI */ | ||
784 | |||
785 | /* Done...restore registers and get out of here. | ||
786 | */ | ||
787 | #ifdef CONFIG_403GCX | ||
788 | lwz r12, 12(r0) | ||
789 | lwz r11, 8(r0) | ||
790 | mtspr SPRN_PID, r12 | ||
791 | mtcr r11 | ||
792 | lwz r9, 4(r0) | ||
793 | lwz r12, 0(r0) | ||
794 | #else | ||
795 | mfspr r12, SPRN_SPRG6 | ||
796 | mfspr r11, SPRN_SPRG7 | ||
797 | mtspr SPRN_PID, r12 | ||
798 | mtcr r11 | ||
799 | mfspr r9, SPRN_SPRG5 | ||
800 | mfspr r12, SPRN_SPRG4 | ||
801 | #endif | ||
802 | mfspr r11, SPRN_SPRG1 | ||
803 | mfspr r10, SPRN_SPRG0 | ||
804 | PPC405_ERR77_SYNC | ||
805 | rfi /* Should sync shadow TLBs */ | ||
806 | b . /* prevent prefetch past rfi */ | ||
807 | |||
808 | /* extern void giveup_fpu(struct task_struct *prev) | ||
809 | * | ||
810 | * The PowerPC 4xx family of processors do not have an FPU, so this just | ||
811 | * returns. There is no FP state to hand off, but the symbol must | ||
812 | * exist because common code calls it unconditionally. | ||
813 | */ | ||
813 | _GLOBAL(giveup_fpu) | ||
814 | blr | ||
815 | |||
816 | /* This is where the main kernel code starts. | ||
817 | */ | ||
818 | start_here: | ||
819 | |||
820 | /* ptr to current */ | ||
821 | lis r2,init_task@h | ||
822 | ori r2,r2,init_task@l | ||
823 | |||
824 | /* ptr to phys current thread */ | ||
825 | tophys(r4,r2) | ||
826 | addi r4,r4,THREAD /* init task's THREAD */ | ||
827 | mtspr SPRN_SPRG3,r4 | ||
828 | |||
829 | /* stack */ | ||
830 | lis r1,init_thread_union@ha | ||
831 | addi r1,r1,init_thread_union@l | ||
832 | li r0,0 | ||
833 | stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) | ||
834 | |||
835 | bl early_init /* We have to do this with MMU on */ | ||
836 | |||
837 | /* | ||
838 | * Decide what sort of machine this is and initialize the MMU. | ||
839 | */ | ||
840 | mr r3,r31 | ||
841 | mr r4,r30 | ||
842 | mr r5,r29 | ||
843 | mr r6,r28 | ||
844 | mr r7,r27 | ||
845 | bl machine_init | ||
846 | bl MMU_init | ||
847 | |||
848 | /* Go back to running unmapped so we can load up new values | ||
849 | * and change to using our exception vectors. | ||
850 | * On the 4xx, all we have to do is invalidate the TLB to clear | ||
851 | * the old 16M byte TLB mappings. | ||
852 | */ | ||
853 | lis r4,2f@h | ||
854 | ori r4,r4,2f@l | ||
855 | tophys(r4,r4) | ||
856 | lis r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@h | ||
857 | ori r3,r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@l | ||
858 | mtspr SPRN_SRR0,r4 | ||
859 | mtspr SPRN_SRR1,r3 | ||
860 | rfi | ||
861 | b . /* prevent prefetch past rfi */ | ||
862 | |||
863 | /* Load up the kernel context */ | ||
864 | 2: | ||
865 | sync /* Flush to memory before changing TLB */ | ||
866 | tlbia | ||
867 | isync /* Flush shadow TLBs */ | ||
868 | |||
869 | /* set up the PTE pointers for the Abatron bdiGDB. | ||
870 | */ | ||
871 | lis r6, swapper_pg_dir@h | ||
872 | ori r6, r6, swapper_pg_dir@l | ||
873 | lis r5, abatron_pteptrs@h | ||
874 | ori r5, r5, abatron_pteptrs@l | ||
875 | stw r5, 0xf0(r0) /* Must match your Abatron config file */ | ||
876 | tophys(r5,r5) | ||
877 | stw r6, 0(r5) | ||
878 | |||
879 | /* Now turn on the MMU for real! */ | ||
880 | lis r4,MSR_KERNEL@h | ||
881 | ori r4,r4,MSR_KERNEL@l | ||
882 | lis r3,start_kernel@h | ||
883 | ori r3,r3,start_kernel@l | ||
884 | mtspr SPRN_SRR0,r3 | ||
885 | mtspr SPRN_SRR1,r4 | ||
886 | rfi /* enable MMU and jump to start_kernel */ | ||
887 | b . /* prevent prefetch past rfi */ | ||
888 | |||
889 | /* Set up the initial MMU state so we can do the first level of | ||
890 | * kernel initialization. This maps the first 16 MBytes of memory 1:1 | ||
891 | * virtual to physical and more importantly sets the cache mode. | ||
892 | */ | ||
893 | initial_mmu: | ||
894 | tlbia /* Invalidate all TLB entries */ | ||
895 | isync | ||
896 | |||
897 | /* We should still be executing code at physical address 0x0000xxxx | ||
898 | * at this point. However, start_here is at virtual address | ||
899 | * 0xC000xxxx. So, set up a TLB mapping to cover this once | ||
900 | * translation is enabled. | ||
901 | */ | ||
902 | |||
903 | lis r3,KERNELBASE@h /* Load the kernel virtual address */ | ||
904 | ori r3,r3,KERNELBASE@l | ||
905 | tophys(r4,r3) /* Load the kernel physical address */ | ||
906 | |||
907 | iccci r0,r3 /* Invalidate the i-cache before use */ | ||
908 | |||
909 | /* Load the kernel PID. | ||
910 | */ | ||
911 | li r0,0 | ||
912 | mtspr SPRN_PID,r0 | ||
913 | sync | ||
914 | |||
915 | /* Configure and load two entries into TLB slots 62 and 63. | ||
916 | * In case we are pinning TLBs, these are reserved by the | ||
917 | * other TLB functions. If not reserving, then it doesn't | ||
918 | * matter where they are loaded. | ||
919 | */ | ||
920 | clrrwi r4,r4,10 /* Mask off the real page number */ | ||
921 | ori r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */ | ||
922 | |||
923 | clrrwi r3,r3,10 /* Mask off the effective page number */ | ||
924 | ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_16M)) | ||
925 | |||
926 | li r0,63 /* TLB slot 63 */ | ||
927 | |||
928 | tlbwe r4,r0,TLB_DATA /* Load the data portion of the entry */ | ||
929 | tlbwe r3,r0,TLB_TAG /* Load the tag portion of the entry */ | ||
930 | |||
931 | #if defined(CONFIG_SERIAL_TEXT_DEBUG) && defined(SERIAL_DEBUG_IO_BASE) | ||
932 | |||
933 | /* Load a TLB entry for the UART, so that ppc4xx_progress() can use | ||
934 | * the UARTs nice and early. We use a 4k real==virtual mapping. */ | ||
935 | |||
936 | lis r3,SERIAL_DEBUG_IO_BASE@h | ||
937 | ori r3,r3,SERIAL_DEBUG_IO_BASE@l | ||
938 | mr r4,r3 | ||
939 | clrrwi r4,r4,12 | ||
940 | ori r4,r4,(TLB_WR|TLB_I|TLB_M|TLB_G) | ||
941 | |||
942 | clrrwi r3,r3,12 | ||
943 | ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K)) | ||
944 | |||
945 | li r0,0 /* TLB slot 0 */ | ||
946 | tlbwe r4,r0,TLB_DATA | ||
947 | tlbwe r3,r0,TLB_TAG | ||
948 | #endif /* CONFIG_SERIAL_DEBUG_TEXT && SERIAL_DEBUG_IO_BASE */ | ||
949 | |||
950 | isync | ||
951 | |||
952 | /* Establish the exception vector base | ||
953 | */ | ||
954 | lis r4,KERNELBASE@h /* EVPR only uses the high 16-bits */ | ||
955 | tophys(r0,r4) /* Use the physical address */ | ||
956 | mtspr SPRN_EVPR,r0 | ||
957 | |||
958 | blr | ||
959 | |||
/* void abort(void) -- force a board-level reset by setting the
 * system-reset bit in DBCR0 (debug control register).
 * NOTE(review): there is intentionally no blr here; execution falls
 * through toward set_context below on the assumption that the reset
 * written to DBCR0 takes effect immediately -- confirm against the
 * 4xx core documentation before relying on this.
 */
960 | _GLOBAL(abort) | ||
961 | mfspr r13,SPRN_DBCR0 | ||
962 | oris r13,r13,DBCR0_RST_SYSTEM@h | ||
963 | mtspr SPRN_DBCR0,r13 | ||
964 | |||
/* void set_context(mm_context_t context, pgd_t *pgd)
 * r3 = new context (written to the PID register), r4 = pgd pointer.
 * The sync/isync pair around the PID write flushes the shadow TLBs
 * so translations from the old context are not reused.
 */
965 | _GLOBAL(set_context) | ||
966 | |||
967 | #ifdef CONFIG_BDI_SWITCH | ||
968 | /* Context switch the PTE pointer for the Abatron BDI2000. | ||
969 | * The PGDIR is the second parameter. | ||
970 | * KERNELBASE+0xf0 holds the abatron_pteptrs pointer stored by | ||
971 | * start_here; slot 1 (offset 4) is the current user pgd. | ||
972 | */ | ||
971 | lis r5, KERNELBASE@h | ||
972 | lwz r5, 0xf0(r5) | ||
973 | stw r4, 0x4(r5) | ||
974 | #endif | ||
975 | sync | ||
976 | mtspr SPRN_PID,r3 | ||
977 | isync /* Need an isync to flush shadow */ | ||
978 | /* TLBs after changing PID */ | ||
979 | blr | ||
980 | |||
981 | /* We put a few things here that have to be page-aligned. This stuff | ||
982 | * goes at the beginning of the data segment, which is page-aligned. | ||
983 | */ | ||
984 | .data | ||
985 | _GLOBAL(sdata) | ||
986 | _GLOBAL(empty_zero_page) | ||
987 | .space 4096 | ||
988 | _GLOBAL(swapper_pg_dir) | ||
989 | .space 4096 | ||
990 | |||
991 | |||
992 | /* Stack for handling critical exceptions from kernel mode. | ||
993 | * .align 12 = 4 KiB alignment; both labels after the 4 KiB gap | ||
994 | * name the top (highest address) of the stack. | ||
995 | */ | ||
993 | .section .bss | ||
994 | .align 12 | ||
995 | exception_stack_bottom: | ||
996 | .space 4096 | ||
997 | critical_stack_top: | ||
998 | _GLOBAL(exception_stack_top) | ||
999 | |||
1000 | /* This space gets a copy of optional info passed to us by the bootstrap | ||
1001 | * which is used to pass parameters into the kernel like root=/dev/sda1, etc. | ||
1002 | */ | ||
1003 | _GLOBAL(cmd_line) | ||
1004 | .space 512 | ||
1005 | |||
1006 | /* Room for two PTE pointers, usually the kernel and current user pointers | ||
1007 | * to their respective root page table. Referenced via the 0xf0 slot | ||
1008 | * set up in start_here and updated by set_context. | ||
1009 | */ | ||
1009 | abatron_pteptrs: | ||
1010 | .space 8 | ||
diff --git a/arch/ppc/kernel/head_8xx.S b/arch/ppc/kernel/head_8xx.S new file mode 100644 index 000000000000..5a7a64e91fc5 --- /dev/null +++ b/arch/ppc/kernel/head_8xx.S | |||
@@ -0,0 +1,862 @@ | |||
1 | /* | ||
2 | * arch/ppc/kernel/head_8xx.S | ||
3 | * | ||
4 | * PowerPC version | ||
5 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
6 | * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP | ||
7 | * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> | ||
8 | * Low-level exception handlers and MMU support | ||
9 | * rewritten by Paul Mackerras. | ||
10 | * Copyright (C) 1996 Paul Mackerras. | ||
11 | * MPC8xx modifications by Dan Malek | ||
12 | * Copyright (C) 1997 Dan Malek (dmalek@jlc.net). | ||
13 | * | ||
14 | * This file contains low-level support and setup for PowerPC 8xx | ||
15 | * embedded processors, including trap and interrupt dispatch. | ||
16 | * | ||
17 | * This program is free software; you can redistribute it and/or | ||
18 | * modify it under the terms of the GNU General Public License | ||
19 | * as published by the Free Software Foundation; either version | ||
20 | * 2 of the License, or (at your option) any later version. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #include <linux/config.h> | ||
25 | #include <asm/processor.h> | ||
26 | #include <asm/page.h> | ||
27 | #include <asm/mmu.h> | ||
28 | #include <asm/cache.h> | ||
29 | #include <asm/pgtable.h> | ||
30 | #include <asm/cputable.h> | ||
31 | #include <asm/thread_info.h> | ||
32 | #include <asm/ppc_asm.h> | ||
33 | #include <asm/offsets.h> | ||
34 | |||
35 | /* Macro to make the code more readable. | ||
36 | * When CONFIG_8xx_CPU6 is set, store-then-reload `val` through the | ||
37 | * scratch word at 12(r0) before touching certain SPRs -- presumably | ||
38 | * the MPC8xx "CPU6" erratum workaround sequence; confirm against the | ||
39 | * MPC8xx errata sheet. Compiles to nothing otherwise. | ||
40 | */ | ||
36 | #ifdef CONFIG_8xx_CPU6 | ||
37 | #define DO_8xx_CPU6(val, reg) \ | ||
38 | li reg, val; \ | ||
39 | stw reg, 12(r0); \ | ||
40 | lwz reg, 12(r0); | ||
41 | #else | ||
42 | #define DO_8xx_CPU6(val, reg) | ||
43 | #endif | ||
44 | .text | ||
45 | .globl _stext | ||
46 | _stext: | ||
47 | .text | ||
48 | .globl _start | ||
49 | _start: | ||
50 | |||
51 | /* MPC8xx | ||
52 | * This port was done on an MBX board with an 860. Right now I only | ||
53 | * support an ELF compressed (zImage) boot from EPPC-Bug because the | ||
54 | * code there loads up some registers before calling us: | ||
55 | * r3: ptr to board info data | ||
56 | * r4: initrd_start or if no initrd then 0 | ||
57 | * r5: initrd_end - unused if r4 is 0 | ||
58 | * r6: Start of command line string | ||
59 | * r7: End of command line string | ||
60 | * | ||
61 | * I decided to use conditional compilation instead of checking PVR and | ||
62 | * adding more processor specific branches around code I don't need. | ||
63 | * Since this is an embedded processor, I also appreciate any memory | ||
64 | * savings I can get. | ||
65 | * | ||
66 | * The MPC8xx does not have any BATs, but it supports large page sizes. | ||
67 | * We first initialize the MMU to support 8M byte pages, then load one | ||
68 | * entry into each of the instruction and data TLBs to map the first | ||
69 | * 8M 1:1. I also mapped an additional I/O space 1:1 so we can get to | ||
70 | * the "internal" processor registers before MMU_init is called. | ||
71 | * | ||
72 | * The TLB code currently contains a major hack. Since I use the condition | ||
73 | * code register, I have to save and restore it. I am out of registers, so | ||
74 | * I just store it in memory location 0 (the TLB handlers are not reentrant). | ||
75 | * To avoid making any decisions, I need to use the "segment" valid bit | ||
76 | * in the first level table, but that would require many changes to the | ||
77 | * Linux page directory/table functions that I don't want to do right now. | ||
78 | * | ||
79 | * I used to use SPRG2 for a temporary register in the TLB handler, but it | ||
80 | * has since been put to other uses. I now use a hack to save a register | ||
81 | * and the CCR at memory location 0.....Someday I'll fix this..... | ||
82 | * -- Dan | ||
83 | */ | ||
84 | .globl __start | ||
85 | __start: | ||
86 | mr r31,r3 /* save parameters */ | ||
87 | mr r30,r4 | ||
88 | mr r29,r5 | ||
89 | mr r28,r6 | ||
90 | mr r27,r7 | ||
91 | |||
92 | /* We have to turn on the MMU right away so we get cache modes | ||
93 | * set correctly. | ||
94 | */ | ||
95 | bl initial_mmu | ||
96 | |||
97 | /* We now have the lower 8 Meg mapped into TLB entries, and the caches | ||
98 | * ready to work. | ||
99 | */ | ||
100 | |||
101 | turn_on_mmu: | ||
102 | mfmsr r0 | ||
103 | ori r0,r0,MSR_DR|MSR_IR | ||
104 | mtspr SPRN_SRR1,r0 | ||
105 | lis r0,start_here@h | ||
106 | ori r0,r0,start_here@l | ||
107 | mtspr SPRN_SRR0,r0 | ||
108 | SYNC | ||
109 | rfi /* enables MMU */ | ||
110 | |||
111 | /* | ||
112 | * Exception entry code. This code runs with address translation | ||
113 | * turned off, i.e. using physical addresses. | ||
114 | * We assume sprg3 has the physical address of the current | ||
115 | * task's thread_struct. | ||
116 | */ | ||
117 | #define EXCEPTION_PROLOG \ | ||
118 | mtspr SPRN_SPRG0,r10; \ | ||
119 | mtspr SPRN_SPRG1,r11; \ | ||
120 | mfcr r10; \ | ||
121 | EXCEPTION_PROLOG_1; \ | ||
122 | EXCEPTION_PROLOG_2 | ||
123 | |||
124 | #define EXCEPTION_PROLOG_1 \ | ||
125 | mfspr r11,SPRN_SRR1; /* check whether user or kernel */ \ | ||
126 | andi. r11,r11,MSR_PR; \ | ||
127 | tophys(r11,r1); /* use tophys(r1) if kernel */ \ | ||
128 | beq 1f; \ | ||
129 | mfspr r11,SPRN_SPRG3; \ | ||
130 | lwz r11,THREAD_INFO-THREAD(r11); \ | ||
131 | addi r11,r11,THREAD_SIZE; \ | ||
132 | tophys(r11,r11); \ | ||
133 | 1: subi r11,r11,INT_FRAME_SIZE /* alloc exc. frame */ | ||
134 | |||
135 | |||
136 | #define EXCEPTION_PROLOG_2 \ | ||
137 | CLR_TOP32(r11); \ | ||
138 | stw r10,_CCR(r11); /* save registers */ \ | ||
139 | stw r12,GPR12(r11); \ | ||
140 | stw r9,GPR9(r11); \ | ||
141 | mfspr r10,SPRN_SPRG0; \ | ||
142 | stw r10,GPR10(r11); \ | ||
143 | mfspr r12,SPRN_SPRG1; \ | ||
144 | stw r12,GPR11(r11); \ | ||
145 | mflr r10; \ | ||
146 | stw r10,_LINK(r11); \ | ||
147 | mfspr r12,SPRN_SRR0; \ | ||
148 | mfspr r9,SPRN_SRR1; \ | ||
149 | stw r1,GPR1(r11); \ | ||
150 | stw r1,0(r11); \ | ||
151 | tovirt(r1,r11); /* set new kernel sp */ \ | ||
152 | li r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \ | ||
153 | MTMSRD(r10); /* (except for mach check in rtas) */ \ | ||
154 | stw r0,GPR0(r11); \ | ||
155 | SAVE_4GPRS(3, r11); \ | ||
156 | SAVE_2GPRS(7, r11) | ||
157 | |||
158 | /* | ||
159 | * Note: code which follows this uses cr0.eq (set if from kernel), | ||
160 | * r11, r12 (SRR0), and r9 (SRR1). | ||
161 | * | ||
162 | * Note2: once we have set r1 we are in a position to take exceptions | ||
163 | * again, and we could thus set MSR:RI at that point. | ||
164 | */ | ||
165 | |||
166 | /* | ||
167 | * Exception vectors. | ||
168 | */ | ||
169 | #define EXCEPTION(n, label, hdlr, xfer) \ | ||
170 | . = n; \ | ||
171 | label: \ | ||
172 | EXCEPTION_PROLOG; \ | ||
173 | addi r3,r1,STACK_FRAME_OVERHEAD; \ | ||
174 | xfer(n, hdlr) | ||
175 | |||
176 | #define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret) \ | ||
177 | li r10,trap; \ | ||
178 | stw r10,TRAP(r11); \ | ||
179 | li r10,MSR_KERNEL; \ | ||
180 | copyee(r10, r9); \ | ||
181 | bl tfer; \ | ||
182 | i##n: \ | ||
183 | .long hdlr; \ | ||
184 | .long ret | ||
185 | |||
186 | #define COPY_EE(d, s) rlwimi d,s,0,16,16 | ||
187 | #define NOCOPY(d, s) | ||
188 | |||
189 | #define EXC_XFER_STD(n, hdlr) \ | ||
190 | EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full, \ | ||
191 | ret_from_except_full) | ||
192 | |||
193 | #define EXC_XFER_LITE(n, hdlr) \ | ||
194 | EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \ | ||
195 | ret_from_except) | ||
196 | |||
197 | #define EXC_XFER_EE(n, hdlr) \ | ||
198 | EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \ | ||
199 | ret_from_except_full) | ||
200 | |||
201 | #define EXC_XFER_EE_LITE(n, hdlr) \ | ||
202 | EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \ | ||
203 | ret_from_except) | ||
204 | |||
205 | /* System reset */ | ||
206 | EXCEPTION(0x100, Reset, UnknownException, EXC_XFER_STD) | ||
207 | |||
208 | /* Machine check */ | ||
209 | . = 0x200 | ||
210 | MachineCheck: | ||
211 | EXCEPTION_PROLOG | ||
212 | mfspr r4,SPRN_DAR | ||
213 | stw r4,_DAR(r11) | ||
214 | mfspr r5,SPRN_DSISR | ||
215 | stw r5,_DSISR(r11) | ||
216 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
217 | EXC_XFER_STD(0x200, MachineCheckException) | ||
218 | |||
219 | /* Data access exception. | ||
220 | * This is "never generated" by the MPC8xx. We jump to it for other | ||
221 | * translation errors. | ||
222 | */ | ||
223 | . = 0x300 | ||
224 | DataAccess: | ||
225 | EXCEPTION_PROLOG | ||
226 | mfspr r10,SPRN_DSISR | ||
227 | stw r10,_DSISR(r11) | ||
228 | mr r5,r10 | ||
229 | mfspr r4,SPRN_DAR | ||
230 | EXC_XFER_EE_LITE(0x300, handle_page_fault) | ||
231 | |||
232 | /* Instruction access exception. | ||
233 | * This is "never generated" by the MPC8xx. We jump to it for other | ||
234 | * translation errors. | ||
235 | */ | ||
236 | . = 0x400 | ||
237 | InstructionAccess: | ||
238 | EXCEPTION_PROLOG | ||
239 | mr r4,r12 | ||
240 | mr r5,r9 | ||
241 | EXC_XFER_EE_LITE(0x400, handle_page_fault) | ||
242 | |||
243 | /* External interrupt */ | ||
244 | EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE) | ||
245 | |||
246 | /* Alignment exception */ | ||
247 | . = 0x600 | ||
248 | Alignment: | ||
249 | EXCEPTION_PROLOG | ||
250 | mfspr r4,SPRN_DAR | ||
251 | stw r4,_DAR(r11) | ||
252 | mfspr r5,SPRN_DSISR | ||
253 | stw r5,_DSISR(r11) | ||
254 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
255 | EXC_XFER_EE(0x600, AlignmentException) | ||
256 | |||
257 | /* Program check exception */ | ||
258 | EXCEPTION(0x700, ProgramCheck, ProgramCheckException, EXC_XFER_STD) | ||
259 | |||
260 | /* No FPU on MPC8xx. This exception is not supposed to happen. | ||
261 | */ | ||
262 | EXCEPTION(0x800, FPUnavailable, UnknownException, EXC_XFER_STD) | ||
263 | |||
264 | /* Decrementer */ | ||
265 | EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE) | ||
266 | |||
267 | EXCEPTION(0xa00, Trap_0a, UnknownException, EXC_XFER_EE) | ||
268 | EXCEPTION(0xb00, Trap_0b, UnknownException, EXC_XFER_EE) | ||
269 | |||
270 | /* System call */ | ||
271 | . = 0xc00 | ||
272 | SystemCall: | ||
273 | EXCEPTION_PROLOG | ||
274 | EXC_XFER_EE_LITE(0xc00, DoSyscall) | ||
275 | |||
276 | /* Single step - not used on 601 */ | ||
277 | EXCEPTION(0xd00, SingleStep, SingleStepException, EXC_XFER_STD) | ||
278 | EXCEPTION(0xe00, Trap_0e, UnknownException, EXC_XFER_EE) | ||
279 | EXCEPTION(0xf00, Trap_0f, UnknownException, EXC_XFER_EE) | ||
280 | |||
281 | /* On the MPC8xx, this is a software emulation interrupt. It occurs | ||
282 | * for all unimplemented and illegal instructions. | ||
283 | */ | ||
284 | EXCEPTION(0x1000, SoftEmu, SoftwareEmulation, EXC_XFER_STD) | ||
285 | |||
286 | . = 0x1100 | ||
287 | /* | ||
288 | * For the MPC8xx, this is a software tablewalk to load the instruction | ||
289 | * TLB. It is modelled after the example in the Motorola manual. The task | ||
290 | * switch loads the M_TWB register with the pointer to the first level table. | ||
291 | * If we discover there is no second level table (the value is zero), the | ||
292 | * plan was to load that into the TLB, which causes another fault into the | ||
293 | * TLB Error interrupt where we can handle such problems. However, that did | ||
294 | * not work, so if we discover there is no second level table, we restore | ||
295 | * registers and branch to the error exception. We have to use the MD_xxx | ||
296 | * registers for the tablewalk because the equivalent MI_xxx registers | ||
297 | * only perform the attribute functions. | ||
298 | */ | ||
299 | InstructionTLBMiss: | ||
300 | #ifdef CONFIG_8xx_CPU6 | ||
301 | stw r3, 8(r0) | ||
302 | #endif | ||
303 | DO_8xx_CPU6(0x3f80, r3) | ||
304 | mtspr SPRN_M_TW, r10 /* Save a couple of working registers */ | ||
305 | mfcr r10 | ||
306 | stw r10, 0(r0) | ||
307 | stw r11, 4(r0) | ||
308 | mfspr r10, SPRN_SRR0 /* Get effective address of fault */ | ||
309 | DO_8xx_CPU6(0x3780, r3) | ||
310 | mtspr SPRN_MD_EPN, r10 /* Have to use MD_EPN for walk, MI_EPN can't */ | ||
311 | mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */ | ||
312 | |||
313 | /* If we are faulting a kernel address, we have to use the | ||
314 | * kernel page tables. | ||
315 | */ | ||
316 | andi. r11, r10, 0x0800 /* Address >= 0x80000000 */ | ||
317 | beq 3f | ||
318 | lis r11, swapper_pg_dir@h | ||
319 | ori r11, r11, swapper_pg_dir@l | ||
320 | rlwimi r10, r11, 0, 2, 19 | ||
321 | 3: | ||
322 | lwz r11, 0(r10) /* Get the level 1 entry */ | ||
323 | rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */ | ||
324 | beq 2f /* If zero, don't try to find a pte */ | ||
325 | |||
326 | /* We have a pte table, so load the MI_TWC with the attributes | ||
327 | * for this "segment." | ||
328 | */ | ||
329 | ori r11,r11,1 /* Set valid bit */ | ||
330 | DO_8xx_CPU6(0x2b80, r3) | ||
331 | mtspr SPRN_MI_TWC, r11 /* Set segment attributes */ | ||
332 | DO_8xx_CPU6(0x3b80, r3) | ||
333 | mtspr SPRN_MD_TWC, r11 /* Load pte table base address */ | ||
334 | mfspr r11, SPRN_MD_TWC /* ....and get the pte address */ | ||
335 | lwz r10, 0(r11) /* Get the pte */ | ||
336 | |||
337 | ori r10, r10, _PAGE_ACCESSED | ||
338 | stw r10, 0(r11) | ||
339 | |||
340 | /* The Linux PTE won't go exactly into the MMU TLB. | ||
341 | * Software indicator bits 21, 22 and 28 must be clear. | ||
342 | * Software indicator bits 24, 25, 26, and 27 must be | ||
343 | * set. All other Linux PTE bits control the behavior | ||
344 | * of the MMU. | ||
345 | */ | ||
346 | 2: li r11, 0x00f0 | ||
347 | rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */ | ||
348 | DO_8xx_CPU6(0x2d80, r3) | ||
349 | mtspr SPRN_MI_RPN, r10 /* Update TLB entry */ | ||
350 | |||
351 | mfspr r10, SPRN_M_TW /* Restore registers */ | ||
352 | lwz r11, 0(r0) | ||
353 | mtcr r11 | ||
354 | lwz r11, 4(r0) | ||
355 | #ifdef CONFIG_8xx_CPU6 | ||
356 | lwz r3, 8(r0) | ||
357 | #endif | ||
358 | rfi | ||
359 | |||
360 | . = 0x1200 | ||
361 | DataStoreTLBMiss: | ||
362 | #ifdef CONFIG_8xx_CPU6 | ||
363 | stw r3, 8(r0) | ||
364 | #endif | ||
365 | DO_8xx_CPU6(0x3f80, r3) | ||
366 | mtspr SPRN_M_TW, r10 /* Save a couple of working registers */ | ||
367 | mfcr r10 | ||
368 | stw r10, 0(r0) | ||
369 | stw r11, 4(r0) | ||
370 | mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */ | ||
371 | |||
372 | /* If we are faulting a kernel address, we have to use the | ||
373 | * kernel page tables. | ||
374 | */ | ||
375 | andi. r11, r10, 0x0800 | ||
376 | beq 3f | ||
377 | lis r11, swapper_pg_dir@h | ||
378 | ori r11, r11, swapper_pg_dir@l | ||
379 | rlwimi r10, r11, 0, 2, 19 | ||
380 | 3: | ||
381 | lwz r11, 0(r10) /* Get the level 1 entry */ | ||
382 | rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */ | ||
383 | beq 2f /* If zero, don't try to find a pte */ | ||
384 | |||
385 | /* We have a pte table, so fetch the pte from the table. | ||
386 | */ | ||
387 | ori r11, r11, 1 /* Set valid bit in physical L2 page */ | ||
388 | DO_8xx_CPU6(0x3b80, r3) | ||
389 | mtspr SPRN_MD_TWC, r11 /* Load pte table base address */ | ||
390 | mfspr r10, SPRN_MD_TWC /* ....and get the pte address */ | ||
391 | lwz r10, 0(r10) /* Get the pte */ | ||
392 | |||
393 | /* Insert the Guarded flag into the TWC from the Linux PTE. | ||
394 | * It is bit 27 of both the Linux PTE and the TWC (at least | ||
395 | * I got that right :-). It will be better when we can put | ||
396 | * this into the Linux pgd/pmd and load it in the operation | ||
397 | * above. | ||
398 | */ | ||
399 | rlwimi r11, r10, 0, 27, 27 | ||
400 | DO_8xx_CPU6(0x3b80, r3) | ||
401 | mtspr SPRN_MD_TWC, r11 | ||
402 | |||
403 | mfspr r11, SPRN_MD_TWC /* get the pte address again */ | ||
404 | ori r10, r10, _PAGE_ACCESSED | ||
405 | stw r10, 0(r11) | ||
406 | |||
407 | /* The Linux PTE won't go exactly into the MMU TLB. | ||
408 | * Software indicator bits 21, 22 and 28 must be clear. | ||
409 | * Software indicator bits 24, 25, 26, and 27 must be | ||
410 | * set. All other Linux PTE bits control the behavior | ||
411 | * of the MMU. | ||
412 | */ | ||
413 | 2: li r11, 0x00f0 | ||
414 | rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */ | ||
415 | DO_8xx_CPU6(0x3d80, r3) | ||
416 | mtspr SPRN_MD_RPN, r10 /* Update TLB entry */ | ||
417 | |||
418 | mfspr r10, SPRN_M_TW /* Restore registers */ | ||
419 | lwz r11, 0(r0) | ||
420 | mtcr r11 | ||
421 | lwz r11, 4(r0) | ||
422 | #ifdef CONFIG_8xx_CPU6 | ||
423 | lwz r3, 8(r0) | ||
424 | #endif | ||
425 | rfi | ||
426 | |||
427 | /* This is an instruction TLB error on the MPC8xx. This could be due | ||
428 | * to many reasons, such as executing guarded memory or illegal instruction | ||
429 | * addresses. There is nothing to do but handle a big time error fault. | ||
430 | */ | ||
431 | . = 0x1300 | ||
432 | InstructionTLBError: | ||
433 | b InstructionAccess | ||
434 | |||
435 | /* This is the data TLB error on the MPC8xx. This could be due to | ||
436 | * many reasons, including a dirty update to a pte. We can catch that | ||
437 | * one here, but anything else is an error. First, we track down the | ||
438 | * Linux pte. If it is valid, write access is allowed, but the | ||
439 | * page dirty bit is not set, we will set it and reload the TLB. For | ||
440 | * any other case, we bail out to a higher level function that can | ||
441 | * handle it. | ||
442 | */ | ||
443 | . = 0x1400 | ||
444 | DataTLBError: | ||
445 | #ifdef CONFIG_8xx_CPU6 | ||
446 | stw r3, 8(r0) | ||
447 | #endif | ||
448 | DO_8xx_CPU6(0x3f80, r3) | ||
449 | mtspr SPRN_M_TW, r10 /* Save a couple of working registers */ | ||
450 | mfcr r10 | ||
451 | stw r10, 0(r0) | ||
452 | stw r11, 4(r0) | ||
453 | |||
454 | /* First, make sure this was a store operation. | ||
455 | */ | ||
456 | mfspr r10, SPRN_DSISR | ||
457 | andis. r11, r10, 0x0200 /* If set, indicates store op */ | ||
458 | beq 2f | ||
459 | |||
460 | /* The EA of a data TLB miss is automatically stored in the MD_EPN | ||
461 | * register. The EA of a data TLB error is automatically stored in | ||
462 | * the DAR, but not the MD_EPN register. We must copy the 20 most | ||
463 | * significant bits of the EA from the DAR to MD_EPN before we | ||
464 | * start walking the page tables. We also need to copy the CASID | ||
465 | * value from the M_CASID register. | ||
466 | * Addendum: The EA of a data TLB error is _supposed_ to be stored | ||
467 | * in DAR, but it seems that this doesn't happen in some cases, such | ||
468 | * as when the error is due to a dcbi instruction to a page with a | ||
469 | * TLB that doesn't have the changed bit set. In such cases, there | ||
470 | * does not appear to be any way to recover the EA of the error | ||
471 | * since it is neither in DAR nor MD_EPN. As a workaround, the | ||
472 | * _PAGE_HWWRITE bit is set for all kernel data pages when the PTEs | ||
473 | * are initialized in mapin_ram(). This will avoid the problem, | ||
474 | * assuming we only use the dcbi instruction on kernel addresses. | ||
475 | */ | ||
476 | mfspr r10, SPRN_DAR | ||
477 | rlwinm r11, r10, 0, 0, 19 | ||
478 | ori r11, r11, MD_EVALID | ||
479 | mfspr r10, SPRN_M_CASID | ||
480 | rlwimi r11, r10, 0, 28, 31 | ||
481 | DO_8xx_CPU6(0x3780, r3) | ||
482 | mtspr SPRN_MD_EPN, r11 | ||
483 | |||
484 | mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */ | ||
485 | |||
486 | /* If we are faulting a kernel address, we have to use the | ||
487 | * kernel page tables. | ||
488 | */ | ||
489 | andi. r11, r10, 0x0800 | ||
490 | beq 3f | ||
491 | lis r11, swapper_pg_dir@h | ||
492 | ori r11, r11, swapper_pg_dir@l | ||
493 | rlwimi r10, r11, 0, 2, 19 | ||
494 | 3: | ||
495 | lwz r11, 0(r10) /* Get the level 1 entry */ | ||
496 | rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */ | ||
497 | beq 2f /* If zero, bail */ | ||
498 | |||
499 | /* We have a pte table, so fetch the pte from the table. | ||
500 | */ | ||
501 | ori r11, r11, 1 /* Set valid bit in physical L2 page */ | ||
502 | DO_8xx_CPU6(0x3b80, r3) | ||
503 | mtspr SPRN_MD_TWC, r11 /* Load pte table base address */ | ||
504 | mfspr r11, SPRN_MD_TWC /* ....and get the pte address */ | ||
505 | lwz r10, 0(r11) /* Get the pte */ | ||
506 | |||
507 | andi. r11, r10, _PAGE_RW /* Is it writeable? */ | ||
508 | beq 2f /* Bail out if not */ | ||
509 | |||
510 | /* Update 'changed', among others. | ||
511 | */ | ||
512 | ori r10, r10, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE | ||
513 | mfspr r11, SPRN_MD_TWC /* Get pte address again */ | ||
514 | stw r10, 0(r11) /* and update pte in table */ | ||
515 | |||
516 | /* The Linux PTE won't go exactly into the MMU TLB. | ||
517 | * Software indicator bits 21, 22 and 28 must be clear. | ||
518 | * Software indicator bits 24, 25, 26, and 27 must be | ||
519 | * set. All other Linux PTE bits control the behavior | ||
520 | * of the MMU. | ||
521 | */ | ||
522 | li r11, 0x00f0 | ||
523 | rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */ | ||
524 | DO_8xx_CPU6(0x3d80, r3) | ||
525 | mtspr SPRN_MD_RPN, r10 /* Update TLB entry */ | ||
526 | |||
527 | mfspr r10, SPRN_M_TW /* Restore registers */ | ||
528 | lwz r11, 0(r0) | ||
529 | mtcr r11 | ||
530 | lwz r11, 4(r0) | ||
531 | #ifdef CONFIG_8xx_CPU6 | ||
532 | lwz r3, 8(r0) | ||
533 | #endif | ||
534 | rfi | ||
535 | 2: | ||
536 | mfspr r10, SPRN_M_TW /* Restore registers */ | ||
537 | lwz r11, 0(r0) | ||
538 | mtcr r11 | ||
539 | lwz r11, 4(r0) | ||
540 | #ifdef CONFIG_8xx_CPU6 | ||
541 | lwz r3, 8(r0) | ||
542 | #endif | ||
543 | b DataAccess | ||
544 | |||
/* Vectors 0x1500-0x1b00 are unused on the 8xx; route them all to
 * UnknownException via the EXCEPTION() stub macro.
 */
	EXCEPTION(0x1500, Trap_15, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x1600, Trap_16, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x1700, Trap_17, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x1800, Trap_18, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x1900, Trap_19, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x1a00, Trap_1a, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x1b00, Trap_1b, UnknownException, EXC_XFER_EE)

/* On the MPC8xx, these next four traps are used for development
 * support of breakpoints and such.  Someday I will get around to
 * using them.  For now they are also routed to UnknownException.
 */
	EXCEPTION(0x1c00, Trap_1c, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x1d00, Trap_1d, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x1e00, Trap_1e, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x1f00, Trap_1f, UnknownException, EXC_XFER_EE)
561 | |||
	/* Pad the vector area out to 0x2000 so ordinary kernel text
	 * starts past all exception vectors.
	 */
	. = 0x2000

/* Stub: returns immediately with no FPU state saved — presumably
 * because this core has no hardware FPU, so there is nothing to
 * give up (NOTE(review): confirm against the 8xx math-emulation
 * setup elsewhere in the tree).
 */
	.globl	giveup_fpu
giveup_fpu:
	blr
567 | |||
/*
 * This is where the main kernel code starts.
 * Entered with the MMU still running on the boot-time 8M mappings;
 * r27-r31 are assumed to still hold the boot parameters passed in
 * at entry (NOTE(review): set up earlier in this file, outside this
 * view — confirm register assignments there).
 */
start_here:
	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l

	/* ptr to phys current thread: SPRG3 caches the physical address
	 * of the current task's THREAD struct for the exception prologs.
	 */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG3,r4
	li	r3,0
	mtspr	SPRN_SPRG2,r3	/* 0 => r1 has kernel sp */

	/* stack: top of init_thread_union, with a zeroed back-chain word
	 * so stack unwinding terminates here.
	 */
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)

	bl	early_init	/* We have to do this with MMU on */

/*
 * Decide what sort of machine this is and initialize the MMU.
 * Pass the five saved boot parameters (r31..r27) as args r3..r7.
 */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27
	bl	machine_init
	bl	MMU_init

/*
 * Go back to running unmapped so we can load up new values
 * and change to using our exception vectors.
 * On the 8xx, all we have to do is invalidate the TLB to clear
 * the old 8M byte TLB mappings and load the page table base register.
 */
	/* The right way to do this would be to track it down through
	 * init's THREAD like the context switch code does, but this is
	 * easier......until someone changes init's static structures.
	 */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	tophys(r6,r6)
#ifdef CONFIG_8xx_CPU6
	/* CPU6 errata workaround: store the mtspr opcode pattern (0x3980
	 * = M_TWB) into cpu6_errata_word and read it back before the
	 * actual mtspr — same dance DO_8xx_CPU6 performs in the TLB
	 * handlers above.
	 */
	lis	r4, cpu6_errata_word@h
	ori	r4, r4, cpu6_errata_word@l
	li	r3, 0x3980
	stw	r3, 12(r4)
	lwz	r3, 12(r4)
#endif
	mtspr	SPRN_M_TWB, r6
	/* rfi to label 2 with IR/DR off, i.e. run translation-disabled */
	lis	r4,2f@h
	ori	r4,r4,2f@l
	tophys(r4,r4)
	li	r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi
/* Load up the kernel context */
2:
	SYNC			/* Force all PTE updates to finish */
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */

	/* set up the PTE pointers for the Abatron bdiGDB.
	 * Physical word 0xf0 holds a pointer to abatron_pteptrs, whose
	 * first slot gets the kernel pgdir (physical).
	 */
	tovirt(r6,r6)
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r5, 0xf0(r0)	/* Must match your Abatron config file */
	tophys(r5,r5)
	stw	r6, 0(r5)

	/* Now turn on the MMU for real! */
	li	r4,MSR_KERNEL
	lis	r3,start_kernel@h
	ori	r3,r3,start_kernel@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfi			/* enable MMU and jump to start_kernel */
653 | |||
/* Set up the initial MMU state so we can do the first level of
 * kernel initialization.  This maps the first 8 MBytes of memory 1:1
 * virtual to physical.  Also, set the cache mode since that is defined
 * by TLB entries and perform any additional mapping (like of the IMMR).
 * If configured to pin some TLBs, we pin the first 8 Mbytes of kernel,
 * 24 Mbytes of data, and the 8M IMMR space.  Anything not covered by
 * these mappings is mapped by page tables.
 *
 * Clobbers r8-r11.  Called (blr) with translation off.
 */
initial_mmu:
	tlbia			/* Invalidate all TLB entries */
#ifdef CONFIG_PIN_TLB
	lis	r8, MI_RSV4I@h	/* reserve ITLB slots 28-31, index at 28 */
	ori	r8, r8, 0x1c00
#else
	li	r8, 0
#endif
	mtspr	SPRN_MI_CTR, r8	/* Set instruction MMU control */

#ifdef CONFIG_PIN_TLB
	lis	r10, (MD_RSV4I | MD_RESETVAL)@h
	ori	r10, r10, 0x1c00	/* DTLB replacement index = 28 */
	mr	r8, r10
#else
	lis	r10, MD_RESETVAL@h
#endif
#ifndef CONFIG_8xx_COPYBACK
	oris	r10, r10, MD_WTDEF@h	/* default to write-through data cache */
#endif
	mtspr	SPRN_MD_CTR, r10	/* Set data TLB control */

	/* Now map the lower 8 Meg into the TLBs.  For this quick hack,
	 * we can load the instruction and data TLB registers with the
	 * same values.
	 */
	lis	r8, KERNELBASE@h	/* Create vaddr for TLB */
	ori	r8, r8, MI_EVALID	/* Mark it valid */
	mtspr	SPRN_MI_EPN, r8
	mtspr	SPRN_MD_EPN, r8
	li	r8, MI_PS8MEG		/* Set 8M byte page */
	ori	r8, r8, MI_SVALID	/* Make it valid */
	mtspr	SPRN_MI_TWC, r8
	mtspr	SPRN_MD_TWC, r8
	li	r8, MI_BOOTINIT		/* Create RPN for address 0 */
	mtspr	SPRN_MI_RPN, r8		/* Store TLB entry */
	mtspr	SPRN_MD_RPN, r8
	lis	r8, MI_Kp@h		/* Set the protection mode */
	mtspr	SPRN_MI_AP, r8
	mtspr	SPRN_MD_AP, r8

	/* Map another 8 MByte at the IMMR to get the processor
	 * internal registers (among other things).
	 */
#ifdef CONFIG_PIN_TLB
	addi	r10, r10, 0x0100	/* advance DTLB index to next slot */
	mtspr	SPRN_MD_CTR, r10
#endif
	mfspr	r9, 638			/* Get current IMMR (SPR 638) */
	andis.	r9, r9, 0xff80		/* Get 8Mbyte boundary */

	mr	r8, r9			/* Create vaddr for TLB */
	ori	r8, r8, MD_EVALID	/* Mark it valid */
	mtspr	SPRN_MD_EPN, r8
	li	r8, MD_PS8MEG		/* Set 8M byte page */
	ori	r8, r8, MD_SVALID	/* Make it valid */
	mtspr	SPRN_MD_TWC, r8
	mr	r8, r9			/* Create paddr for TLB */
	ori	r8, r8, MI_BOOTINIT|0x2 /* Inhibit cache -- Cort */
	mtspr	SPRN_MD_RPN, r8

#ifdef CONFIG_PIN_TLB
	/* Map two more 8M kernel data pages (KERNELBASE+8M and +16M).
	 */
	addi	r10, r10, 0x0100
	mtspr	SPRN_MD_CTR, r10

	lis	r8, KERNELBASE@h	/* Create vaddr for TLB */
	addis	r8, r8, 0x0080		/* Add 8M */
	ori	r8, r8, MI_EVALID	/* Mark it valid */
	mtspr	SPRN_MD_EPN, r8
	li	r9, MI_PS8MEG		/* Set 8M byte page */
	ori	r9, r9, MI_SVALID	/* Make it valid */
	mtspr	SPRN_MD_TWC, r9
	li	r11, MI_BOOTINIT	/* Create RPN for address 0 */
	addis	r11, r11, 0x0080	/* Add 8M */
	mtspr	SPRN_MD_RPN, r11	/* BUG FIX: was r8 — r8 holds the
					 * EPN (vaddr|MI_EVALID); the RPN
					 * built in r11 was never used, so
					 * the pinned entry mapped the
					 * virtual address to itself. */

	addis	r8, r8, 0x0080		/* Add 8M */
	mtspr	SPRN_MD_EPN, r8
	mtspr	SPRN_MD_TWC, r9
	addis	r11, r11, 0x0080	/* Add 8M */
	mtspr	SPRN_MD_RPN, r11	/* BUG FIX: was r8, see above */
#endif

	/* Since the cache is enabled according to the information we
	 * just loaded into the TLB, invalidate and enable the caches here.
	 * We should probably check/set other modes....later.
	 */
	lis	r8, IDC_INVALL@h
	mtspr	SPRN_IC_CST, r8
	mtspr	SPRN_DC_CST, r8
	lis	r8, IDC_ENABLE@h
	mtspr	SPRN_IC_CST, r8
#ifdef CONFIG_8xx_COPYBACK
	mtspr	SPRN_DC_CST, r8
#else
	/* For a debug option, I left this here to easily enable
	 * the write through cache mode
	 */
	lis	r8, DC_SFWT@h
	mtspr	SPRN_DC_CST, r8
	lis	r8, IDC_ENABLE@h
	mtspr	SPRN_DC_CST, r8
#endif
	blr
768 | |||
769 | |||
/*
 * Set up to use a given MMU context.
 * r3 is context number, r4 is PGD pointer.
 *
 * We place the physical address of the new task page directory loaded
 * into the MMU base register, and set the ASID compare register with
 * the new "context."
 *
 * Clobbers r4 (converted to a physical address) and, in the CPU6
 * case, r6/r7.
 */
_GLOBAL(set_context)

#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is passed as second argument.
	 * (KERNELBASE+0xf0 holds the pointer set up in start_here.)
	 */
	lis	r5, KERNELBASE@h
	lwz	r5, 0xf0(r5)
	stw	r4, 0x4(r5)
#endif

#ifdef CONFIG_8xx_CPU6
	/* CPU6 errata: before each mtspr, store the matching opcode
	 * pattern (0x3980 = M_TWB, 0x3380 = M_CASID) into
	 * cpu6_errata_word and read it back.
	 */
	lis	r6, cpu6_errata_word@h
	ori	r6, r6, cpu6_errata_word@l
	tophys	(r4, r4)
	li	r7, 0x3980
	stw	r7, 12(r6)
	lwz	r7, 12(r6)
	mtspr	SPRN_M_TWB, r4		/* Update MMU base address */
	li	r7, 0x3380
	stw	r7, 12(r6)
	lwz	r7, 12(r6)
	mtspr	SPRN_M_CASID, r3	/* Update context */
#else
	mtspr	SPRN_M_CASID,r3		/* Update context */
	tophys	(r4, r4)
	mtspr	SPRN_M_TWB, r4		/* and pgd */
#endif
	SYNC
	blr
808 | |||
#ifdef CONFIG_8xx_CPU6
/* It's here because it is unique to the 8xx.
 * It is important we get called with interrupts disabled.  I used to
 * do that, but it appears that all code that calls this already had
 * interrupt disabled.
 *
 * Loads the decrementer (SPR 22) with r3, applying the CPU6 errata
 * store/load of the opcode pattern (0x2c00) first.  Clobbers r4, r7.
 */
	.globl	set_dec_cpu6
set_dec_cpu6:
	lis	r7, cpu6_errata_word@h
	ori	r7, r7, cpu6_errata_word@l
	li	r4, 0x2c00
	stw	r4, 8(r7)
	lwz	r4, 8(r7)
	mtspr	22, r3		/* Update Decrementer */
	SYNC
	blr
#endif
826 | |||
/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the data segment,
 * which is page-aligned.
 */
	.data
	.globl	sdata
sdata:
	/* One page of zeroes, e.g. for mapping anonymous read faults. */
	.globl	empty_zero_page
empty_zero_page:
	.space	4096

	/* The kernel's top-level page directory (1024 4-byte entries). */
	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	4096

/*
 * This space gets a copy of optional info passed to us by the bootstrap
 * Used to pass parameters into the kernel like root=/dev/sda1, etc.
 */
	.globl	cmd_line
cmd_line:
	.space	512

/* Room for two PTE table pointers, usually the kernel and current user
 * pointer to their respective root page table (pgdir).
 * (Filled in by start_here and set_context for the Abatron BDI2000.)
 */
abatron_pteptrs:
	.space	8

#ifdef CONFIG_8xx_CPU6
	/* Scratch words the CPU6 errata sequences store/reload opcode
	 * patterns through (see DO_8xx_CPU6, set_context, set_dec_cpu6).
	 */
	.globl	cpu6_errata_word
cpu6_errata_word:
	.space	16
#endif
862 | |||
diff --git a/arch/ppc/kernel/head_booke.h b/arch/ppc/kernel/head_booke.h new file mode 100644 index 000000000000..884dac916bce --- /dev/null +++ b/arch/ppc/kernel/head_booke.h | |||
@@ -0,0 +1,340 @@ | |||
#ifndef __HEAD_BOOKE_H__
#define __HEAD_BOOKE_H__

/*
 * Macros used for common Book-e exception handling
 */

/* Load an IVOR SPR with the low half of a handler label.  Only the
 * low bits are loaded, so the handler must live within IVPR's reach.
 * Clobbers r26.
 */
#define SET_IVOR(vector_number, vector_label)		\
	li	r26,vector_label@l; 		\
	mtspr	SPRN_IVOR##vector_number,r26;	\
	sync

/* Common entry for base-priority exceptions.  Builds an exception
 * frame on the kernel stack (current thread's stack if coming from
 * user mode, the interrupted stack otherwise).  On exit: r11 = frame
 * pointer, r9 = SRR1 (with MSR_WE cleared), r12 = SRR0, and
 * r0, r3-r12, CR, LR, old r1 are saved in the frame.
 * SPRG0/SPRG1/SPRG4W are used as scratch save slots.
 */
#define NORMAL_EXCEPTION_PROLOG						     \
	mtspr	SPRN_SPRG0,r10;		/* save two registers to work with */\
	mtspr	SPRN_SPRG1,r11;						     \
	mtspr	SPRN_SPRG4W,r1;						     \
	mfcr	r10;			/* save CR in r10 for now	   */\
	mfspr	r11,SPRN_SRR1;		/* check whether user or kernel    */\
	andi.	r11,r11,MSR_PR;						     \
	beq	1f;							     \
	mfspr	r1,SPRN_SPRG3;		/* if from user, start at top of   */\
	lwz	r1,THREAD_INFO-THREAD(r1); /* this thread's kernel stack   */\
	addi	r1,r1,THREAD_SIZE;					     \
1:	subi	r1,r1,INT_FRAME_SIZE;	/* Allocate an exception frame     */\
	mr	r11,r1;							     \
	stw	r10,_CCR(r11);		/* save various registers	   */\
	stw	r12,GPR12(r11);						     \
	stw	r9,GPR9(r11);						     \
	mfspr	r10,SPRN_SPRG0;						     \
	stw	r10,GPR10(r11);						     \
	mfspr	r12,SPRN_SPRG1;						     \
	stw	r12,GPR11(r11);						     \
	mflr	r10;							     \
	stw	r10,_LINK(r11);						     \
	mfspr	r10,SPRN_SPRG4R;	/* old r1, saved via SPRG4W above  */\
	mfspr	r12,SPRN_SRR0;						     \
	stw	r10,GPR1(r11);						     \
	mfspr	r9,SPRN_SRR1;						     \
	stw	r10,0(r11);		/* back-chain to interrupted stack */\
	rlwinm	r9,r9,0,14,12;		/* clear MSR_WE (necessary?)	   */\
	stw	r0,GPR0(r11);						     \
	SAVE_4GPRS(3, r11);						     \
	SAVE_2GPRS(7, r11)
44 | |||
/* To handle the additional exception priority levels on 40x and Book-E
 * processors we allocate a 4k stack per additional priority level. The various
 * head_xxx.S files allocate space (exception_stack_top) for each priority's
 * stack times the number of CPUs
 *
 * On 40x critical is the only additional level
 * On 44x/e500 we have critical and machine check
 *
 * Additionally we reserve a SPRG for each priority level so we can free up a
 * GPR to use as the base for indirect access to the exception stacks.  This
 * is necessary since the MMU is always on, for Book-E parts, and the stacks
 * are offset from KERNELBASE.
 *
 */
/* Per-CPU allocation: 4k critical + 4k machine-check = 8k. */
#define BOOKE_EXCEPTION_STACK_SIZE	(8192)

/* CRIT_SPRG only used in critical exception handling */
#define CRIT_SPRG	SPRN_SPRG2
/* MCHECK_SPRG only used in machine check exception handling */
#define MCHECK_SPRG	SPRN_SPRG6W

/* Within each per-CPU 8k region: crit stack on top, mcheck 4k below. */
#define MCHECK_STACK_TOP	(exception_stack_top - 4096)
#define CRIT_STACK_TOP		(exception_stack_top)

#ifdef CONFIG_SMP
/* SMP: index the per-CPU stack by PIR (processor ID).  Clobbers r8. */
#define BOOKE_LOAD_CRIT_STACK				\
	mfspr	r8,SPRN_PIR;				\
	mulli	r8,r8,BOOKE_EXCEPTION_STACK_SIZE;	\
	neg	r8,r8;					\
	addis	r8,r8,CRIT_STACK_TOP@ha;		\
	addi	r8,r8,CRIT_STACK_TOP@l
#define BOOKE_LOAD_MCHECK_STACK				\
	mfspr	r8,SPRN_PIR;				\
	mulli	r8,r8,BOOKE_EXCEPTION_STACK_SIZE;	\
	neg	r8,r8;					\
	addis	r8,r8,MCHECK_STACK_TOP@ha;		\
	addi	r8,r8,MCHECK_STACK_TOP@l
#else
/* UP: one stack; just load its top address into r8. */
#define BOOKE_LOAD_CRIT_STACK			\
	lis	r8,CRIT_STACK_TOP@h;		\
	ori	r8,r8,CRIT_STACK_TOP@l
#define BOOKE_LOAD_MCHECK_STACK			\
	lis	r8,MCHECK_STACK_TOP@h;		\
	ori	r8,r8,MCHECK_STACK_TOP@l
#endif
90 | |||
/*
 * Exception prolog for critical exceptions.  This is a little different
 * from the normal exception prolog above since a critical exception
 * can potentially occur at any point during normal exception processing.
 * Thus we cannot use the same SPRG registers as the normal prolog above.
 * Instead we use a portion of the critical exception stack at low physical
 * addresses.
 *
 * On exit: r11 = r1 = exception frame, r9 = CSRR1 (MSR_WE cleared),
 * r12 = CSRR0; r0, r3-r12, CR, LR, DEAR, ESR saved in the frame.
 * r10/r11 are initially stashed just below the crit stack top, then
 * copied into the frame by crit_transfer_to_handler (outside this file).
 */
#define CRITICAL_EXCEPTION_PROLOG					     \
	mtspr	CRIT_SPRG,r8;						     \
	BOOKE_LOAD_CRIT_STACK;		/* r8 points to the crit stack */    \
	stw	r10,GPR10-INT_FRAME_SIZE(r8);				     \
	stw	r11,GPR11-INT_FRAME_SIZE(r8);				     \
	mfcr	r10;			/* save CR in r10 for now	   */\
	mfspr	r11,SPRN_CSRR1;		/* check whether user or kernel    */\
	andi.	r11,r11,MSR_PR;						     \
	mr	r11,r8;							     \
	mfspr	r8,CRIT_SPRG;		/* restore r8 before branching     */\
	beq	1f;							     \
	/* COMING FROM USER MODE */					     \
	mfspr	r11,SPRN_SPRG3;		/* if from user, start at top of   */\
	lwz	r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
	addi	r11,r11,THREAD_SIZE;					     \
1:	subi	r11,r11,INT_FRAME_SIZE;	/* Allocate an exception frame     */\
	stw	r10,_CCR(r11);		/* save various registers	   */\
	stw	r12,GPR12(r11);						     \
	stw	r9,GPR9(r11);						     \
	mflr	r10;							     \
	stw	r10,_LINK(r11);						     \
	mfspr	r12,SPRN_DEAR;		/* save DEAR and ESR in the frame  */\
	stw	r12,_DEAR(r11);		/* since they may have had stuff   */\
	mfspr	r9,SPRN_ESR;		/* in them at the point where the  */\
	stw	r9,_ESR(r11);		/* exception was taken		   */\
	mfspr	r12,SPRN_CSRR0;						     \
	stw	r1,GPR1(r11);						     \
	mfspr	r9,SPRN_CSRR1;						     \
	stw	r1,0(r11);		/* back-chain to interrupted stack */\
	mr	r1,r11;							     \
	rlwinm	r9,r9,0,14,12;		/* clear MSR_WE (necessary?)	   */\
	stw	r0,GPR0(r11);						     \
	SAVE_4GPRS(3, r11);						     \
	SAVE_2GPRS(7, r11)
134 | |||
/*
 * Exception prolog for machine check exceptions.  This is similar to
 * the critical exception prolog, except that machine check exceptions
 * have their own stack, use MCHECK_SPRG as the r8 save slot, and read
 * the return state from MCSRR0/MCSRR1 instead of CSRR0/CSRR1.
 */
#define MCHECK_EXCEPTION_PROLOG					     \
	mtspr	MCHECK_SPRG,r8;					     \
	BOOKE_LOAD_MCHECK_STACK;	/* r8 points to the mcheck stack */  \
	stw	r10,GPR10-INT_FRAME_SIZE(r8);				     \
	stw	r11,GPR11-INT_FRAME_SIZE(r8);				     \
	mfcr	r10;			/* save CR in r10 for now	   */\
	mfspr	r11,SPRN_MCSRR1;	/* check whether user or kernel    */\
	andi.	r11,r11,MSR_PR;						     \
	mr	r11,r8;							     \
	mfspr	r8,MCHECK_SPRG;		/* restore r8 before branching     */\
	beq	1f;							     \
	/* COMING FROM USER MODE */					     \
	mfspr	r11,SPRN_SPRG3;		/* if from user, start at top of   */\
	lwz	r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
	addi	r11,r11,THREAD_SIZE;					     \
1:	subi	r11,r11,INT_FRAME_SIZE;	/* Allocate an exception frame     */\
	stw	r10,_CCR(r11);		/* save various registers	   */\
	stw	r12,GPR12(r11);						     \
	stw	r9,GPR9(r11);						     \
	mflr	r10;							     \
	stw	r10,_LINK(r11);						     \
	mfspr	r12,SPRN_DEAR;		/* save DEAR and ESR in the frame  */\
	stw	r12,_DEAR(r11);		/* since they may have had stuff   */\
	mfspr	r9,SPRN_ESR;		/* in them at the point where the  */\
	stw	r9,_ESR(r11);		/* exception was taken		   */\
	mfspr	r12,SPRN_MCSRR0;					     \
	stw	r1,GPR1(r11);						     \
	mfspr	r9,SPRN_MCSRR1;						     \
	stw	r1,0(r11);		/* back-chain to interrupted stack */\
	mr	r1,r11;							     \
	rlwinm	r9,r9,0,14,12;		/* clear MSR_WE (necessary?)	   */\
	stw	r0,GPR0(r11);						     \
	SAVE_4GPRS(3, r11);						     \
	SAVE_2GPRS(7, r11)
174 | |||
/*
 * Exception vectors.
 */
/* Align a vector entry point to a 32-byte boundary and define its label. */
#define START_EXCEPTION(label)						     \
	.align 5;							     \
label:

/* NOTE(review): FINISH_EXCEPTION appears unused within this header;
 * kept for callers elsewhere — confirm before removing.
 */
#define FINISH_EXCEPTION(func)					\
	bl	transfer_to_handler_full;		\
	.long	func;					\
	.long	ret_from_except_full

/* Full vector body: prolog, r3 = pt_regs pointer, then hand off via
 * one of the EXC_XFER_* macros below.
 */
#define EXCEPTION(n, label, hdlr, xfer)			\
	START_EXCEPTION(label);				\
	NORMAL_EXCEPTION_PROLOG;			\
	addi	r3,r1,STACK_FRAME_OVERHEAD;		\
	xfer(n, hdlr)

/* Critical-level vector: trap number n+2 marks it critical; MSR has
 * ME/DE/CE masked so a nested critical event can't recurse here.
 */
#define CRITICAL_EXCEPTION(n, label, hdlr)			\
	START_EXCEPTION(label);					\
	CRITICAL_EXCEPTION_PROLOG;				\
	addi	r3,r1,STACK_FRAME_OVERHEAD;			\
	EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
			  NOCOPY, crit_transfer_to_handler, \
			  ret_from_crit_exc)

#define MCHECK_EXCEPTION(n, label, hdlr)			\
	START_EXCEPTION(label);					\
	MCHECK_EXCEPTION_PROLOG;				\
	mfspr	r5,SPRN_ESR;					\
	stw	r5,_ESR(r11);					\
	addi	r3,r1,STACK_FRAME_OVERHEAD;			\
	EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
			  NOCOPY, mcheck_transfer_to_handler,	\
			  ret_from_mcheck_exc)

/* Common transfer: record the trap number in the frame, build the new
 * MSR in r10 (optionally copying EE from the saved MSR in r9), then
 * branch-and-link to the transfer routine, which finds the C handler
 * and return path in the two inline .long words after the bl.
 */
#define EXC_XFER_TEMPLATE(hdlr, trap, msr, copyee, tfer, ret)	\
	li	r10,trap;					\
	stw	r10,TRAP(r11);					\
	lis	r10,msr@h;					\
	ori	r10,r10,msr@l;					\
	copyee(r10, r9);					\
	bl	tfer;						\
	.long	hdlr;						\
	.long	ret

/* COPY_EE: copy bit 16 (MSR_EE) of s into d; NOCOPY: leave d as-is. */
#define COPY_EE(d, s)		rlwimi d,s,0,16,16
#define NOCOPY(d, s)

#define EXC_XFER_STD(n, hdlr)		\
	EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, NOCOPY, transfer_to_handler_full, \
			  ret_from_except_full)

/* "LITE" variants use trap number n+1 and the lighter transfer/return. */
#define EXC_XFER_LITE(n, hdlr)		\
	EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, NOCOPY, transfer_to_handler, \
			  ret_from_except)

#define EXC_XFER_EE(n, hdlr)		\
	EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, COPY_EE, transfer_to_handler_full, \
			  ret_from_except_full)

#define EXC_XFER_EE_LITE(n, hdlr)	\
	EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, COPY_EE, transfer_to_handler, \
			  ret_from_except)
239 | |||
240 | |||
/* Check for a single step debug exception while in an exception
 * handler before state has been saved.  This is to catch the case
 * where an instruction that we are trying to single step causes
 * an exception (eg ITLB/DTLB miss) and thus the first instruction of
 * the exception handler generates a single step debug exception.
 *
 * If we get a debug trap on the first instruction of an exception handler,
 * we reset the MSR_DE in the _exception handler's_ MSR (the debug trap is
 * a critical exception, so we are using SPRN_CSRR1 to manipulate the MSR).
 * The exception handler was handling a non-critical interrupt, so it will
 * save (and later restore) the MSR via SPRN_CSRR1, which will still have
 * the MSR_DE bit set.
 */
#define DEBUG_EXCEPTION							      \
	START_EXCEPTION(Debug);						      \
	CRITICAL_EXCEPTION_PROLOG;					      \
									      \
	/*								      \
	 * If there is a single step or branch-taken exception in an	      \
	 * exception entry sequence, it was probably meant to apply to	      \
	 * the code where the exception occurred (since exception entry      \
	 * doesn't turn off DE automatically).  We simulate the effect	      \
	 * of turning off DE on entry to an exception handler by turning     \
	 * off DE in the CSRR1 value and clearing the debug status.	      \
	 */								      \
	mfspr	r10,SPRN_DBSR;		/* check single-step/branch taken */  \
	andis.	r10,r10,DBSR_IC@h;					      \
	beq+	2f;							      \
									      \
	lis	r10,KERNELBASE@h;	/* check if exception in vectors */   \
	ori	r10,r10,KERNELBASE@l;					      \
	cmplw	r12,r10;		/* r12 = CSRR0 from the prolog */     \
	blt+	2f;			/* addr below exception vectors */    \
									      \
	lis	r10,Debug@h;						      \
	ori	r10,r10,Debug@l;					      \
	cmplw	r12,r10;						      \
	bgt+	2f;			/* addr above exception vectors */    \
									      \
	/* here it looks like we got an inappropriate debug exception. */    \
1:	rlwinm	r9,r9,0,~MSR_DE;	/* clear DE in the CSRR1 value */     \
	lis	r10,DBSR_IC@h;		/* clear the IC event */	      \
	mtspr	SPRN_DBSR,r10;						      \
	/* restore state and get out: unwind the crit prolog exactly */      \
	lwz	r10,_CCR(r11);						      \
	lwz	r0,GPR0(r11);						      \
	lwz	r1,GPR1(r11);						      \
	mtcrf	0x80,r10;						      \
	mtspr	SPRN_CSRR0,r12;						      \
	mtspr	SPRN_CSRR1,r9;						      \
	lwz	r9,GPR9(r11);						      \
	lwz	r12,GPR12(r11);						      \
	mtspr	CRIT_SPRG,r8;						      \
	BOOKE_LOAD_CRIT_STACK;		/* r8 points to the crit stack */     \
	lwz	r10,GPR10-INT_FRAME_SIZE(r8);				      \
	lwz	r11,GPR11-INT_FRAME_SIZE(r8);				      \
	mfspr	r8,CRIT_SPRG;						      \
									      \
	rfci;								      \
	b	.;			/* not reached after rfci */	      \
									      \
	/* continue normal handling for a critical exception... */	      \
2:	mfspr	r4,SPRN_DBSR;						      \
	addi	r3,r1,STACK_FRAME_OVERHEAD;				      \
	EXC_XFER_TEMPLATE(DebugException, 0x2002, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), NOCOPY, crit_transfer_to_handler, ret_from_crit_exc)
306 | |||
/* ISI: pass saved SRR0 (faulting PC, in r12 from the prolog) as the
 * fault address and 0 as the error argument to handle_page_fault.
 */
#define INSTRUCTION_STORAGE_EXCEPTION					      \
	START_EXCEPTION(InstructionStorage)				      \
	NORMAL_EXCEPTION_PROLOG;					      \
	mfspr	r5,SPRN_ESR;		/* Grab the ESR and save it */	      \
	stw	r5,_ESR(r11);						      \
	mr	r4,r12;			/* Pass SRR0 as arg2 */		      \
	li	r5,0;			/* Pass zero as arg3 */		      \
	EXC_XFER_EE_LITE(0x0400, handle_page_fault)

#define ALIGNMENT_EXCEPTION						      \
	START_EXCEPTION(Alignment)					      \
	NORMAL_EXCEPTION_PROLOG;					      \
	mfspr	r4,SPRN_DEAR;		/* Grab the DEAR and save it */	      \
	stw	r4,_DEAR(r11);						      \
	addi	r3,r1,STACK_FRAME_OVERHEAD;				      \
	EXC_XFER_EE(0x0600, AlignmentException)

#define PROGRAM_EXCEPTION						      \
	START_EXCEPTION(Program)					      \
	NORMAL_EXCEPTION_PROLOG;					      \
	mfspr	r4,SPRN_ESR;		/* Grab the ESR and save it */	      \
	stw	r4,_ESR(r11);						      \
	addi	r3,r1,STACK_FRAME_OVERHEAD;				      \
	EXC_XFER_STD(0x0700, ProgramCheckException)

/* Acknowledge the decrementer in TSR before calling timer_interrupt,
 * otherwise the interrupt would re-fire immediately.
 */
#define DECREMENTER_EXCEPTION						      \
	START_EXCEPTION(Decrementer)					      \
	NORMAL_EXCEPTION_PROLOG;					      \
	lis	r0,TSR_DIS@h;		/* Setup the DEC interrupt mask */    \
	mtspr	SPRN_TSR,r0;		/* Clear the DEC interrupt */	      \
	addi	r3,r1,STACK_FRAME_OVERHEAD;				      \
	EXC_XFER_LITE(0x0900, timer_interrupt)

#endif /* __HEAD_BOOKE_H__ */
diff --git a/arch/ppc/kernel/head_fsl_booke.S b/arch/ppc/kernel/head_fsl_booke.S new file mode 100644 index 000000000000..dea19c216fc3 --- /dev/null +++ b/arch/ppc/kernel/head_fsl_booke.S | |||
@@ -0,0 +1,952 @@ | |||
1 | /* | ||
2 | * arch/ppc/kernel/head_fsl_booke.S | ||
3 | * | ||
4 | * Kernel execution entry point code. | ||
5 | * | ||
6 | * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org> | ||
7 | * Initial PowerPC version. | ||
8 | * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu> | ||
9 | * Rewritten for PReP | ||
10 | * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au> | ||
11 | * Low-level exception handers, MMU support, and rewrite. | ||
12 | * Copyright (c) 1997 Dan Malek <dmalek@jlc.net> | ||
13 | * PowerPC 8xx modifications. | ||
14 | * Copyright (c) 1998-1999 TiVo, Inc. | ||
15 | * PowerPC 403GCX modifications. | ||
16 | * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu> | ||
17 | * PowerPC 403GCX/405GP modifications. | ||
18 | * Copyright 2000 MontaVista Software Inc. | ||
19 | * PPC405 modifications | ||
20 | * PowerPC 403GCX/405GP modifications. | ||
21 | * Author: MontaVista Software, Inc. | ||
22 | * frank_rowand@mvista.com or source@mvista.com | ||
23 | * debbie_chu@mvista.com | ||
24 | * Copyright 2002-2004 MontaVista Software, Inc. | ||
25 | * PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org> | ||
26 | * Copyright 2004 Freescale Semiconductor, Inc | ||
27 | * PowerPC e500 modifications, Kumar Gala <kumar.gala@freescale.com> | ||
28 | * | ||
29 | * This program is free software; you can redistribute it and/or modify it | ||
30 | * under the terms of the GNU General Public License as published by the | ||
31 | * Free Software Foundation; either version 2 of the License, or (at your | ||
32 | * option) any later version. | ||
33 | */ | ||
34 | |||
35 | #include <linux/config.h> | ||
36 | #include <linux/threads.h> | ||
37 | #include <asm/processor.h> | ||
38 | #include <asm/page.h> | ||
39 | #include <asm/mmu.h> | ||
40 | #include <asm/pgtable.h> | ||
41 | #include <asm/cputable.h> | ||
42 | #include <asm/thread_info.h> | ||
43 | #include <asm/ppc_asm.h> | ||
44 | #include <asm/offsets.h> | ||
45 | #include "head_booke.h" | ||
46 | |||
47 | /* As with the other PowerPC ports, it is expected that when code | ||
48 | * execution begins here, the following registers contain valid, yet | ||
49 | * optional, information: | ||
50 | * | ||
51 | * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.) | ||
52 | * r4 - Starting address of the init RAM disk | ||
53 | * r5 - Ending address of the init RAM disk | ||
54 | * r6 - Start of kernel command line string (e.g. "mem=128") | ||
55 | * r7 - End of kernel command line string | ||
56 | * | ||
57 | */ | ||
58 | .text | ||
59 | _GLOBAL(_stext) | ||
60 | _GLOBAL(_start) | ||
61 | /* | ||
62 | * Reserve a word at a fixed location to store the address | ||
63 | * of abatron_pteptrs | ||
64 | */ | ||
65 | nop | ||
66 | /* | ||
67 | * Save parameters we are passed | ||
68 | */ | ||
69 | mr r31,r3 | ||
70 | mr r30,r4 | ||
71 | mr r29,r5 | ||
72 | mr r28,r6 | ||
73 | mr r27,r7 | ||
74 | li r24,0 /* CPU number */ | ||
75 | |||
76 | /* We try to not make any assumptions about how the boot loader | ||
77 | * setup or used the TLBs. We invalidate all mappings from the | ||
78 | * boot loader and load a single entry in TLB1[0] to map the | ||
79 | * first 16M of kernel memory. Any boot info passed from the | ||
80 | * bootloader needs to live in this first 16M. | ||
81 | * | ||
82 | * Requirement on bootloader: | ||
83 | * - The page we're executing in needs to reside in TLB1 and | ||
84 | * have IPROT=1. If not an invalidate broadcast could | ||
85 | * evict the entry we're currently executing in. | ||
86 | * | ||
87 | * r3 = Index of TLB1 were executing in | ||
88 | * r4 = Current MSR[IS] | ||
89 | * r5 = Index of TLB1 temp mapping | ||
90 | * | ||
91 | * Later in mapin_ram we will correctly map lowmem, and resize TLB1[0] | ||
92 | * if needed | ||
93 | */ | ||
94 | |||
95 | /* 1. Find the index of the entry we're executing in */ | ||
96 | bl invstr /* Find our address */ | ||
97 | invstr: mflr r6 /* Make it accessible */ | ||
98 | mfmsr r7 | ||
99 | rlwinm r4,r7,27,31,31 /* extract MSR[IS] */ | ||
100 | mfspr r7, SPRN_PID0 | ||
101 | slwi r7,r7,16 | ||
102 | or r7,r7,r4 | ||
103 | mtspr SPRN_MAS6,r7 | ||
104 | tlbsx 0,r6 /* search MSR[IS], SPID=PID0 */ | ||
105 | mfspr r7,SPRN_MAS1 | ||
106 | andis. r7,r7,MAS1_VALID@h | ||
107 | bne match_TLB | ||
108 | mfspr r7,SPRN_PID1 | ||
109 | slwi r7,r7,16 | ||
110 | or r7,r7,r4 | ||
111 | mtspr SPRN_MAS6,r7 | ||
112 | tlbsx 0,r6 /* search MSR[IS], SPID=PID1 */ | ||
113 | mfspr r7,SPRN_MAS1 | ||
114 | andis. r7,r7,MAS1_VALID@h | ||
115 | bne match_TLB | ||
116 | mfspr r7, SPRN_PID2 | ||
117 | slwi r7,r7,16 | ||
118 | or r7,r7,r4 | ||
119 | mtspr SPRN_MAS6,r7 | ||
120 | tlbsx 0,r6 /* Fall through, we had to match */ | ||
121 | match_TLB: | ||
122 | mfspr r7,SPRN_MAS0 | ||
123 | rlwinm r3,r7,16,20,31 /* Extract MAS0(Entry) */ | ||
124 | |||
125 | mfspr r7,SPRN_MAS1 /* Insure IPROT set */ | ||
126 | oris r7,r7,MAS1_IPROT@h | ||
127 | mtspr SPRN_MAS1,r7 | ||
128 | tlbwe | ||
129 | |||
130 | /* 2. Invalidate all entries except the entry we're executing in */ | ||
131 | mfspr r9,SPRN_TLB1CFG | ||
132 | andi. r9,r9,0xfff | ||
133 | li r6,0 /* Set Entry counter to 0 */ | ||
134 | 1: lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ | ||
135 | rlwimi r7,r6,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r6) */ | ||
136 | mtspr SPRN_MAS0,r7 | ||
137 | tlbre | ||
138 | mfspr r7,SPRN_MAS1 | ||
139 | rlwinm r7,r7,0,2,31 /* Clear MAS1 Valid and IPROT */ | ||
140 | cmpw r3,r6 | ||
141 | beq skpinv /* Dont update the current execution TLB */ | ||
142 | mtspr SPRN_MAS1,r7 | ||
143 | tlbwe | ||
144 | isync | ||
145 | skpinv: addi r6,r6,1 /* Increment */ | ||
146 | cmpw r6,r9 /* Are we done? */ | ||
147 | bne 1b /* If not, repeat */ | ||
148 | |||
149 | /* Invalidate TLB0 */ | ||
150 | li r6,0x04 | ||
151 | tlbivax 0,r6 | ||
152 | #ifdef CONFIG_SMP | ||
153 | tlbsync | ||
154 | #endif | ||
155 | /* Invalidate TLB1 */ | ||
156 | li r6,0x0c | ||
157 | tlbivax 0,r6 | ||
158 | #ifdef CONFIG_SMP | ||
159 | tlbsync | ||
160 | #endif | ||
161 | msync | ||
162 | |||
163 | /* 3. Setup a temp mapping and jump to it */ | ||
164 | andi. r5, r3, 0x1 /* Find an entry not used and is non-zero */ | ||
165 | addi r5, r5, 0x1 | ||
166 | lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ | ||
167 | rlwimi r7,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */ | ||
168 | mtspr SPRN_MAS0,r7 | ||
169 | tlbre | ||
170 | |||
171 | /* Just modify the entry ID and EPN for the temp mapping */ | ||
172 | lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ | ||
173 | rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */ | ||
174 | mtspr SPRN_MAS0,r7 | ||
175 | xori r6,r4,1 /* Setup TMP mapping in the other Address space */ | ||
176 | slwi r6,r6,12 | ||
177 | oris r6,r6,(MAS1_VALID|MAS1_IPROT)@h | ||
178 | ori r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_4K))@l | ||
179 | mtspr SPRN_MAS1,r6 | ||
180 | mfspr r6,SPRN_MAS2 | ||
181 | li r7,0 /* temp EPN = 0 */ | ||
182 | rlwimi r7,r6,0,20,31 | ||
183 | mtspr SPRN_MAS2,r7 | ||
184 | tlbwe | ||
185 | |||
186 | xori r6,r4,1 | ||
187 | slwi r6,r6,5 /* setup new context with other address space */ | ||
188 | bl 1f /* Find our address */ | ||
189 | 1: mflr r9 | ||
190 | rlwimi r7,r9,0,20,31 | ||
191 | addi r7,r7,24 | ||
192 | mtspr SPRN_SRR0,r7 | ||
193 | mtspr SPRN_SRR1,r6 | ||
194 | rfi | ||
195 | |||
196 | /* 4. Clear out PIDs & Search info */ | ||
197 | li r6,0 | ||
198 | mtspr SPRN_PID0,r6 | ||
199 | mtspr SPRN_PID1,r6 | ||
200 | mtspr SPRN_PID2,r6 | ||
201 | mtspr SPRN_MAS6,r6 | ||
202 | |||
203 | /* 5. Invalidate mapping we started in */ | ||
204 | lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ | ||
205 | rlwimi r7,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */ | ||
206 | mtspr SPRN_MAS0,r7 | ||
207 | tlbre | ||
208 | li r6,0 | ||
209 | mtspr SPRN_MAS1,r6 | ||
210 | tlbwe | ||
211 | /* Invalidate TLB1 */ | ||
212 | li r9,0x0c | ||
213 | tlbivax 0,r9 | ||
214 | #ifdef CONFIG_SMP | ||
215 | tlbsync | ||
216 | #endif | ||
217 | msync | ||
218 | |||
219 | /* 6. Setup KERNELBASE mapping in TLB1[0] */ | ||
220 | lis r6,0x1000 /* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */ | ||
221 | mtspr SPRN_MAS0,r6 | ||
222 | lis r6,(MAS1_VALID|MAS1_IPROT)@h | ||
223 | ori r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_16M))@l | ||
224 | mtspr SPRN_MAS1,r6 | ||
225 | li r7,0 | ||
226 | lis r6,KERNELBASE@h | ||
227 | ori r6,r6,KERNELBASE@l | ||
228 | rlwimi r6,r7,0,20,31 | ||
229 | mtspr SPRN_MAS2,r6 | ||
230 | li r7,(MAS3_SX|MAS3_SW|MAS3_SR) | ||
231 | mtspr SPRN_MAS3,r7 | ||
232 | tlbwe | ||
233 | |||
234 | /* 7. Jump to KERNELBASE mapping */ | ||
235 | li r7,0 | ||
236 | bl 1f /* Find our address */ | ||
237 | 1: mflr r9 | ||
238 | rlwimi r6,r9,0,20,31 | ||
239 | addi r6,r6,24 | ||
240 | mtspr SPRN_SRR0,r6 | ||
241 | mtspr SPRN_SRR1,r7 | ||
242 | rfi /* start execution out of TLB1[0] entry */ | ||
243 | |||
244 | /* 8. Clear out the temp mapping */ | ||
245 | lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ | ||
246 | rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */ | ||
247 | mtspr SPRN_MAS0,r7 | ||
248 | tlbre | ||
249 | mtspr SPRN_MAS1,r8 | ||
250 | tlbwe | ||
251 | /* Invalidate TLB1 */ | ||
252 | li r9,0x0c | ||
253 | tlbivax 0,r9 | ||
254 | #ifdef CONFIG_SMP | ||
255 | tlbsync | ||
256 | #endif | ||
257 | msync | ||
258 | |||
259 | /* Establish the interrupt vector offsets */ | ||
260 | SET_IVOR(0, CriticalInput); | ||
261 | SET_IVOR(1, MachineCheck); | ||
262 | SET_IVOR(2, DataStorage); | ||
263 | SET_IVOR(3, InstructionStorage); | ||
264 | SET_IVOR(4, ExternalInput); | ||
265 | SET_IVOR(5, Alignment); | ||
266 | SET_IVOR(6, Program); | ||
267 | SET_IVOR(7, FloatingPointUnavailable); | ||
268 | SET_IVOR(8, SystemCall); | ||
269 | SET_IVOR(9, AuxillaryProcessorUnavailable); | ||
270 | SET_IVOR(10, Decrementer); | ||
271 | SET_IVOR(11, FixedIntervalTimer); | ||
272 | SET_IVOR(12, WatchdogTimer); | ||
273 | SET_IVOR(13, DataTLBError); | ||
274 | SET_IVOR(14, InstructionTLBError); | ||
275 | SET_IVOR(15, Debug); | ||
276 | SET_IVOR(32, SPEUnavailable); | ||
277 | SET_IVOR(33, SPEFloatingPointData); | ||
278 | SET_IVOR(34, SPEFloatingPointRound); | ||
279 | SET_IVOR(35, PerformanceMonitor); | ||
280 | |||
281 | /* Establish the interrupt vector base */ | ||
282 | lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */ | ||
283 | mtspr SPRN_IVPR,r4 | ||
284 | |||
285 | /* Setup the defaults for TLB entries */ | ||
286 | li r2,(MAS4_TSIZED(BOOKE_PAGESZ_4K))@l | ||
287 | mtspr SPRN_MAS4, r2 | ||
288 | |||
289 | #if 0 | ||
290 | /* Enable DOZE */ | ||
291 | mfspr r2,SPRN_HID0 | ||
292 | oris r2,r2,HID0_DOZE@h | ||
293 | mtspr SPRN_HID0, r2 | ||
294 | #endif | ||
295 | |||
296 | /* | ||
297 | * This is where the main kernel code starts. | ||
298 | */ | ||
299 | |||
300 | /* ptr to current */ | ||
301 | lis r2,init_task@h | ||
302 | ori r2,r2,init_task@l | ||
303 | |||
304 | /* ptr to current thread */ | ||
305 | addi r4,r2,THREAD /* init task's THREAD */ | ||
306 | mtspr SPRN_SPRG3,r4 | ||
307 | |||
308 | /* stack */ | ||
309 | lis r1,init_thread_union@h | ||
310 | ori r1,r1,init_thread_union@l | ||
311 | li r0,0 | ||
312 | stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) | ||
313 | |||
314 | bl early_init | ||
315 | |||
316 | mfspr r3,SPRN_TLB1CFG | ||
317 | andi. r3,r3,0xfff | ||
318 | lis r4,num_tlbcam_entries@ha | ||
319 | stw r3,num_tlbcam_entries@l(r4) | ||
320 | /* | ||
321 | * Decide what sort of machine this is and initialize the MMU. | ||
322 | */ | ||
323 | mr r3,r31 | ||
324 | mr r4,r30 | ||
325 | mr r5,r29 | ||
326 | mr r6,r28 | ||
327 | mr r7,r27 | ||
328 | bl machine_init | ||
329 | bl MMU_init | ||
330 | |||
331 | /* Setup PTE pointers for the Abatron bdiGDB */ | ||
332 | lis r6, swapper_pg_dir@h | ||
333 | ori r6, r6, swapper_pg_dir@l | ||
334 | lis r5, abatron_pteptrs@h | ||
335 | ori r5, r5, abatron_pteptrs@l | ||
336 | lis r4, KERNELBASE@h | ||
337 | ori r4, r4, KERNELBASE@l | ||
338 | stw r5, 0(r4) /* Save abatron_pteptrs at a fixed location */ | ||
339 | stw r6, 0(r5) | ||
340 | |||
341 | /* Let's move on */ | ||
342 | lis r4,start_kernel@h | ||
343 | ori r4,r4,start_kernel@l | ||
344 | lis r3,MSR_KERNEL@h | ||
345 | ori r3,r3,MSR_KERNEL@l | ||
346 | mtspr SPRN_SRR0,r4 | ||
347 | mtspr SPRN_SRR1,r3 | ||
348 | rfi /* change context and jump to start_kernel */ | ||
349 | |||
350 | /* | ||
351 | * Interrupt vector entry code | ||
352 | * | ||
353 | * The Book E MMUs are always on so we don't need to handle | ||
354 | * interrupts in real mode as with previous PPC processors. In | ||
355 | * this case we handle interrupts in the kernel virtual address | ||
356 | * space. | ||
357 | * | ||
358 | * Interrupt vectors are dynamically placed relative to the | ||
359 | * interrupt prefix as determined by the address of interrupt_base. | ||
360 | * The interrupt vectors offsets are programmed using the labels | ||
361 | * for each interrupt vector entry. | ||
362 | * | ||
363 | * Interrupt vectors must be aligned on a 16 byte boundary. | ||
364 | * We align on a 32 byte cache line boundary for good measure. | ||
365 | */ | ||
366 | |||
367 | interrupt_base: | ||
368 | /* Critical Input Interrupt */ | ||
369 | CRITICAL_EXCEPTION(0x0100, CriticalInput, UnknownException) | ||
370 | |||
371 | /* Machine Check Interrupt */ | ||
372 | MCHECK_EXCEPTION(0x0200, MachineCheck, MachineCheckException) | ||
373 | |||
374 | /* Data Storage Interrupt */ | ||
375 | START_EXCEPTION(DataStorage) | ||
376 | mtspr SPRN_SPRG0, r10 /* Save some working registers */ | ||
377 | mtspr SPRN_SPRG1, r11 | ||
378 | mtspr SPRN_SPRG4W, r12 | ||
379 | mtspr SPRN_SPRG5W, r13 | ||
380 | mfcr r11 | ||
381 | mtspr SPRN_SPRG7W, r11 | ||
382 | |||
383 | /* | ||
384 | * Check if it was a store fault, if not then bail | ||
385 | * because a user tried to access a kernel or | ||
386 | * read-protected page. Otherwise, get the | ||
387 | * offending address and handle it. | ||
388 | */ | ||
389 | mfspr r10, SPRN_ESR | ||
390 | andis. r10, r10, ESR_ST@h | ||
391 | beq 2f | ||
392 | |||
393 | mfspr r10, SPRN_DEAR /* Get faulting address */ | ||
394 | |||
395 | /* If we are faulting a kernel address, we have to use the | ||
396 | * kernel page tables. | ||
397 | */ | ||
398 | lis r11, TASK_SIZE@h | ||
399 | ori r11, r11, TASK_SIZE@l | ||
400 | cmplw 0, r10, r11 | ||
401 | bge 2f | ||
402 | |||
403 | /* Get the PGD for the current thread */ | ||
404 | 3: | ||
405 | mfspr r11,SPRN_SPRG3 | ||
406 | lwz r11,PGDIR(r11) | ||
407 | 4: | ||
408 | rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ | ||
409 | lwz r11, 0(r11) /* Get L1 entry */ | ||
410 | rlwinm. r12, r11, 0, 0, 19 /* Extract L2 (pte) base address */ | ||
411 | beq 2f /* Bail if no table */ | ||
412 | |||
413 | rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */ | ||
414 | lwz r11, 0(r12) /* Get Linux PTE */ | ||
415 | |||
416 | /* Are _PAGE_USER & _PAGE_RW set & _PAGE_HWWRITE not? */ | ||
417 | andi. r13, r11, _PAGE_RW|_PAGE_USER|_PAGE_HWWRITE | ||
418 | cmpwi 0, r13, _PAGE_RW|_PAGE_USER | ||
419 | bne 2f /* Bail if not */ | ||
420 | |||
421 | /* Update 'changed'. */ | ||
422 | ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE | ||
423 | stw r11, 0(r12) /* Update Linux page table */ | ||
424 | |||
425 | /* MAS2 not updated as the entry does exist in the tlb, this | ||
426 | fault taken to detect state transition (eg: COW -> DIRTY) | ||
427 | */ | ||
428 | lis r12, MAS3_RPN@h | ||
429 | ori r12, r12, _PAGE_HWEXEC | MAS3_RPN@l | ||
430 | and r11, r11, r12 | ||
431 | rlwimi r11, r11, 31, 27, 27 /* SX <- _PAGE_HWEXEC */ | ||
432 | ori r11, r11, (MAS3_UW|MAS3_SW|MAS3_UR|MAS3_SR)@l /* set static perms */ | ||
433 | |||
434 | /* update search PID in MAS6, AS = 0 */ | ||
435 | mfspr r12, SPRN_PID0 | ||
436 | slwi r12, r12, 16 | ||
437 | mtspr SPRN_MAS6, r12 | ||
438 | |||
439 | /* find the TLB index that caused the fault. It has to be here. */ | ||
440 | tlbsx 0, r10 | ||
441 | |||
442 | mtspr SPRN_MAS3,r11 | ||
443 | tlbwe | ||
444 | |||
445 | /* Done...restore registers and get out of here. */ | ||
446 | mfspr r11, SPRN_SPRG7R | ||
447 | mtcr r11 | ||
448 | mfspr r13, SPRN_SPRG5R | ||
449 | mfspr r12, SPRN_SPRG4R | ||
450 | mfspr r11, SPRN_SPRG1 | ||
451 | mfspr r10, SPRN_SPRG0 | ||
452 | rfi /* Force context change */ | ||
453 | |||
454 | 2: | ||
455 | /* | ||
456 | * The bailout. Restore registers to pre-exception conditions | ||
457 | * and call the heavyweights to help us out. | ||
458 | */ | ||
459 | mfspr r11, SPRN_SPRG7R | ||
460 | mtcr r11 | ||
461 | mfspr r13, SPRN_SPRG5R | ||
462 | mfspr r12, SPRN_SPRG4R | ||
463 | mfspr r11, SPRN_SPRG1 | ||
464 | mfspr r10, SPRN_SPRG0 | ||
465 | b data_access | ||
466 | |||
467 | /* Instruction Storage Interrupt */ | ||
468 | INSTRUCTION_STORAGE_EXCEPTION | ||
469 | |||
470 | /* External Input Interrupt */ | ||
471 | EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE) | ||
472 | |||
473 | /* Alignment Interrupt */ | ||
474 | ALIGNMENT_EXCEPTION | ||
475 | |||
476 | /* Program Interrupt */ | ||
477 | PROGRAM_EXCEPTION | ||
478 | |||
479 | /* Floating Point Unavailable Interrupt */ | ||
480 | EXCEPTION(0x0800, FloatingPointUnavailable, UnknownException, EXC_XFER_EE) | ||
481 | |||
482 | /* System Call Interrupt */ | ||
483 | START_EXCEPTION(SystemCall) | ||
484 | NORMAL_EXCEPTION_PROLOG | ||
485 | EXC_XFER_EE_LITE(0x0c00, DoSyscall) | ||
486 | |||
487 | /* Auxillary Processor Unavailable Interrupt */ | ||
488 | EXCEPTION(0x2900, AuxillaryProcessorUnavailable, UnknownException, EXC_XFER_EE) | ||
489 | |||
490 | /* Decrementer Interrupt */ | ||
491 | DECREMENTER_EXCEPTION | ||
492 | |||
493 | /* Fixed Internal Timer Interrupt */ | ||
494 | /* TODO: Add FIT support */ | ||
495 | EXCEPTION(0x3100, FixedIntervalTimer, UnknownException, EXC_XFER_EE) | ||
496 | |||
497 | /* Watchdog Timer Interrupt */ | ||
498 | /* TODO: Add watchdog support */ | ||
499 | CRITICAL_EXCEPTION(0x3200, WatchdogTimer, UnknownException) | ||
500 | |||
501 | /* Data TLB Error Interrupt */ | ||
502 | START_EXCEPTION(DataTLBError) | ||
503 | mtspr SPRN_SPRG0, r10 /* Save some working registers */ | ||
504 | mtspr SPRN_SPRG1, r11 | ||
505 | mtspr SPRN_SPRG4W, r12 | ||
506 | mtspr SPRN_SPRG5W, r13 | ||
507 | mfcr r11 | ||
508 | mtspr SPRN_SPRG7W, r11 | ||
509 | mfspr r10, SPRN_DEAR /* Get faulting address */ | ||
510 | |||
511 | /* If we are faulting a kernel address, we have to use the | ||
512 | * kernel page tables. | ||
513 | */ | ||
514 | lis r11, TASK_SIZE@h | ||
515 | ori r11, r11, TASK_SIZE@l | ||
516 | cmplw 5, r10, r11 | ||
517 | blt 5, 3f | ||
518 | lis r11, swapper_pg_dir@h | ||
519 | ori r11, r11, swapper_pg_dir@l | ||
520 | |||
521 | mfspr r12,SPRN_MAS1 /* Set TID to 0 */ | ||
522 | rlwinm r12,r12,0,16,1 | ||
523 | mtspr SPRN_MAS1,r12 | ||
524 | |||
525 | b 4f | ||
526 | |||
527 | /* Get the PGD for the current thread */ | ||
528 | 3: | ||
529 | mfspr r11,SPRN_SPRG3 | ||
530 | lwz r11,PGDIR(r11) | ||
531 | |||
532 | 4: | ||
533 | rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ | ||
534 | lwz r11, 0(r11) /* Get L1 entry */ | ||
535 | rlwinm. r12, r11, 0, 0, 19 /* Extract L2 (pte) base address */ | ||
536 | beq 2f /* Bail if no table */ | ||
537 | |||
538 | rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */ | ||
539 | lwz r11, 0(r12) /* Get Linux PTE */ | ||
540 | andi. r13, r11, _PAGE_PRESENT | ||
541 | beq 2f | ||
542 | |||
543 | ori r11, r11, _PAGE_ACCESSED | ||
544 | stw r11, 0(r12) | ||
545 | |||
546 | /* Jump to common tlb load */ | ||
547 | b finish_tlb_load | ||
548 | 2: | ||
549 | /* The bailout. Restore registers to pre-exception conditions | ||
550 | * and call the heavyweights to help us out. | ||
551 | */ | ||
552 | mfspr r11, SPRN_SPRG7R | ||
553 | mtcr r11 | ||
554 | mfspr r13, SPRN_SPRG5R | ||
555 | mfspr r12, SPRN_SPRG4R | ||
556 | mfspr r11, SPRN_SPRG1 | ||
557 | mfspr r10, SPRN_SPRG0 | ||
558 | b data_access | ||
559 | |||
560 | /* Instruction TLB Error Interrupt */ | ||
561 | /* | ||
562 | * Nearly the same as above, except we get our | ||
563 | * information from different registers and bailout | ||
564 | * to a different point. | ||
565 | */ | ||
566 | START_EXCEPTION(InstructionTLBError) | ||
567 | mtspr SPRN_SPRG0, r10 /* Save some working registers */ | ||
568 | mtspr SPRN_SPRG1, r11 | ||
569 | mtspr SPRN_SPRG4W, r12 | ||
570 | mtspr SPRN_SPRG5W, r13 | ||
571 | mfcr r11 | ||
572 | mtspr SPRN_SPRG7W, r11 | ||
573 | mfspr r10, SPRN_SRR0 /* Get faulting address */ | ||
574 | |||
575 | /* If we are faulting a kernel address, we have to use the | ||
576 | * kernel page tables. | ||
577 | */ | ||
578 | lis r11, TASK_SIZE@h | ||
579 | ori r11, r11, TASK_SIZE@l | ||
580 | cmplw 5, r10, r11 | ||
581 | blt 5, 3f | ||
582 | lis r11, swapper_pg_dir@h | ||
583 | ori r11, r11, swapper_pg_dir@l | ||
584 | |||
585 | mfspr r12,SPRN_MAS1 /* Set TID to 0 */ | ||
586 | rlwinm r12,r12,0,16,1 | ||
587 | mtspr SPRN_MAS1,r12 | ||
588 | |||
589 | b 4f | ||
590 | |||
591 | /* Get the PGD for the current thread */ | ||
592 | 3: | ||
593 | mfspr r11,SPRN_SPRG3 | ||
594 | lwz r11,PGDIR(r11) | ||
595 | |||
596 | 4: | ||
597 | rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ | ||
598 | lwz r11, 0(r11) /* Get L1 entry */ | ||
599 | rlwinm. r12, r11, 0, 0, 19 /* Extract L2 (pte) base address */ | ||
600 | beq 2f /* Bail if no table */ | ||
601 | |||
602 | rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */ | ||
603 | lwz r11, 0(r12) /* Get Linux PTE */ | ||
604 | andi. r13, r11, _PAGE_PRESENT | ||
605 | beq 2f | ||
606 | |||
607 | ori r11, r11, _PAGE_ACCESSED | ||
608 | stw r11, 0(r12) | ||
609 | |||
610 | /* Jump to common TLB load point */ | ||
611 | b finish_tlb_load | ||
612 | |||
613 | 2: | ||
614 | /* The bailout. Restore registers to pre-exception conditions | ||
615 | * and call the heavyweights to help us out. | ||
616 | */ | ||
617 | mfspr r11, SPRN_SPRG7R | ||
618 | mtcr r11 | ||
619 | mfspr r13, SPRN_SPRG5R | ||
620 | mfspr r12, SPRN_SPRG4R | ||
621 | mfspr r11, SPRN_SPRG1 | ||
622 | mfspr r10, SPRN_SPRG0 | ||
623 | b InstructionStorage | ||
624 | |||
625 | #ifdef CONFIG_SPE | ||
626 | /* SPE Unavailable */ | ||
627 | START_EXCEPTION(SPEUnavailable) | ||
628 | NORMAL_EXCEPTION_PROLOG | ||
629 | bne load_up_spe | ||
630 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
631 | EXC_XFER_EE_LITE(0x2010, KernelSPE) | ||
632 | #else | ||
633 | EXCEPTION(0x2020, SPEUnavailable, UnknownException, EXC_XFER_EE) | ||
634 | #endif /* CONFIG_SPE */ | ||
635 | |||
636 | /* SPE Floating Point Data */ | ||
637 | #ifdef CONFIG_SPE | ||
638 | EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE); | ||
639 | #else | ||
640 | EXCEPTION(0x2040, SPEFloatingPointData, UnknownException, EXC_XFER_EE) | ||
641 | #endif /* CONFIG_SPE */ | ||
642 | |||
643 | /* SPE Floating Point Round */ | ||
644 | EXCEPTION(0x2050, SPEFloatingPointRound, UnknownException, EXC_XFER_EE) | ||
645 | |||
646 | /* Performance Monitor */ | ||
647 | EXCEPTION(0x2060, PerformanceMonitor, PerformanceMonitorException, EXC_XFER_STD) | ||
648 | |||
649 | |||
650 | /* Debug Interrupt */ | ||
651 | DEBUG_EXCEPTION | ||
652 | |||
653 | /* | ||
654 | * Local functions | ||
655 | */ | ||
656 | /* | ||
657 | * Data TLB exceptions will bail out to this point | ||
658 | * if they can't resolve the lightweight TLB fault. | ||
659 | */ | ||
660 | data_access: | ||
661 | NORMAL_EXCEPTION_PROLOG | ||
662 | mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */ | ||
663 | stw r5,_ESR(r11) | ||
664 | mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */ | ||
665 | andis. r10,r5,(ESR_ILK|ESR_DLK)@h | ||
666 | bne 1f | ||
667 | EXC_XFER_EE_LITE(0x0300, handle_page_fault) | ||
668 | 1: | ||
669 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
670 | EXC_XFER_EE_LITE(0x0300, CacheLockingException) | ||
671 | |||
672 | /* | ||
673 | |||
674 | * Both the instruction and data TLB miss get to this | ||
675 | * point to load the TLB. | ||
676 | * r10 - EA of fault | ||
677 | * r11 - TLB (info from Linux PTE) | ||
678 | * r12, r13 - available to use | ||
679 | * CR5 - results of addr < TASK_SIZE | ||
680 | * MAS0, MAS1 - loaded with proper value when we get here | ||
681 | * MAS2, MAS3 - will need additional info from Linux PTE | ||
682 | * Upon exit, we reload everything and RFI. | ||
683 | */ | ||
684 | finish_tlb_load: | ||
685 | /* | ||
686 | * We set execute, because we don't have the granularity to | ||
687 | * properly set this at the page level (Linux problem). | ||
688 | * Many of these bits are software only. Bits we don't set | ||
689 | * here we (properly should) assume have the appropriate value. | ||
690 | */ | ||
691 | |||
692 | mfspr r12, SPRN_MAS2 | ||
693 | rlwimi r12, r11, 26, 27, 31 /* extract WIMGE from pte */ | ||
694 | mtspr SPRN_MAS2, r12 | ||
695 | |||
696 | bge 5, 1f | ||
697 | |||
698 | /* addr > TASK_SIZE */ | ||
699 | li r10, (MAS3_UX | MAS3_UW | MAS3_UR) | ||
700 | andi. r13, r11, (_PAGE_USER | _PAGE_HWWRITE | _PAGE_HWEXEC) | ||
701 | andi. r12, r11, _PAGE_USER /* Test for _PAGE_USER */ | ||
702 | iseleq r12, 0, r10 | ||
703 | and r10, r12, r13 | ||
704 | srwi r12, r10, 1 | ||
705 | or r12, r12, r10 /* Copy user perms into supervisor */ | ||
706 | b 2f | ||
707 | |||
708 | /* addr <= TASK_SIZE */ | ||
709 | 1: rlwinm r12, r11, 31, 29, 29 /* Extract _PAGE_HWWRITE into SW */ | ||
710 | ori r12, r12, (MAS3_SX | MAS3_SR) | ||
711 | |||
712 | 2: rlwimi r11, r12, 0, 20, 31 /* Extract RPN from PTE and merge with perms */ | ||
713 | mtspr SPRN_MAS3, r11 | ||
714 | tlbwe | ||
715 | |||
716 | /* Done...restore registers and get out of here. */ | ||
717 | mfspr r11, SPRN_SPRG7R | ||
718 | mtcr r11 | ||
719 | mfspr r13, SPRN_SPRG5R | ||
720 | mfspr r12, SPRN_SPRG4R | ||
721 | mfspr r11, SPRN_SPRG1 | ||
722 | mfspr r10, SPRN_SPRG0 | ||
723 | rfi /* Force context change */ | ||
724 | |||
725 | #ifdef CONFIG_SPE | ||
726 | /* Note that the SPE support is closely modeled after the AltiVec | ||
727 | * support. Changes to one are likely to be applicable to the | ||
728 | * other! */ | ||
729 | load_up_spe: | ||
730 | /* | ||
731 | * Disable SPE for the task which had SPE previously, | ||
732 | * and save its SPE registers in its thread_struct. | ||
733 | * Enables SPE for use in the kernel on return. | ||
734 | * On SMP we know the SPE units are free, since we give it up every | ||
735 | * switch. -- Kumar | ||
736 | */ | ||
737 | mfmsr r5 | ||
738 | oris r5,r5,MSR_SPE@h | ||
739 | mtmsr r5 /* enable use of SPE now */ | ||
740 | isync | ||
741 | /* | ||
742 | * For SMP, we don't do lazy SPE switching because it just gets too | ||
743 | * horrendously complex, especially when a task switches from one CPU | ||
744 | * to another. Instead we call giveup_spe in switch_to. | ||
745 | */ | ||
746 | #ifndef CONFIG_SMP | ||
747 | lis r3,last_task_used_spe@ha | ||
748 | lwz r4,last_task_used_spe@l(r3) | ||
749 | cmpi 0,r4,0 | ||
750 | beq 1f | ||
751 | addi r4,r4,THREAD /* want THREAD of last_task_used_spe */ | ||
752 | SAVE_32EVR(0,r10,r4) | ||
753 | evxor evr10, evr10, evr10 /* clear out evr10 */ | ||
754 | evmwumiaa evr10, evr10, evr10 /* evr10 <- ACC = 0 * 0 + ACC */ | ||
755 | li r5,THREAD_ACC | ||
756 | evstddx evr10, r4, r5 /* save off accumulator */ | ||
757 | lwz r5,PT_REGS(r4) | ||
758 | lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
759 | lis r10,MSR_SPE@h | ||
760 | andc r4,r4,r10 /* disable SPE for previous task */ | ||
761 | stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
762 | 1: | ||
763 | #endif /* CONFIG_SMP */ | ||
764 | /* enable use of SPE after return */ | ||
765 | oris r9,r9,MSR_SPE@h | ||
766 | mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */ | ||
767 | li r4,1 | ||
768 | li r10,THREAD_ACC | ||
769 | stw r4,THREAD_USED_SPE(r5) | ||
770 | evlddx evr4,r10,r5 | ||
771 | evmra evr4,evr4 | ||
772 | REST_32EVR(0,r10,r5) | ||
773 | #ifndef CONFIG_SMP | ||
774 | subi r4,r5,THREAD | ||
775 | stw r4,last_task_used_spe@l(r3) | ||
776 | #endif /* CONFIG_SMP */ | ||
777 | /* restore registers and return */ | ||
778 | 2: REST_4GPRS(3, r11) | ||
779 | lwz r10,_CCR(r11) | ||
780 | REST_GPR(1, r11) | ||
781 | mtcr r10 | ||
782 | lwz r10,_LINK(r11) | ||
783 | mtlr r10 | ||
784 | REST_GPR(10, r11) | ||
785 | mtspr SPRN_SRR1,r9 | ||
786 | mtspr SPRN_SRR0,r12 | ||
787 | REST_GPR(9, r11) | ||
788 | REST_GPR(12, r11) | ||
789 | lwz r11,GPR11(r11) | ||
790 | SYNC | ||
791 | rfi | ||
792 | |||
793 | /* | ||
794 | * SPE unavailable trap from kernel - print a message, but let | ||
795 | * the task use SPE in the kernel until it returns to user mode. | ||
796 | */ | ||
797 | KernelSPE: | ||
798 | lwz r3,_MSR(r1) | ||
799 | oris r3,r3,MSR_SPE@h | ||
800 | stw r3,_MSR(r1) /* enable use of SPE after return */ | ||
801 | lis r3,87f@h | ||
802 | ori r3,r3,87f@l | ||
803 | mr r4,r2 /* current */ | ||
804 | lwz r5,_NIP(r1) | ||
805 | bl printk | ||
806 | b ret_from_except | ||
807 | 87: .string "SPE used in kernel (task=%p, pc=%x) \n" | ||
808 | .align 4,0 | ||
809 | |||
810 | #endif /* CONFIG_SPE */ | ||
811 | |||
812 | /* | ||
813 | * Global functions | ||
814 | */ | ||
815 | |||
816 | /* | ||
817 | * extern void loadcam_entry(unsigned int index) | ||
818 | * | ||
819 | * Load TLBCAM[index] entry in to the L2 CAM MMU | ||
820 | */ | ||
821 | _GLOBAL(loadcam_entry) | ||
822 | lis r4,TLBCAM@ha | ||
823 | addi r4,r4,TLBCAM@l | ||
824 | mulli r5,r3,20 | ||
825 | add r3,r5,r4 | ||
826 | lwz r4,0(r3) | ||
827 | mtspr SPRN_MAS0,r4 | ||
828 | lwz r4,4(r3) | ||
829 | mtspr SPRN_MAS1,r4 | ||
830 | lwz r4,8(r3) | ||
831 | mtspr SPRN_MAS2,r4 | ||
832 | lwz r4,12(r3) | ||
833 | mtspr SPRN_MAS3,r4 | ||
834 | tlbwe | ||
835 | isync | ||
836 | blr | ||
837 | |||
838 | /* | ||
839 | * extern void giveup_altivec(struct task_struct *prev) | ||
840 | * | ||
841 | * The e500 core does not have an AltiVec unit. | ||
842 | */ | ||
843 | _GLOBAL(giveup_altivec) | ||
844 | blr | ||
845 | |||
846 | #ifdef CONFIG_SPE | ||
847 | /* | ||
848 | * extern void giveup_spe(struct task_struct *prev) | ||
849 | * | ||
850 | */ | ||
851 | _GLOBAL(giveup_spe) | ||
852 | mfmsr r5 | ||
853 | oris r5,r5,MSR_SPE@h | ||
854 | SYNC | ||
855 | mtmsr r5 /* enable use of SPE now */ | ||
856 | isync | ||
857 | cmpi 0,r3,0 | ||
858 | beqlr- /* if no previous owner, done */ | ||
859 | addi r3,r3,THREAD /* want THREAD of task */ | ||
860 | lwz r5,PT_REGS(r3) | ||
861 | cmpi 0,r5,0 | ||
862 | SAVE_32EVR(0, r4, r3) | ||
863 | evxor evr6, evr6, evr6 /* clear out evr6 */ | ||
864 | evmwumiaa evr6, evr6, evr6 /* evr6 <- ACC = 0 * 0 + ACC */ | ||
865 | li r4,THREAD_ACC | ||
866 | evstddx evr6, r4, r3 /* save off accumulator */ | ||
867 | mfspr r6,SPRN_SPEFSCR | ||
868 | stw r6,THREAD_SPEFSCR(r3) /* save spefscr register value */ | ||
869 | beq 1f | ||
870 | lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
871 | lis r3,MSR_SPE@h | ||
872 | andc r4,r4,r3 /* disable SPE for previous task */ | ||
873 | stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
874 | 1: | ||
875 | #ifndef CONFIG_SMP | ||
876 | li r5,0 | ||
877 | lis r4,last_task_used_spe@ha | ||
878 | stw r5,last_task_used_spe@l(r4) | ||
879 | #endif /* CONFIG_SMP */ | ||
880 | blr | ||
881 | #endif /* CONFIG_SPE */ | ||
882 | |||
883 | /* | ||
884 | * extern void giveup_fpu(struct task_struct *prev) | ||
885 | * | ||
886 | * The e500 core does not have an FPU. | ||
887 | */ | ||
888 | _GLOBAL(giveup_fpu) | ||
889 | blr | ||
890 | |||
891 | /* | ||
892 | * extern void abort(void) | ||
893 | * | ||
894 | * At present, this routine just applies a system reset. | ||
895 | */ | ||
896 | _GLOBAL(abort) | ||
897 | li r13,0 | ||
898 | mtspr SPRN_DBCR0,r13 /* disable all debug events */ | ||
899 | mfmsr r13 | ||
900 | ori r13,r13,MSR_DE@l /* Enable Debug Events */ | ||
901 | mtmsr r13 | ||
902 | mfspr r13,SPRN_DBCR0 | ||
903 | lis r13,(DBCR0_IDM|DBCR0_RST_CHIP)@h | ||
904 | mtspr SPRN_DBCR0,r13 | ||
905 | |||
906 | _GLOBAL(set_context) | ||
907 | |||
908 | #ifdef CONFIG_BDI_SWITCH | ||
909 | /* Context switch the PTE pointer for the Abatron BDI2000. | ||
910 | * The PGDIR is the second parameter. | ||
911 | */ | ||
912 | lis r5, abatron_pteptrs@h | ||
913 | ori r5, r5, abatron_pteptrs@l | ||
914 | stw r4, 0x4(r5) | ||
915 | #endif | ||
916 | mtspr SPRN_PID,r3 | ||
917 | isync /* Force context change */ | ||
918 | blr | ||
919 | |||
920 | /* | ||
921 | * We put a few things here that have to be page-aligned. This stuff | ||
922 | * goes at the beginning of the data segment, which is page-aligned. | ||
923 | */ | ||
924 | .data | ||
925 | _GLOBAL(sdata) | ||
926 | _GLOBAL(empty_zero_page) | ||
927 | .space 4096 | ||
928 | _GLOBAL(swapper_pg_dir) | ||
929 | .space 4096 | ||
930 | |||
931 | /* Reserved 4k for the critical exception stack & 4k for the machine | ||
932 | * check stack per CPU for kernel mode exceptions */ | ||
933 | .section .bss | ||
934 | .align 12 | ||
935 | exception_stack_bottom: | ||
936 | .space BOOKE_EXCEPTION_STACK_SIZE * NR_CPUS | ||
937 | _GLOBAL(exception_stack_top) | ||
938 | |||
939 | /* | ||
940 | * This space gets a copy of optional info passed to us by the bootstrap | ||
941 | * which is used to pass parameters into the kernel like root=/dev/sda1, etc. | ||
942 | */ | ||
943 | _GLOBAL(cmd_line) | ||
944 | .space 512 | ||
945 | |||
946 | /* | ||
947 | * Room for two PTE pointers, usually the kernel and current user pointers | ||
948 | * to their respective root page table. | ||
949 | */ | ||
950 | abatron_pteptrs: | ||
951 | .space 8 | ||
952 | |||
diff --git a/arch/ppc/kernel/idle.c b/arch/ppc/kernel/idle.c new file mode 100644 index 000000000000..53547b6de45b --- /dev/null +++ b/arch/ppc/kernel/idle.c | |||
@@ -0,0 +1,100 @@ | |||
1 | /* | ||
2 | * Idle daemon for PowerPC. Idle daemon will handle any action | ||
3 | * that needs to be taken when the system becomes idle. | ||
4 | * | ||
5 | * Written by Cort Dougan (cort@cs.nmt.edu). Subsequently hacked | ||
6 | * on by Tom Rini, Armin Kuster, Paul Mackerras and others. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License | ||
10 | * as published by the Free Software Foundation; either version | ||
11 | * 2 of the License, or (at your option) any later version. | ||
12 | */ | ||
13 | #include <linux/config.h> | ||
14 | #include <linux/errno.h> | ||
15 | #include <linux/sched.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/mm.h> | ||
18 | #include <linux/smp.h> | ||
19 | #include <linux/smp_lock.h> | ||
20 | #include <linux/stddef.h> | ||
21 | #include <linux/unistd.h> | ||
22 | #include <linux/ptrace.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <linux/sysctl.h> | ||
25 | |||
26 | #include <asm/pgtable.h> | ||
27 | #include <asm/uaccess.h> | ||
28 | #include <asm/system.h> | ||
29 | #include <asm/io.h> | ||
30 | #include <asm/mmu.h> | ||
31 | #include <asm/cache.h> | ||
32 | #include <asm/cputable.h> | ||
33 | #include <asm/machdep.h> | ||
34 | |||
35 | void default_idle(void) | ||
36 | { | ||
37 | void (*powersave)(void); | ||
38 | |||
39 | powersave = ppc_md.power_save; | ||
40 | |||
41 | if (!need_resched()) { | ||
42 | if (powersave != NULL) | ||
43 | powersave(); | ||
44 | #ifdef CONFIG_SMP | ||
45 | else { | ||
46 | set_thread_flag(TIF_POLLING_NRFLAG); | ||
47 | while (!need_resched()) | ||
48 | barrier(); | ||
49 | clear_thread_flag(TIF_POLLING_NRFLAG); | ||
50 | } | ||
51 | #endif | ||
52 | } | ||
53 | if (need_resched()) | ||
54 | schedule(); | ||
55 | } | ||
56 | |||
57 | /* | ||
58 | * The body of the idle task. | ||
59 | */ | ||
60 | void cpu_idle(void) | ||
61 | { | ||
62 | for (;;) | ||
63 | if (ppc_md.idle != NULL) | ||
64 | ppc_md.idle(); | ||
65 | else | ||
66 | default_idle(); | ||
67 | } | ||
68 | |||
#if defined(CONFIG_SYSCTL) && defined(CONFIG_6xx)
/*
 * Register the sysctl to set/clear powersave_nap
 * (/proc/sys/kernel/powersave-nap).
 */
extern unsigned long powersave_nap;

static ctl_table powersave_nap_ctl_table[]={
	{
		.ctl_name	= KERN_PPC_POWERSAVE_NAP,
		.procname	= "powersave-nap",
		.data		= &powersave_nap,
		/* NOTE(review): powersave_nap is an unsigned long but is
		 * exposed with maxlen sizeof(int); the two sizes match on
		 * 32-bit ppc — confirm if this ever moves to 64-bit. */
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{ 0, },
};
/* Root "kernel" directory entry; use designated initializers for
 * consistency with the table above (positional init is fragile if the
 * ctl_table layout ever changes). */
static ctl_table powersave_nap_sysctl_root[] = {
	{
		.ctl_name	= 1,		/* CTL_KERN */
		.procname	= "kernel",
		.mode		= 0755,
		.child		= powersave_nap_ctl_table,
	},
	{ 0, },
};

/* Hook the table in at boot; return 0 so the initcall always succeeds. */
static int __init
register_powersave_nap_sysctl(void)
{
	register_sysctl_table(powersave_nap_sysctl_root, 0);

	return 0;
}

__initcall(register_powersave_nap_sysctl);
#endif
diff --git a/arch/ppc/kernel/idle_6xx.S b/arch/ppc/kernel/idle_6xx.S new file mode 100644 index 000000000000..25d009c75f7b --- /dev/null +++ b/arch/ppc/kernel/idle_6xx.S | |||
@@ -0,0 +1,233 @@ | |||
1 | /* | ||
2 | * This file contains the power_save function for 6xx & 7xxx CPUs | ||
3 | * rewritten in assembler | ||
4 | * | ||
5 | * Warning ! This code assumes that if your machine has a 750fx | ||
6 | * it will have PLL 1 set to low speed mode (used during NAP/DOZE). | ||
7 | * if this is not the case some additional changes will have to | ||
8 | * be done to check a runtime var (a bit like powersave-nap) | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public License | ||
12 | * as published by the Free Software Foundation; either version | ||
13 | * 2 of the License, or (at your option) any later version. | ||
14 | */ | ||
15 | |||
16 | #include <linux/config.h> | ||
17 | #include <linux/threads.h> | ||
18 | #include <asm/processor.h> | ||
19 | #include <asm/page.h> | ||
20 | #include <asm/cputable.h> | ||
21 | #include <asm/thread_info.h> | ||
22 | #include <asm/ppc_asm.h> | ||
23 | #include <asm/offsets.h> | ||
24 | |||
25 | #undef DEBUG | ||
26 | |||
27 | .text | ||
28 | |||
29 | /* | ||
30 | * Init idle, called at early CPU setup time from head.S for each CPU | ||
31 | * Make sure no rest of NAP mode remains in HID0, save default | ||
32 | * values for some CPU specific registers. Called with r24 | ||
33 | * containing CPU number and r3 reloc offset | ||
34 | */ | ||
_GLOBAL(init_idle_6xx)
BEGIN_FTR_SECTION
	mfspr	r4,SPRN_HID0
	rlwinm	r4,r4,0,10,8	/* Clear NAP */
	mtspr	SPRN_HID0, r4
	b	1f
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
	blr			/* nothing to do if the CPU can't nap */
1:
	/* Compute per-CPU save-slot offset: CPU number * 4, plus the
	 * reloc offset in r3 since we run before the final mapping. */
	slwi	r5,r24,2
	add	r5,r5,r3
BEGIN_FTR_SECTION
	/* Save default MSSCR0 for later restore after NAP (745x) */
	mfspr	r4,SPRN_MSSCR0
	addis	r6,r5, nap_save_msscr0@ha
	stw	r4,nap_save_msscr0@l(r6)
END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
BEGIN_FTR_SECTION
	/* Save default HID1 (PLL selection) for 750FX dual-PLL parts */
	mfspr	r4,SPRN_HID1
	addis	r6,r5,nap_save_hid1@ha
	stw	r4,nap_save_hid1@l(r6)
END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
	blr
57 | |||
58 | /* | ||
59 | * Here is the power_save_6xx function. This could eventually be | ||
60 | * split into several functions & changing the function pointer | ||
61 | * depending on the various features. | ||
62 | */ | ||
_GLOBAL(ppc6xx_idle)
	/* Check if we can nap or doze, put HID0 mask in r3
	 */
	lis	r3, 0
BEGIN_FTR_SECTION
	lis	r3,HID0_DOZE@h
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
BEGIN_FTR_SECTION
	/* We must dynamically check for the NAP feature as it
	 * can be cleared by CPU init after the fixups are done
	 */
	lis	r4,cur_cpu_spec@ha
	lwz	r4,cur_cpu_spec@l(r4)
	lwz	r4,CPU_SPEC_FEATURES(r4)
	andi.	r0,r4,CPU_FTR_CAN_NAP
	beq	1f
	/* Now check if user or arch enabled NAP mode */
	lis	r4,powersave_nap@ha
	lwz	r4,powersave_nap@l(r4)
	cmpwi	0,r4,0
	beq	1f
	lis	r3,HID0_NAP@h	/* NAP preferred over DOZE when allowed */
1:
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
	cmpwi	0,r3,0
	beqlr			/* neither NAP nor DOZE possible: bail */

	/* Clear MSR:EE */
	mfmsr	r7
	rlwinm	r0,r7,0,17,15
	mtmsr	r0

	/* Check current_thread_info()->flags */
	rlwinm	r4,r1,0,0,18	/* thread_info is at the base of the stack */
	lwz	r4,TI_FLAGS(r4)
	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	1f
	mtmsr	r7		/* out of line this ? */
	blr
1:
	/* Some pre-nap cleanups needed on some CPUs */
	andis.	r0,r3,HID0_NAP@h
	beq	2f
BEGIN_FTR_SECTION
	/* Disable L2 prefetch on some 745x and try to ensure
	 * L2 prefetch engines are idle. As explained by errata
	 * text, we can't be sure they are, we just hope very hard
	 * that will be enough (sic !). At least I noticed Apple
	 * doesn't even bother doing the dcbf's here...
	 */
	mfspr	r4,SPRN_MSSCR0
	rlwinm	r4,r4,0,0,29
	sync
	mtspr	SPRN_MSSCR0,r4
	sync
	isync
	lis	r4,KERNELBASE@h
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
#ifdef DEBUG
	lis	r6,nap_enter_count@ha
	lwz	r4,nap_enter_count@l(r6)
	addi	r4,r4,1
	stw	r4,nap_enter_count@l(r6)
#endif
2:
BEGIN_FTR_SECTION
	/* Go to low speed mode on some 750FX */
	lis	r4,powersave_lowspeed@ha
	lwz	r4,powersave_lowspeed@l(r4)
	cmpwi	0,r4,0
	beq	1f
	mfspr	r4,SPRN_HID1
	oris	r4,r4,0x0001	/* select PLL1 (low speed) */
	mtspr	SPRN_HID1,r4
1:
END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)

	/* Go to NAP or DOZE now */
	mfspr	r4,SPRN_HID0
	lis	r5,(HID0_NAP|HID0_SLEEP)@h
BEGIN_FTR_SECTION
	oris	r5,r5,HID0_DOZE@h
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
	andc	r4,r4,r5	/* clear all power-mode bits... */
	or	r4,r4,r3	/* ...then set the one chosen above */
BEGIN_FTR_SECTION
	oris	r4,r4,HID0_DPM@h	/* that should be done once for all  */
END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
	mtspr	SPRN_HID0,r4
BEGIN_FTR_SECTION
	DSSALL			/* stop all AltiVec data streams first */
	sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	ori	r7,r7,MSR_EE	/* Could be omitted (already set) */
	oris	r7,r7,MSR_POW@h	/* setting MSR:POW enters the sleep mode */
	sync
	isync
	mtmsr	r7
	isync
	sync
	blr
168 | |||
169 | /* | ||
170 | * Return from NAP/DOZE mode, restore some CPU specific registers, | ||
171 | * we are called with DR/IR still off and r2 containing physical | ||
172 | * address of current. | ||
173 | */ | ||
_GLOBAL(power_save_6xx_restore)
	mfspr	r11,SPRN_HID0
	rlwinm.	r11,r11,0,10,8	/* Clear NAP & copy NAP bit !state to cr1 EQ */
	cror	4*cr1+eq,4*cr0+eq,4*cr0+eq
BEGIN_FTR_SECTION
	rlwinm	r11,r11,0,9,7	/* Clear DOZE */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
	mtspr	SPRN_HID0, r11

#ifdef DEBUG
	beq	cr1,1f		/* only count real NAP returns */
	lis	r11,(nap_return_count-KERNELBASE)@ha
	lwz	r9,nap_return_count@l(r11)
	addi	r9,r9,1
	stw	r9,nap_return_count@l(r11)
1:
#endif

	/* Derive this CPU's save-slot offset (TI_CPU * 4); physical
	 * addressing since we run with DR/IR still off. */
	rlwinm	r9,r1,0,0,18
	tophys(r9,r9)
	lwz	r11,TI_CPU(r9)
	slwi	r11,r11,2
	/* Todo make sure all these are in the same page
	 * and load r22 (@ha part + CPU offset) only once
	 */
BEGIN_FTR_SECTION
	beq	cr1,1f		/* skip unless we actually napped */
	addis	r9,r11,(nap_save_msscr0-KERNELBASE)@ha
	lwz	r9,nap_save_msscr0@l(r9)
	mtspr	SPRN_MSSCR0, r9	/* re-enable L2 prefetch (745x) */
	sync
	isync
1:
END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
BEGIN_FTR_SECTION
	addis	r9,r11,(nap_save_hid1-KERNELBASE)@ha
	lwz	r9,nap_save_hid1@l(r9)
	mtspr	SPRN_HID1, r9	/* restore PLL selection (750FX) */
END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
	b	transfer_to_handler_cont
214 | |||
	.data

/* Per-CPU saved MSSCR0 values (filled in by init_idle_6xx) */
_GLOBAL(nap_save_msscr0)
	.space	4*NR_CPUS

/* Per-CPU saved HID1 values for dual-PLL 750FX parts */
_GLOBAL(nap_save_hid1)
	.space	4*NR_CPUS

/* Non-zero when NAP mode is enabled (settable via sysctl) */
_GLOBAL(powersave_nap)
	.long	0
/* Non-zero to drop 750FX to its low-speed PLL while idling */
_GLOBAL(powersave_lowspeed)
	.long	0

#ifdef DEBUG
/* Debug counters: NAP entries vs. returns */
_GLOBAL(nap_enter_count)
	.space	4
_GLOBAL(nap_return_count)
	.space	4
#endif
diff --git a/arch/ppc/kernel/idle_power4.S b/arch/ppc/kernel/idle_power4.S new file mode 100644 index 000000000000..73a58ff03900 --- /dev/null +++ b/arch/ppc/kernel/idle_power4.S | |||
@@ -0,0 +1,91 @@ | |||
1 | /* | ||
2 | * This file contains the power_save function for 6xx & 7xxx CPUs | ||
3 | * rewritten in assembler | ||
4 | * | ||
5 | * Warning ! This code assumes that if your machine has a 750fx | ||
6 | * it will have PLL 1 set to low speed mode (used during NAP/DOZE). | ||
7 | * if this is not the case some additional changes will have to | ||
8 | * be done to check a runtime var (a bit like powersave-nap) | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public License | ||
12 | * as published by the Free Software Foundation; either version | ||
13 | * 2 of the License, or (at your option) any later version. | ||
14 | */ | ||
15 | |||
16 | #include <linux/config.h> | ||
17 | #include <linux/threads.h> | ||
18 | #include <asm/processor.h> | ||
19 | #include <asm/page.h> | ||
20 | #include <asm/cputable.h> | ||
21 | #include <asm/thread_info.h> | ||
22 | #include <asm/ppc_asm.h> | ||
23 | #include <asm/offsets.h> | ||
24 | |||
25 | #undef DEBUG | ||
26 | |||
27 | .text | ||
28 | |||
/*
 * Init idle, called at early CPU setup time from head.S for each CPU
 * So nothing for now. Called with r24 containing CPU number and r3
 * reloc offset
 */
	.globl	init_idle_power4
init_idle_power4:
	blr			/* no per-CPU idle setup needed on POWER4 */
37 | |||
/*
 * Here is the power4_idle function. This could eventually be
 * split into several functions & changing the function pointer
 * depending on the various features.
 */
	.globl	power4_idle
power4_idle:
BEGIN_FTR_SECTION
	blr			/* CPU can't nap: nothing to do */
END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
	/* We must dynamically check for the NAP feature as it
	 * can be cleared by CPU init after the fixups are done
	 */
	lis	r4,cur_cpu_spec@ha
	lwz	r4,cur_cpu_spec@l(r4)
	lwz	r4,CPU_SPEC_FEATURES(r4)
	andi.	r0,r4,CPU_FTR_CAN_NAP
	beqlr
	/* Now check if user or arch enabled NAP mode */
	lis	r4,powersave_nap@ha
	lwz	r4,powersave_nap@l(r4)
	cmpwi	0,r4,0
	beqlr

	/* Clear MSR:EE */
	mfmsr	r7
	rlwinm	r0,r7,0,17,15
	mtmsr	r0

	/* Check current_thread_info()->flags */
	rlwinm	r4,r1,0,0,18	/* thread_info is at the base of the stack */
	lwz	r4,TI_FLAGS(r4)
	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	1f
	mtmsr	r7	/* out of line this ? */
	blr
1:
	/* Go to NAP now */
BEGIN_FTR_SECTION
	DSSALL			/* stop all AltiVec data streams first */
	sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	ori	r7,r7,MSR_EE	/* Could be omitted (already set) */
	oris	r7,r7,MSR_POW@h	/* setting MSR:POW enters the sleep mode */
	sync
	isync
	mtmsr	r7
	isync
	sync
	blr
88 | |||
/* Non-zero when NAP mode is enabled (settable via sysctl) */
	.globl	powersave_nap
powersave_nap:
	.long	0
diff --git a/arch/ppc/kernel/irq.c b/arch/ppc/kernel/irq.c new file mode 100644 index 000000000000..8843f3af230f --- /dev/null +++ b/arch/ppc/kernel/irq.c | |||
@@ -0,0 +1,164 @@ | |||
1 | /* | ||
2 | * arch/ppc/kernel/irq.c | ||
3 | * | ||
4 | * Derived from arch/i386/kernel/irq.c | ||
5 | * Copyright (C) 1992 Linus Torvalds | ||
6 | * Adapted from arch/i386 by Gary Thomas | ||
7 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
8 | * Updated and modified by Cort Dougan <cort@fsmlabs.com> | ||
9 | * Copyright (C) 1996-2001 Cort Dougan | ||
10 | * Adapted for Power Macintosh by Paul Mackerras | ||
11 | * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au) | ||
12 | * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk). | ||
13 | * | ||
14 | * This file contains the code used by various IRQ handling routines: | ||
15 | * asking for different IRQ's should be done through these routines | ||
16 | * instead of just grabbing them. Thus setups with different IRQ numbers | ||
17 | * shouldn't result in any weird surprises, and installing new handlers | ||
18 | * should be easier. | ||
19 | * | ||
20 | * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the | ||
21 | * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit | ||
22 | * mask register (of which only 16 are defined), hence the weird shifting | ||
23 | * and complement of the cached_irq_mask. I want to be able to stuff | ||
24 | * this right into the SIU SMASK register. | ||
25 | * Many of the prep/chrp functions are conditional compiled on CONFIG_8xx | ||
26 | * to reduce code space and undefined function references. | ||
27 | */ | ||
28 | |||
29 | #include <linux/errno.h> | ||
30 | #include <linux/module.h> | ||
31 | #include <linux/threads.h> | ||
32 | #include <linux/kernel_stat.h> | ||
33 | #include <linux/signal.h> | ||
34 | #include <linux/sched.h> | ||
35 | #include <linux/ptrace.h> | ||
36 | #include <linux/ioport.h> | ||
37 | #include <linux/interrupt.h> | ||
38 | #include <linux/timex.h> | ||
39 | #include <linux/config.h> | ||
40 | #include <linux/init.h> | ||
41 | #include <linux/slab.h> | ||
42 | #include <linux/pci.h> | ||
43 | #include <linux/delay.h> | ||
44 | #include <linux/irq.h> | ||
45 | #include <linux/proc_fs.h> | ||
46 | #include <linux/random.h> | ||
47 | #include <linux/seq_file.h> | ||
48 | #include <linux/cpumask.h> | ||
49 | #include <linux/profile.h> | ||
50 | #include <linux/bitops.h> | ||
51 | |||
52 | #include <asm/uaccess.h> | ||
53 | #include <asm/system.h> | ||
54 | #include <asm/io.h> | ||
55 | #include <asm/pgtable.h> | ||
56 | #include <asm/irq.h> | ||
57 | #include <asm/cache.h> | ||
58 | #include <asm/prom.h> | ||
59 | #include <asm/ptrace.h> | ||
60 | |||
61 | #define NR_MASK_WORDS ((NR_IRQS + 31) / 32) | ||
62 | |||
63 | extern atomic_t ipi_recv; | ||
64 | extern atomic_t ipi_sent; | ||
65 | |||
66 | #define MAXCOUNT 10000000 | ||
67 | |||
68 | int ppc_spurious_interrupts = 0; | ||
69 | struct irqaction *ppc_irq_action[NR_IRQS]; | ||
70 | unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; | ||
71 | unsigned long ppc_lost_interrupts[NR_MASK_WORDS]; | ||
72 | atomic_t ppc_n_lost_interrupts; | ||
73 | |||
74 | #ifdef CONFIG_TAU_INT | ||
75 | extern int tau_initialized; | ||
76 | extern int tau_interrupts(int); | ||
77 | #endif | ||
78 | |||
/*
 * /proc/interrupts seq_file show callback: one row per IRQ with
 * per-CPU counts, controller name, trigger type, and action names.
 * After the last IRQ (i == NR_IRQS) it appends TAU, IPI, and
 * spurious-interrupt summary lines.
 */
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;	/* i: IRQ number for this row */
	struct irqaction * action;
	unsigned long flags;

	if (i == 0) {
		/* Column header: one "CPUn" label per online CPU */
		seq_puts(p, "           ");
		for (j=0; j<NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "CPU%d       ", j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		/* Lock the descriptor so the action list can't change
		 * under us while we walk it. */
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if ( !action || !action->handler )
			goto skip;	/* unused IRQ: print nothing */
		seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "%10u ",
					   kstat_cpu(j).irqs[i]);
#else
		seq_printf(p, "%10u ", kstat_irqs(i));
#endif /* CONFIG_SMP */
		if (irq_desc[i].handler)
			seq_printf(p, " %s ", irq_desc[i].handler->typename);
		else
			seq_puts(p, "  None      ");
		seq_printf(p, "%s", (irq_desc[i].status & IRQ_LEVEL) ? "Level " : "Edge  ");
		seq_printf(p, "    %s", action->name);
		/* Additional handlers sharing this IRQ */
		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);
		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS) {
#ifdef CONFIG_TAU_INT
		if (tau_initialized){
			seq_puts(p, "TAU: ");
			for (j = 0; j < NR_CPUS; j++)
				if (cpu_online(j))
					seq_printf(p, "%10u ", tau_interrupts(j));
			seq_puts(p, "  PowerPC             Thermal Assist (cpu temp)\n");
		}
#endif
#ifdef CONFIG_SMP
		/* should this be per processor send/receive? */
		seq_printf(p, "IPI (recv/sent): %10u/%u\n",
			   atomic_read(&ipi_recv), atomic_read(&ipi_sent));
#endif
		seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
	}
	return 0;
}
137 | |||
/*
 * Top-level external interrupt entry: drain all pending IRQs reported
 * by the platform and dispatch each through the generic __do_IRQ().
 */
void do_IRQ(struct pt_regs *regs)
{
	int irq, first = 1;	/* first: no IRQ handled yet this entry */
	irq_enter();

	/*
	 * Every platform is required to implement ppc_md.get_irq.
	 * This function will either return an irq number or -1 to
	 * indicate there are no more pending. But the first time
	 * through the loop this means there wasn't an IRQ pending.
	 * The value -2 is for buggy hardware and means that this IRQ
	 * has already been handled. -- Tom
	 */
	while ((irq = ppc_md.get_irq(regs)) >= 0) {
		__do_IRQ(irq, regs);
		first = 0;
	}
	/* Nothing was dispatched and it wasn't the -2 "already handled"
	 * case: count it as spurious. */
	if (irq != -2 && first)
		/* That's not SMP safe ... but who cares ? */
		ppc_spurious_interrupts++;
	irq_exit();
}
160 | |||
/* Boot-time interrupt setup: defer entirely to the platform hook. */
void __init init_IRQ(void)
{
	ppc_md.init_IRQ();
}
diff --git a/arch/ppc/kernel/l2cr.S b/arch/ppc/kernel/l2cr.S new file mode 100644 index 000000000000..c39441048266 --- /dev/null +++ b/arch/ppc/kernel/l2cr.S | |||
@@ -0,0 +1,442 @@ | |||
1 | /* | ||
2 | L2CR functions | ||
3 | Copyright © 1997-1998 by PowerLogix R & D, Inc. | ||
4 | |||
5 | This program is free software; you can redistribute it and/or modify | ||
6 | it under the terms of the GNU General Public License as published by | ||
7 | the Free Software Foundation; either version 2 of the License, or | ||
8 | (at your option) any later version. | ||
9 | |||
10 | This program is distributed in the hope that it will be useful, | ||
11 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | GNU General Public License for more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License | ||
16 | along with this program; if not, write to the Free Software | ||
17 | Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
18 | */ | ||
19 | /* | ||
20 | Thur, Dec. 12, 1998. | ||
21 | - First public release, contributed by PowerLogix. | ||
22 | *********** | ||
23 | Sat, Aug. 7, 1999. | ||
24 | - Terry: Made sure code disabled interrupts before running. (Previously | ||
25 | it was assumed interrupts were already disabled). | ||
26 | - Terry: Updated for tentative G4 support. 4MB of memory is now flushed | ||
27 | instead of 2MB. (Prob. only 3 is necessary). | ||
28 | - Terry: Updated for workaround to HID0[DPM] processor bug | ||
29 | during global invalidates. | ||
30 | *********** | ||
31 | Thu, July 13, 2000. | ||
32 | - Terry: Added isync to correct for an errata. | ||
33 | |||
34 | 22 August 2001. | ||
35 | - DanM: Finally added the 7450 patch I've had for the past | ||
36 | several months. The L2CR is similar, but I'm going | ||
37 | to assume the user of this functions knows what they | ||
38 | are doing. | ||
39 | |||
40 | Author: Terry Greeniaus (tgree@phys.ualberta.ca) | ||
41 | Please e-mail updates to this file to me, thanks! | ||
42 | */ | ||
43 | #include <linux/config.h> | ||
44 | #include <asm/processor.h> | ||
45 | #include <asm/cputable.h> | ||
46 | #include <asm/ppc_asm.h> | ||
47 | #include <asm/cache.h> | ||
48 | #include <asm/page.h> | ||
49 | |||
50 | /* Usage: | ||
51 | |||
52 | When setting the L2CR register, you must do a few special | ||
53 | things. If you are enabling the cache, you must perform a | ||
54 | global invalidate. If you are disabling the cache, you must | ||
55 | flush the cache contents first. This routine takes care of | ||
56 | doing these things. When first enabling the cache, make sure | ||
57 | you pass in the L2CR you want, as well as passing in the | ||
58 | global invalidate bit set. A global invalidate will only be | ||
59 | performed if the L2I bit is set in applyThis. When enabling | ||
60 | the cache, you should also set the L2E bit in applyThis. If | ||
61 | you want to modify the L2CR contents after the cache has been | ||
62 | enabled, the recommended procedure is to first call | ||
63 | __setL2CR(0) to disable the cache and then call it again with | ||
64 | the new values for L2CR. Examples: | ||
65 | |||
66 | _setL2CR(0) - disables the cache | ||
67 | _setL2CR(0xB3A04000) - enables my G3 upgrade card: | ||
68 | - L2E set to turn on the cache | ||
69 | - L2SIZ set to 1MB | ||
70 | - L2CLK set to 1:1 | ||
71 | - L2RAM set to pipelined synchronous late-write | ||
72 | - L2I set to perform a global invalidation | ||
73 | - L2OH set to 0.5 nS | ||
74 | - L2DF set because this upgrade card | ||
75 | requires it | ||
76 | |||
77 | A similar call should work for your card. You need to know | ||
78 | the correct setting for your card and then place them in the | ||
79 | fields I have outlined above. Other fields support optional | ||
80 | features, such as L2DO which caches only data, or L2TS which | ||
81 | causes cache pushes from the L1 cache to go to the L2 cache | ||
82 | instead of to main memory. | ||
83 | |||
84 | IMPORTANT: | ||
85 | Starting with the 7450, the bits in this register have moved | ||
86 | or behave differently. The Enable, Parity Enable, Size, | ||
87 | and L2 Invalidate are the only bits that have not moved. | ||
88 | The size is read-only for these processors with internal L2 | ||
89 | cache, and the invalidate is a control as well as status. | ||
90 | -- Dan | ||
91 | |||
92 | */ | ||
93 | /* | ||
94 | * Summary: this procedure ignores the L2I bit in the value passed in, | ||
95 | * flushes the cache if it was already enabled, always invalidates the | ||
96 | * cache, then enables the cache if the L2E bit is set in the value | ||
97 | * passed in. | ||
98 | * -- paulus. | ||
99 | */ | ||
_GLOBAL(_set_L2CR)
	/* Make sure this is a 750 or 7400 chip */
BEGIN_FTR_SECTION
	li	r3,-1		/* no L2CR on this CPU: return -1 */
	blr
END_FTR_SECTION_IFCLR(CPU_FTR_L2CR)

	mflr	r9		/* save LR across the flush loops */

	/* Stop DST streams */
BEGIN_FTR_SECTION
	DSSALL
	sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)

	/* Turn off interrupts and data relocation. */
	mfmsr	r7		/* Save MSR in r7 */
	rlwinm	r4,r7,0,17,15	/* clear MSR:EE */
	rlwinm	r4,r4,0,28,26	/* Turn off DR bit */
	sync
	mtmsr	r4
	isync

	/* Before we perform the global invalidation, we must disable dynamic
	 * power management via HID0[DPM] to work around a processor bug where
	 * DPM can possibly interfere with the state machine in the processor
	 * that invalidates the L2 cache tags.
	 */
	mfspr	r8,SPRN_HID0		/* Save HID0 in r8 */
	rlwinm	r4,r8,0,12,10		/* Turn off HID0[DPM] */
	sync
	mtspr	SPRN_HID0,r4		/* Disable DPM */
	sync

	/* Get the current enable bit of the L2CR into r4 */
	mfspr	r4,SPRN_L2CR

	/* Tweak some bits */
	rlwinm	r5,r3,0,0,0		/* r5 contains the new enable bit */
	rlwinm	r3,r3,0,11,9		/* Turn off the invalidate bit */
	rlwinm	r3,r3,0,1,31		/* Turn off the enable bit */

	/* Check to see if we need to flush */
	rlwinm.	r4,r4,0,0,0		/* L2 currently enabled? */
	beq	2f

	/* Flush the cache. First, read the first 4MB of memory (physical) to
	 * put new data in the cache. (Actually we only need
	 * the size of the L2 cache plus the size of the L1 cache, but 4MB will
	 * cover everything just to be safe).
	 */

	 /**** Might be a good idea to set L2DO here - to prevent instructions
	       from getting into the cache. But since we invalidate
	       the next time we enable the cache it doesn't really matter.
	       Don't do this unless you accommodate all processor variations.
	       The bit moved on the 7450.....
	  ****/

	/* TODO: use HW flush assist when available */

	lis	r4,0x0002	/* 0x20000 iterations * 32B = 4MB */
	mtctr	r4
	li	r4,0
1:
	lwzx	r0,r0,r4
	addi	r4,r4,32		/* Go to start of next cache line */
	bdnz	1b
	isync

	/* Now, flush the first 4MB of memory */
	lis	r4,0x0002
	mtctr	r4
	li	r4,0
	sync
1:
	dcbf	0,r4
	addi	r4,r4,32		/* Go to start of next cache line */
	bdnz	1b

2:
	/* Set up the L2CR configuration bits (and switch L2 off) */
	/* CPU errata: Make sure the mtspr below is already in the
	 * L1 icache
	 */
	b	20f
	.balign	L1_CACHE_LINE_SIZE
22:
	sync
	mtspr	SPRN_L2CR,r3
	sync
	b	23f
20:
	b	21f
21:	sync			/* branches warm the mtspr line into icache */
	isync
	b	22b

23:
	/* Perform a global invalidation */
	oris	r3,r3,0x0020	/* set L2I */
	sync
	mtspr	SPRN_L2CR,r3
	sync
	isync			/* For errata */

BEGIN_FTR_SECTION
	/* On the 7450, we wait for the L2I bit to clear......
	*/
10:	mfspr	r3,SPRN_L2CR
	andis.	r4,r3,0x0020
	bne	10b
	b	11f
END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)

	/* Wait for the invalidation to complete */
3:	mfspr	r3,SPRN_L2CR
	rlwinm.	r4,r3,0,31,31	/* poll L2IP (invalidate in progress) */
	bne	3b

11:	rlwinm	r3,r3,0,11,9		/* Turn off the L2I bit */
	sync
	mtspr	SPRN_L2CR,r3
	sync

	/* See if we need to enable the cache */
	cmplwi	r5,0
	beq	4f

	/* Enable the cache */
	oris	r3,r3,0x8000	/* set L2E */
	mtspr	SPRN_L2CR,r3
	sync

4:

	/* Restore HID0[DPM] to whatever it was before */
	sync
	mtspr	1008,r8		/* 1008 == SPRN_HID0 */
	sync

	/* Restore MSR (restores EE and DR bits to original state) */
	SYNC
	mtmsr	r7
	isync

	mtlr	r9
	blr
248 | |||
_GLOBAL(_get_L2CR)
	/* Return the L2CR contents (0 on CPUs without an L2CR) */
	li	r3,0
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_L2CR
END_FTR_SECTION_IFSET(CPU_FTR_L2CR)
	blr
256 | |||
257 | |||
258 | /* | ||
259 | * Here is a similar routine for dealing with the L3 cache | ||
260 | * on the 745x family of chips | ||
261 | */ | ||
262 | |||
_GLOBAL(_set_L3CR)
	/* Make sure this is a 745x chip */
BEGIN_FTR_SECTION
	li	r3,-1		/* no L3CR on this CPU: return -1 */
	blr
END_FTR_SECTION_IFCLR(CPU_FTR_L3CR)

	/* Turn off interrupts and data relocation. */
	mfmsr	r7		/* Save MSR in r7 */
	rlwinm	r4,r7,0,17,15	/* clear MSR:EE */
	rlwinm	r4,r4,0,28,26	/* Turn off DR bit */
	sync
	mtmsr	r4
	isync

	/* Stop DST streams */
	DSSALL
	sync

	/* Get the current enable bit of the L3CR into r4 */
	mfspr	r4,SPRN_L3CR

	/* Tweak some bits */
	rlwinm	r5,r3,0,0,0		/* r5 contains the new enable bit */
	rlwinm	r3,r3,0,22,20		/* Turn off the invalidate bit */
	rlwinm	r3,r3,0,2,31		/* Turn off the enable & PE bits */
	rlwinm	r3,r3,0,5,3		/* Turn off the clken bit */
	/* Check to see if we need to flush */
	rlwinm.	r4,r4,0,0,0		/* L3 currently enabled? */
	beq	2f

	/* Flush the cache.
	 */

	/* TODO: use HW flush assist */

	lis	r4,0x0008	/* 0x80000 iterations * 32B = 16MB */
	mtctr	r4
	li	r4,0
1:
	lwzx	r0,r0,r4
	dcbf	0,r4
	addi	r4,r4,32		/* Go to start of next cache line */
	bdnz	1b

2:
	/* Set up the L3CR configuration bits (and switch L3 off) */
	sync
	mtspr	SPRN_L3CR,r3
	sync

	oris	r3,r3,L3CR_L3RES@h		/* Set reserved bit 5 */
	mtspr	SPRN_L3CR,r3
	sync
	oris	r3,r3,L3CR_L3CLKEN@h		/* Set clken */
	mtspr	SPRN_L3CR,r3
	sync

	/* Wait for stabilize */
	li	r0,256
	mtctr	r0
1:	bdnz	1b

	/* Perform a global invalidation */
	ori	r3,r3,0x0400	/* set L3I */
	sync
	mtspr	SPRN_L3CR,r3
	sync
	isync

	/* We wait for the L3I bit to clear...... */
10:	mfspr	r3,SPRN_L3CR
	andi.	r4,r3,0x0400
	bne	10b

	/* Clear CLKEN */
	rlwinm	r3,r3,0,5,3		/* Turn off the clken bit */
	mtspr	SPRN_L3CR,r3
	sync

	/* Wait for stabilize */
	li	r0,256
	mtctr	r0
1:	bdnz	1b

	/* See if we need to enable the cache */
	cmplwi	r5,0
	beq	4f

	/* Enable the cache */
	oris	r3,r3,(L3CR_L3E|L3CR_L3CLKEN)@h
	mtspr	SPRN_L3CR,r3
	sync

	/* Wait for stabilize */
	li	r0,256
	mtctr	r0
1:	bdnz	1b

	/* Restore MSR (restores EE and DR bits to original state) */
4:	SYNC
	mtmsr	r7
	isync
	blr
367 | |||
_GLOBAL(_get_L3CR)
	/* Return the L3CR contents (0 on CPUs without an L3CR) */
	li	r3,0
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_L3CR
END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
	blr
375 | |||
376 | /* --- End of PowerLogix code --- | ||
377 | */ | ||
378 | |||
379 | |||
/* flush_disable_L1()	- Flush and disable L1 cache
 *
 * clobbers r0, r3, ctr, cr0
 * Must be called with interrupts disabled and MMU enabled.
 */
_GLOBAL(__flush_disable_L1)
	/* Stop pending AltiVec streams and memory accesses */
BEGIN_FTR_SECTION
	DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	sync

	/* Load counter to 0x4000 cache lines (512k) and
	 * load cache with data
	 */
	li	r3,0x4000	/* 512kB / 32B */
	mtctr	r3
	lis	r3,KERNELBASE@h
1:
	lwz	r0,0(r3)
	addi	r3,r3,0x0020	/* Go to start of next cache line */
	bdnz	1b
	isync
	sync

	/* Now flush those cache lines */
	li	r3,0x4000	/* 512kB / 32B */
	mtctr	r3
	lis	r3,KERNELBASE@h
1:
	dcbf	0,r3
	addi	r3,r3,0x0020	/* Go to start of next cache line */
	bdnz	1b
	sync

	/* We can now disable the L1 cache (HID0:DCE, HID0:ICE) */
	mfspr	r3,SPRN_HID0
	rlwinm	r3,r3,0,18,15	/* clear bits 16-17 (DCE, ICE) */
	mtspr	SPRN_HID0,r3
	sync
	isync
	blr
422 | |||
/* inval_enable_L1	- Invalidate and enable L1 cache
 *
 * Assumes L1 is already disabled and MSR:EE is off
 *
 * clobbers r3
 */
_GLOBAL(__inval_enable_L1)
	/* Enable and then Flash inval the instruction & data cache */
	mfspr	r3,SPRN_HID0
	ori	r3,r3, HID0_ICE|HID0_ICFI|HID0_DCE|HID0_DCI
	sync
	isync
	mtspr	SPRN_HID0,r3
	xori	r3,r3, HID0_ICFI|HID0_DCI	/* drop the flash-inval bits */
	mtspr	SPRN_HID0,r3
	sync

	blr
441 | |||
442 | |||
diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S new file mode 100644 index 000000000000..73f7c23b0dd4 --- /dev/null +++ b/arch/ppc/kernel/misc.S | |||
@@ -0,0 +1,1453 @@ | |||
1 | /* | ||
2 | * This file contains miscellaneous low-level functions. | ||
3 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
4 | * | ||
5 | * Largely rewritten by Cort Dougan (cort@cs.nmt.edu) | ||
6 | * and Paul Mackerras. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License | ||
10 | * as published by the Free Software Foundation; either version | ||
11 | * 2 of the License, or (at your option) any later version. | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #include <linux/config.h> | ||
16 | #include <linux/sys.h> | ||
17 | #include <asm/unistd.h> | ||
18 | #include <asm/errno.h> | ||
19 | #include <asm/processor.h> | ||
20 | #include <asm/page.h> | ||
21 | #include <asm/cache.h> | ||
22 | #include <asm/cputable.h> | ||
23 | #include <asm/mmu.h> | ||
24 | #include <asm/ppc_asm.h> | ||
25 | #include <asm/thread_info.h> | ||
26 | #include <asm/offsets.h> | ||
27 | |||
28 | .text | ||
29 | |||
30 | .align 5 | ||
/*
 * __delay(loops) - busy-wait for 'loops' iterations.
 * r3 = iteration count; returns immediately when r3 == 0.
 */
_GLOBAL(__delay)
	cmpwi	0,r3,0
	mtctr	r3
	beqlr			/* zero count: nothing to do */
1:	bdnz	1b		/* spin: one ctr decrement per iteration */
	blr
37 | |||
/*
 * Returns (address we're running at) - (address we were linked at)
 * for use before the text and data are mapped to KERNELBASE.
 */
_GLOBAL(reloc_offset)
	mflr	r0		/* save return address */
	bl	1f		/* branch-and-link to get the running PC */
1:	mflr	r3		/* r3 = actual address of label 1 */
	lis	r4,1b@ha	/* r4 = link-time address of label 1 */
	addi	r4,r4,1b@l
	subf	r3,r4,r3	/* running - linked */
	mtlr	r0
	blr
51 | |||
/*
 * add_reloc_offset(x) returns x + reloc_offset().
 */
_GLOBAL(add_reloc_offset)
	mflr	r0		/* save return address */
	bl	1f		/* get the running PC into lr */
1:	mflr	r5
	lis	r4,1b@ha	/* link-time address of label 1 */
	addi	r4,r4,1b@l
	subf	r5,r4,r5	/* r5 = relocation offset */
	add	r3,r3,r5	/* result = x + offset */
	mtlr	r0
	blr
65 | |||
/*
 * sub_reloc_offset(x) returns x - reloc_offset().
 */
_GLOBAL(sub_reloc_offset)
	mflr	r0		/* save return address */
	bl	1f		/* get the running PC into lr */
1:	mflr	r5
	lis	r4,1b@ha	/* link-time address of label 1 */
	addi	r4,r4,1b@l
	subf	r5,r4,r5	/* r5 = relocation offset */
	subf	r3,r5,r3	/* result = x - offset */
	mtlr	r0
	blr
79 | |||
/*
 * reloc_got2 runs through the .got2 section adding an offset
 * to each entry.
 * r3 = offset to add; clobbers r0, r4, r7, r8, ctr.
 */
_GLOBAL(reloc_got2)
	mflr	r11
	lis	r7,__got2_start@ha
	addi	r7,r7,__got2_start@l
	lis	r8,__got2_end@ha
	addi	r8,r8,__got2_end@l
	subf	r8,r7,r8
	srwi.	r8,r8,2		/* number of 4-byte GOT entries */
	beqlr			/* empty .got2: nothing to do */
	mtctr	r8
	bl	1f		/* compute current relocation offset */
1:	mflr	r0
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r0,r4,r0
	add	r7,r0,r7	/* relocate the table pointer itself */
2:	lwz	r0,0(r7)
	add	r0,r0,r3	/* add caller-supplied offset to each entry */
	stw	r0,0(r7)
	addi	r7,r7,4
	bdnz	2b
	mtlr	r11
	blr
107 | |||
/*
 * identify_cpu,
 * called with r3 = data offset and r4 = CPU number
 * doesn't change r3
 *
 * Scans the cpu_specs table for the entry matching this CPU's PVR
 * and stores its (unrelocated) pointer into cur_cpu_spec[cpu].
 */
_GLOBAL(identify_cpu)
	addis	r8,r3,cpu_specs@ha
	addi	r8,r8,cpu_specs@l
	mfpvr	r7		/* processor version register */
1:
	lwz	r5,CPU_SPEC_PVR_MASK(r8)
	and	r5,r5,r7
	lwz	r6,CPU_SPEC_PVR_VALUE(r8)
	cmplw	0,r6,r5		/* does this table entry match the PVR? */
	beq	1f
	addi	r8,r8,CPU_SPEC_ENTRY_SIZE
	b	1b		/* no match: try the next entry */
1:
	addis	r6,r3,cur_cpu_spec@ha
	addi	r6,r6,cur_cpu_spec@l
	slwi	r4,r4,2		/* index by CPU number (4-byte pointers) */
	sub	r8,r8,r3	/* store the unrelocated pointer */
	stwx	r8,r4,r6
	blr
132 | |||
/*
 * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
 * and writes nop's over sections of code that don't apply for this cpu.
 * r3 = data offset (not changed)
 *
 * Each fixup table entry is 4 words: mask, value, section begin,
 * section end. A section is nopped out when (features & mask) != value.
 */
_GLOBAL(do_cpu_ftr_fixups)
	/* Get CPU 0 features */
	addis	r6,r3,cur_cpu_spec@ha
	addi	r6,r6,cur_cpu_spec@l
	lwz	r4,0(r6)
	add	r4,r4,r3	/* relocate the spec pointer */
	lwz	r4,CPU_SPEC_FEATURES(r4)

	/* Get the fixup table */
	addis	r6,r3,__start___ftr_fixup@ha
	addi	r6,r6,__start___ftr_fixup@l
	addis	r7,r3,__stop___ftr_fixup@ha
	addi	r7,r7,__stop___ftr_fixup@l

	/* Do the fixup */
1:	cmplw	0,r6,r7
	bgelr			/* done when the table end is reached */
	addi	r6,r6,16	/* advance past this 4-word entry */
	lwz	r8,-16(r6)	/* mask */
	and	r8,r8,r4
	lwz	r9,-12(r6)	/* value */
	cmplw	0,r8,r9
	beq	1b		/* feature present: leave the code alone */
	lwz	r8,-8(r6)	/* section begin */
	lwz	r9,-4(r6)	/* section end */
	subf.	r9,r8,r9
	beq	1b		/* empty section: nothing to patch */
	/* write nops over the section of code */
	/* todo: if large section, add a branch at the start of it */
	srwi	r9,r9,2
	mtctr	r9
	add	r8,r8,r3
	lis	r0,0x60000000@h	/* nop */
3:	stw	r0,0(r8)
	andi.	r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
	beq	2f		/* unified cache: no icache maintenance needed */
	dcbst	0,r8		/* suboptimal, but simpler */
	sync
	icbi	0,r8
2:	addi	r8,r8,4
	bdnz	3b
	sync			/* additional sync needed on g4 */
	isync
	b	1b
182 | |||
/*
 * call_setup_cpu - call the setup_cpu function for this cpu
 * r3 = data offset, r24 = cpu number
 *
 * Setup function is called with:
 * r3 = data offset
 * r4 = CPU number
 * r5 = ptr to CPU spec (relocated)
 */
_GLOBAL(call_setup_cpu)
	addis	r5,r3,cur_cpu_spec@ha
	addi	r5,r5,cur_cpu_spec@l
	slwi	r4,r24,2	/* index cur_cpu_spec[] by CPU number */
	lwzx	r5,r4,r5
	add	r5,r5,r3	/* relocate the spec pointer */
	lwz	r6,CPU_SPEC_SETUP(r5)
	add	r6,r6,r3	/* relocate the setup function address too */
	mtctr	r6
	mr	r4,r24
	bctr			/* tail-call: setup returns to our caller */
203 | |||
#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx)

/* This gets called by via-pmu.c to switch the PLL selection
 * on 750fx CPU. This function should really be moved to some
 * other place (as most of the cpufreq code in via-pmu should be).
 *
 * r3 = 0 selects PLL0, non-zero selects PLL1. Runs with
 * interrupts disabled around the HID0/HID1 updates.
 */
_GLOBAL(low_choose_750fx_pll)
	/* Clear MSR:EE */
	mfmsr	r7
	rlwinm	r0,r7,0,17,15
	mtmsr	r0

	/* If switching to PLL1, disable HID0:BTIC */
	cmplwi	cr0,r3,0
	beq	1f
	mfspr	r5,SPRN_HID0
	rlwinm	r5,r5,0,27,25	/* clear the BTIC bit */
	sync
	mtspr	SPRN_HID0,r5
	isync
	sync

1:
	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1	/* Build a HID1:PS bit from parameter */
	rlwinm	r5,r3,16,15,15	/* Clear out HID1:PS from value read */
	rlwinm	r4,r4,0,16,14	/* Could have I used rlwimi here ? */
	or	r4,r4,r5
	mtspr	SPRN_HID1,r4

	/* Store new HID1 image */
	rlwinm	r6,r1,0,0,18	/* thread_info from the stack pointer */
	lwz	r6,TI_CPU(r6)
	slwi	r6,r6,2		/* per-cpu slot in nap_save_hid1[] */
	addis	r6,r6,nap_save_hid1@ha
	stw	r4,nap_save_hid1@l(r6)

	/* If switching to PLL0, enable HID0:BTIC */
	cmplwi	cr0,r3,0
	bne	1f
	mfspr	r5,SPRN_HID0
	ori	r5,r5,HID0_BTIC
	sync
	mtspr	SPRN_HID0,r5
	isync
	sync

1:
	/* Return */
	mtmsr	r7		/* restore the saved MSR (re-enables EE) */
	blr

/* low_choose_7447a_dfs - set/clear the 7447A dynamic frequency
 * switch bit in HID1. r3 = 0 or 1.
 */
_GLOBAL(low_choose_7447a_dfs)
	/* Clear MSR:EE */
	mfmsr	r7
	rlwinm	r0,r7,0,17,15
	mtmsr	r0

	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1
	insrwi	r4,r3,1,9	/* insert parameter into bit 9 */
	sync
	mtspr	SPRN_HID1,r4
	sync
	isync

	/* Return */
	mtmsr	r7		/* restore the saved MSR */
	blr

#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */
275 | |||
/* void local_save_flags_ptr(unsigned long *flags)
 * Store the current MSR (which contains the EE interrupt-enable
 * bit) through the pointer in r3.
 */
_GLOBAL(local_save_flags_ptr)
	mfmsr	r4
	stw	r4,0(r3)
	blr
/*
 * Need these nops here for taking over save/restore to
 * handle lost intrs
 * -- Cort
 */
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
_GLOBAL(local_save_flags_ptr_end)
304 | |||
/* void local_irq_restore(unsigned long flags) */
_GLOBAL(local_irq_restore)
/*
 * Just set/clear the MSR_EE bit through restore/flags but do not
 * change anything else. This is needed by the RT system and makes
 * sense anyway.
 * -- Cort
 */
	mfmsr	r4
	/* Copy all except the MSR_EE bit from r4 (current MSR value)
	   to r3. This is the sort of thing the rlwimi instruction is
	   designed for. -- paulus. */
	rlwimi	r3,r4,0,17,15
	/* Check if things are setup the way we want _already_. */
	cmpw	0,r3,r4
	beqlr			/* EE already in the requested state */
1:	SYNC
	mtmsr	r3
	SYNC
	blr
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
_GLOBAL(local_irq_restore_end)
345 | |||
/* Disable external interrupts; returns the old EE bit in r3. */
_GLOBAL(local_irq_disable)
	mfmsr	r0		/* Get current interrupt state */
	rlwinm	r3,r0,16+1,32-1,31	/* Extract old value of 'EE' */
	rlwinm	r0,r0,0,17,15	/* clear MSR_EE in r0 */
	SYNC			/* Some chip revs have problems here... */
	mtmsr	r0		/* Update machine state */
	blr			/* Done */
/*
 * Need these nops here for taking over save/restore to
 * handle lost intrs
 * -- Cort
 */
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
_GLOBAL(local_irq_disable_end)
374 | |||
/* Enable external interrupts by setting MSR_EE. */
_GLOBAL(local_irq_enable)
	mfmsr	r3		/* Get current state */
	ori	r3,r3,MSR_EE	/* Turn on 'EE' bit */
	SYNC			/* Some chip revs have problems here... */
	mtmsr	r3		/* Update machine state */
	blr
/*
 * Need these nops here for taking over save/restore to
 * handle lost intrs
 * -- Cort
 */
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
_GLOBAL(local_irq_enable_end)
403 | |||
/*
 * complement mask on the msr then "or" some values on.
 * _nmask_and_or_msr(nmask, value_to_or)
 * New MSR = (old MSR & ~nmask) | value_to_or.
 */
_GLOBAL(_nmask_and_or_msr)
	mfmsr	r0		/* Get current msr */
	andc	r0,r0,r3	/* And off the bits set in r3 (first parm) */
	or	r0,r0,r4	/* Or on the bits in r4 (second parm) */
	SYNC			/* Some chip revs have problems here... */
	mtmsr	r0		/* Update machine state */
	isync
	blr			/* Done */
416 | |||
417 | |||
/*
 * Flush MMU TLB
 * One variant per MMU family, selected at compile time.
 */
_GLOBAL(_tlbia)
#if defined(CONFIG_40x)
	sync			/* Flush to memory before changing mapping */
	tlbia
	isync			/* Flush shadow TLB */
#elif defined(CONFIG_44x)
	li	r3,0
	sync

	/* Load high watermark */
	lis	r4,tlb_44x_hwater@ha
	lwz	r5,tlb_44x_hwater@l(r4)

	/* Invalidate every TLB entry up to the watermark */
1:	tlbwe	r3,r3,PPC44x_TLB_PAGEID
	addi	r3,r3,1
	cmpw	0,r3,r5
	ble	1b

	isync
#elif defined(CONFIG_FSL_BOOKE)
	/* Invalidate all entries in TLB0 */
	li	r3, 0x04
	tlbivax	0,3
	/* Invalidate all entries in TLB1 */
	li	r3, 0x0c
	tlbivax	0,3
	/* Invalidate all entries in TLB2 */
	li	r3, 0x14
	tlbivax	0,3
	/* Invalidate all entries in TLB3 */
	li	r3, 0x1c
	tlbivax	0,3
	msync
#ifdef CONFIG_SMP
	tlbsync
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
	/* Classic hash MMU, SMP: take mmu_hash_lock with EE and
	 * data translation off, then flush under the lock. */
	rlwinm	r8,r1,0,0,18	/* thread_info from the stack pointer */
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,10	/* tag the lock word with our cpu */
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15	/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26	/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)		/* physical address: DR is off */
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b		/* lock held: spin */
	stwcx.	r8,0,r9
	bne-	10b		/* lost the reservation: retry */
	sync
	tlbia
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)	/* clear mmu_hash_lock */
	mtmsr	r10		/* restore EE/DR */
	SYNC_601
	isync
#else /* CONFIG_SMP */
	sync
	tlbia
	sync
#endif /* CONFIG_SMP */
#endif /* ! defined(CONFIG_40x) */
	blr
493 | |||
/*
 * Flush MMU TLB for a particular address
 * r3 = effective address to invalidate.
 */
_GLOBAL(_tlbie)
#if defined(CONFIG_40x)
	tlbsx.	r3, 0, r3	/* look the address up in the TLB */
	bne	10f		/* no entry: nothing to invalidate */
	sync
	/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
	 * Since 25 is the V bit in the TLB_TAG, loading this value will invalidate
	 * the TLB entry. */
	tlbwe	r3, r3, TLB_TAG
	isync
10:
#elif defined(CONFIG_44x)
	mfspr	r4,SPRN_MMUCR
	mfspr	r5,SPRN_PID	/* Get PID */
	rlwimi	r4,r5,0,24,31	/* Set TID */
	mtspr	SPRN_MMUCR,r4

	tlbsx.	r3, 0, r3	/* look the address up in the TLB */
	bne	10f		/* no entry: nothing to invalidate */
	sync
	/* There are only 64 TLB entries, so r3 < 64,
	 * which means bit 22, is clear. Since 22 is
	 * the V bit in the TLB_PAGEID, loading this
	 * value will invalidate the TLB entry.
	 */
	tlbwe	r3, r3, PPC44x_TLB_PAGEID
	isync
10:
#elif defined(CONFIG_FSL_BOOKE)
	rlwinm	r4, r3, 0, 0, 19	/* page-align the address */
	ori	r5, r4, 0x08	/* TLBSEL = 1 */
	ori	r6, r4, 0x10	/* TLBSEL = 2 */
	ori	r7, r4, 0x18	/* TLBSEL = 3 */
	tlbivax	0, r4
	tlbivax	0, r5
	tlbivax	0, r6
	tlbivax	0, r7
	msync
#if defined(CONFIG_SMP)
	tlbsync
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
	/* Classic hash MMU, SMP: take mmu_hash_lock with EE and
	 * data translation off, then invalidate under the lock. */
	rlwinm	r8,r1,0,0,18	/* thread_info from the stack pointer */
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,11	/* tag the lock word with our cpu */
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15	/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26	/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)		/* physical address: DR is off */
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b		/* lock held: spin */
	stwcx.	r8,0,r9
	bne-	10b		/* lost the reservation: retry */
	eieio
	tlbie	r3
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)	/* clear mmu_hash_lock */
	mtmsr	r10		/* restore EE/DR */
	SYNC_601
	isync
#else /* CONFIG_SMP */
	tlbie	r3
	sync
#endif /* CONFIG_SMP */
#endif /* ! CONFIG_40x */
	blr
573 | |||
/*
 * Flush instruction cache.
 * This is a no-op on the 601.
 *
 * Selects the per-family invalidation mechanism at compile time:
 * 8xx via IC_CST, 4xx via iccci, e500 via L1CSR1, classic 6xx/7xx
 * via the HID0 flash-invalidate bit.
 */
_GLOBAL(flush_instruction_cache)
#if defined(CONFIG_8xx)
	isync
	lis	r5, IDC_INVALL@h
	mtspr	SPRN_IC_CST, r5
#elif defined(CONFIG_4xx)
#ifdef CONFIG_403GCX
	li	r3, 512
	mtctr	r3
	lis	r4, KERNELBASE@h
1:	iccci	0, r4
	addi	r4, r4, 16
	bdnz	1b
#else
	lis	r3, KERNELBASE@h
	iccci	0,r3
#endif
#elif defined(CONFIG_FSL_BOOKE)	/* use defined() like the other tests here */
	mfspr	r3,SPRN_L1CSR1
	ori	r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
	mtspr	SPRN_L1CSR1,r3
#else
	mfspr	r3,SPRN_PVR
	rlwinm	r3,r3,16,16,31	/* extract the processor version */
	cmpwi	0,r3,1
	beqlr			/* for 601, do nothing */
	/* 603/604 processor - use invalidate-all bit in HID0 */
	mfspr	r3,SPRN_HID0
	ori	r3,r3,HID0_ICFI
	mtspr	SPRN_HID0,r3
#endif /* CONFIG_8xx/4xx */
	isync
	blr
611 | |||
/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 * This is a no-op on the 601.
 *
 * flush_icache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(flush_icache_range)
BEGIN_FTR_SECTION
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFSET(PPC_FEATURE_UNIFIED_CACHE)
	/* NOTE(review): PPC_FEATURE_* are user-visible AT_HWCAP bits,
	 * while feature-fixup sections elsewhere in this file test
	 * CPU_FTR_* bits (see do_cpu_ftr_fixups) — confirm the
	 * intended constant here. */
	li	r5,L1_CACHE_LINE_SIZE-1
	andc	r3,r3,r5	/* round start down to a line boundary */
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,LG_L1_CACHE_LINE_SIZE	/* line count */
	beqlr				/* empty range: nothing to do */
	mtctr	r4
	mr	r6,r3		/* keep start for the icbi pass */
1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_LINE_SIZE
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
	mtctr	r4
2:	icbi	0,r6
	addi	r6,r6,L1_CACHE_LINE_SIZE
	bdnz	2b
	sync				/* additional sync needed on g4 */
	isync
	blr
/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 *
 * clean_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(clean_dcache_range)
	li	r5,L1_CACHE_LINE_SIZE-1
	andc	r3,r3,r5	/* round start down to a line boundary */
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,LG_L1_CACHE_LINE_SIZE	/* line count */
	beqlr			/* empty range: nothing to do */
	mtctr	r4

1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_LINE_SIZE
	bdnz	1b
	sync			/* wait for dcbst's to get to ram */
	blr
663 | |||
/*
 * Write any modified data cache blocks out to memory and invalidate them.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(flush_dcache_range)
	li	r5,L1_CACHE_LINE_SIZE-1
	andc	r3,r3,r5	/* round start down to a line boundary */
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,LG_L1_CACHE_LINE_SIZE	/* line count */
	beqlr			/* empty range: nothing to do */
	mtctr	r4

1:	dcbf	0,r3		/* flush & invalidate the line */
	addi	r3,r3,L1_CACHE_LINE_SIZE
	bdnz	1b
	sync			/* wait for dcbf's to get to ram */
	blr
684 | |||
/*
 * Like above, but invalidate the D-cache. This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 *
 * invalidate_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(invalidate_dcache_range)
	li	r5,L1_CACHE_LINE_SIZE-1
	andc	r3,r3,r5	/* round start down to a line boundary */
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,LG_L1_CACHE_LINE_SIZE	/* line count */
	beqlr			/* empty range: nothing to do */
	mtctr	r4

1:	dcbi	0,r3		/* invalidate without writeback */
	addi	r3,r3,L1_CACHE_LINE_SIZE
	bdnz	1b
	sync			/* wait for dcbi's to get to ram */
	blr
706 | |||
#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * 40x cores have 8K or 16K dcache and 32 byte line size.
 * 44x has a 32K dcache and 32 byte line size.
 * 8xx has 1, 2, 4, 8K variants.
 * For now, cover the worst case of the 44x.
 * Must be called with external interrupts disabled.
 *
 * Displaces the whole dcache by loading twice its size worth of
 * kernel text, pushing out (and writing back) every dirty line.
 */
#define CACHE_NWAYS	64
#define CACHE_NLINES	16

_GLOBAL(flush_dcache_all)
	li	r4, (2 * CACHE_NWAYS * CACHE_NLINES)	/* 2x cache capacity */
	mtctr	r4
	lis	r5, KERNELBASE@h
1:	lwz	r3, 0(r5)	/* Load one word from every line */
	addi	r5, r5, L1_CACHE_LINE_SIZE
	bdnz	1b
	blr
#endif /* CONFIG_NOT_COHERENT_CACHE */
727 | |||
/*
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 * This is a no-op on the 601 which has a unified cache.
 *
 * void __flush_dcache_icache(void *page)
 */
_GLOBAL(__flush_dcache_icache)
BEGIN_FTR_SECTION
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFSET(PPC_FEATURE_UNIFIED_CACHE)
	rlwinm	r3,r3,0,0,19	/* Get page base address */
	li	r4,4096/L1_CACHE_LINE_SIZE	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3		/* keep base for the icbi pass */
0:	dcbst	0,r3		/* Write line to ram */
	addi	r3,r3,L1_CACHE_LINE_SIZE
	bdnz	0b
	sync
	mtctr	r4
1:	icbi	0,r6		/* invalidate the icache copy */
	addi	r6,r6,L1_CACHE_LINE_SIZE
	bdnz	1b
	sync
	isync
	blr
755 | |||
/*
 * Flush a particular page from the data cache to RAM, identified
 * by its physical address. We turn off the MMU so we can just use
 * the physical address (this may be a highmem page without a kernel
 * mapping).
 *
 * void __flush_dcache_icache_phys(unsigned long physaddr)
 */
_GLOBAL(__flush_dcache_icache_phys)
BEGIN_FTR_SECTION
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFSET(PPC_FEATURE_UNIFIED_CACHE)
	mfmsr	r10
	rlwinm	r0,r10,0,28,26	/* clear DR */
	mtmsr	r0		/* data translation off: real addressing */
	isync
	rlwinm	r3,r3,0,0,19	/* Get page base address */
	li	r4,4096/L1_CACHE_LINE_SIZE	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3		/* keep base for the icbi pass */
0:	dcbst	0,r3		/* Write line to ram */
	addi	r3,r3,L1_CACHE_LINE_SIZE
	bdnz	0b
	sync
	mtctr	r4
1:	icbi	0,r6		/* invalidate the icache copy */
	addi	r6,r6,L1_CACHE_LINE_SIZE
	bdnz	1b
	sync
	mtmsr	r10		/* restore DR */
	isync
	blr
788 | |||
/*
 * Clear pages using the dcbz instruction, which doesn't cause any
 * memory traffic (except to write out any cache lines which get
 * displaced). This only works on cacheable memory.
 *
 * void clear_pages(void *page, int order) ;
 */
_GLOBAL(clear_pages)
	li	r0,4096/L1_CACHE_LINE_SIZE
	slw	r0,r0,r4	/* scale line count by 2^order pages */
	mtctr	r0
#ifdef CONFIG_8xx
	li	r4, 0
1:	stw	r4, 0(r3)	/* 8xx: clear by plain stores */
	stw	r4, 4(r3)
	stw	r4, 8(r3)
	stw	r4, 12(r3)
#else
1:	dcbz	0,r3		/* zero a whole cache line at once */
#endif
	addi	r3,r3,L1_CACHE_LINE_SIZE
	bdnz	1b
	blr
812 | |||
/*
 * Copy a whole page. We use the dcbz instruction on the destination
 * to reduce memory traffic (it eliminates the unnecessary reads of
 * the destination into cache). This requires that the destination
 * is cacheable.
 */
#define COPY_16_BYTES		\
	lwz	r6,4(r4);	\
	lwz	r7,8(r4);	\
	lwz	r8,12(r4);	\
	lwzu	r9,16(r4);	\
	stw	r6,4(r3);	\
	stw	r7,8(r3);	\
	stw	r8,12(r3);	\
	stwu	r9,16(r3)

_GLOBAL(copy_page)
	addi	r3,r3,-4	/* pre-bias for the lwzu/stwu updates */
	addi	r4,r4,-4

#ifdef CONFIG_8xx
	/* don't use prefetch on 8xx */
	li	r0,4096/L1_CACHE_LINE_SIZE
	mtctr	r0
1:	COPY_16_BYTES
	bdnz	1b
	blr

#else	/* not 8xx, we can prefetch */
	li	r5,4

#if MAX_COPY_PREFETCH > 1
	/* prime the prefetch queue with the first source lines */
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	mtctr	r0
11:	dcbt	r11,r4
	addi	r11,r11,L1_CACHE_LINE_SIZE
	bdnz	11b
#else /* MAX_COPY_PREFETCH == 1 */
	dcbt	r5,r4
	li	r11,L1_CACHE_LINE_SIZE+4
#endif /* MAX_COPY_PREFETCH */
	li	r0,4096/L1_CACHE_LINE_SIZE - MAX_COPY_PREFETCH
	crclr	4*cr0+eq	/* eq clear: first (prefetching) pass */
2:
	mtctr	r0
1:
	dcbt	r11,r4		/* prefetch a source line ahead */
	dcbz	r5,r3		/* avoid reading the dest line first */
	COPY_16_BYTES
#if L1_CACHE_LINE_SIZE >= 32
	COPY_16_BYTES
#if L1_CACHE_LINE_SIZE >= 64
	COPY_16_BYTES
	COPY_16_BYTES
#if L1_CACHE_LINE_SIZE >= 128
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
#endif
#endif
#endif
	bdnz	1b
	beqlr			/* second pass finished: done */
	crnot	4*cr0+eq,4*cr0+eq
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	b	2b		/* copy the remaining prefetched lines */
#endif	/* CONFIG_8xx */
883 | |||
/*
 * void atomic_clear_mask(atomic_t mask, atomic_t *addr)
 * void atomic_set_mask(atomic_t mask, atomic_t *addr);
 * Atomically clear/set the bits of 'mask' in *addr using a
 * lwarx/stwcx. retry loop.
 */
_GLOBAL(atomic_clear_mask)
10:	lwarx	r5,0,r4		/* load with reservation */
	andc	r5,r5,r3	/* clear the masked bits */
	PPC405_ERR77(0,r4)
	stwcx.	r5,0,r4		/* store if still reserved */
	bne-	10b		/* lost the reservation: retry */
	blr
_GLOBAL(atomic_set_mask)
10:	lwarx	r5,0,r4		/* load with reservation */
	or	r5,r5,r3	/* set the masked bits */
	PPC405_ERR77(0,r4)
	stwcx.	r5,0,r4		/* store if still reserved */
	bne-	10b		/* lost the reservation: retry */
	blr
902 | |||
/*
 * I/O string operations
 *
 * insb(port, buf, len)
 * outsb(port, buf, len)
 * insw(port, buf, len)
 * outsw(port, buf, len)
 * insl(port, buf, len)
 * outsl(port, buf, len)
 * insw_ns(port, buf, len)
 * outsw_ns(port, buf, len)
 * insl_ns(port, buf, len)
 * outsl_ns(port, buf, len)
 *
 * The *_ns versions don't do byte-swapping.
 *
 * All take r3 = port address, r4 = buffer, r5 = count; all return
 * immediately when count <= 0. eieio orders each I/O access.
 */
_GLOBAL(_insb)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,1		/* pre-bias for stbu */
	blelr-
00:	lbz	r5,0(r3)
	eieio
	stbu	r5,1(r4)
	bdnz	00b
	blr

_GLOBAL(_outsb)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,1		/* pre-bias for lbzu */
	blelr-
00:	lbzu	r5,1(r4)
	stb	r5,0(r3)
	eieio
	bdnz	00b
	blr

_GLOBAL(_insw)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,2		/* pre-bias for sthu */
	blelr-
00:	lhbrx	r5,0,r3		/* byte-reversed load from the port */
	eieio
	sthu	r5,2(r4)
	bdnz	00b
	blr

_GLOBAL(_outsw)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,2		/* pre-bias for lhzu */
	blelr-
00:	lhzu	r5,2(r4)
	eieio
	sthbrx	r5,0,r3		/* byte-reversed store to the port */
	bdnz	00b
	blr

_GLOBAL(_insl)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,4		/* pre-bias for stwu */
	blelr-
00:	lwbrx	r5,0,r3		/* byte-reversed load from the port */
	eieio
	stwu	r5,4(r4)
	bdnz	00b
	blr

_GLOBAL(_outsl)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,4		/* pre-bias for lwzu */
	blelr-
00:	lwzu	r5,4(r4)
	stwbrx	r5,0,r3		/* byte-reversed store to the port */
	eieio
	bdnz	00b
	blr

_GLOBAL(__ide_mm_insw)
_GLOBAL(_insw_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,2		/* pre-bias for sthu */
	blelr-
00:	lhz	r5,0(r3)	/* native-endian: no swap */
	eieio
	sthu	r5,2(r4)
	bdnz	00b
	blr

_GLOBAL(__ide_mm_outsw)
_GLOBAL(_outsw_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,2		/* pre-bias for lhzu */
	blelr-
00:	lhzu	r5,2(r4)
	sth	r5,0(r3)	/* native-endian: no swap */
	eieio
	bdnz	00b
	blr

_GLOBAL(__ide_mm_insl)
_GLOBAL(_insl_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,4		/* pre-bias for stwu */
	blelr-
00:	lwz	r5,0(r3)	/* native-endian: no swap */
	eieio
	stwu	r5,4(r4)
	bdnz	00b
	blr

_GLOBAL(__ide_mm_outsl)
_GLOBAL(_outsl_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,4		/* pre-bias for lwzu */
	blelr-
00:	lwzu	r5,4(r4)
	stw	r5,0(r3)	/* native-endian: no swap */
	eieio
	bdnz	00b
	blr
1032 | |||
/*
 * Extended precision shifts.
 *
 * Updated to be valid for shift counts from 0 to 63 inclusive.
 * -- Gabriel
 *
 * R3/R4 has 64 bit value
 * R5 has shift count
 * result in R3/R4
 *
 * ashrdi3: arithmetic right shift (sign propagation)
 * lshrdi3: logical right shift
 * ashldi3: left shift
 */
_GLOBAL(__ashrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	rlwinm	r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
	sraw	r7,r3,r7	# t2 = MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	slw	r7,r7,r8	# t2 = (count < 32) ? 0 : t2
	sraw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr

_GLOBAL(__ashldi3)
	subfic	r6,r5,32
	slw	r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
	addi	r7,r5,32	# could be xori, or addi with -32
	srw	r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
	slw	r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
	or	r3,r3,r6	# MSW |= t1
	slw	r4,r4,r5	# LSW = LSW << count
	or	r3,r3,r7	# MSW |= t2
	blr

_GLOBAL(__lshrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	srw	r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	srw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr
1081 | |||
/* abs(x): branch-free absolute value of r3. */
_GLOBAL(abs)
	srawi	r4,r3,31	/* r4 = 0 if x >= 0, -1 if x < 0 */
	xor	r3,r3,r4	/* conditional one's complement */
	sub	r3,r3,r4	/* +1 when x was negative */
	blr
1087 | |||
/* _get_SP: return the current stack pointer in r3. */
_GLOBAL(_get_SP)
	mr	r3,r1		/* Close enough */
	blr
1091 | |||
/*
 * These are used in the alignment trap handler when emulating
 * single-precision loads and stores.
 * We restore and save the fpscr so the task gets the same result
 * and exceptions as if the cpu had performed the load or store.
 */

#if defined(CONFIG_4xx) || defined(CONFIG_E500)
_GLOBAL(cvt_fd)
	lfs	0,0(r3)		/* load single, widens to double */
	stfd	0,0(r4)
	blr

_GLOBAL(cvt_df)
	lfd	0,0(r3)		/* load double, rounds to single on store */
	stfs	0,0(r4)
	blr
#else
_GLOBAL(cvt_fd)
	lfd	0,-4(r5)	/* load up fpscr value */
	mtfsf	0xff,0
	lfs	0,0(r3)
	stfd	0,0(r4)
	mffs	0		/* save new fpscr value */
	stfd	0,-4(r5)
	blr

_GLOBAL(cvt_df)
	lfd	0,-4(r5)	/* load up fpscr value */
	mtfsf	0xff,0
	lfd	0,0(r3)
	stfs	0,0(r4)
	mffs	0		/* save new fpscr value */
	stfd	0,-4(r5)
	blr
#endif
1128 | |||
/*
 * Create a kernel thread
 *   kernel_thread(fn, arg, flags)
 * Implemented as a clone() syscall with CLONE_VM|CLONE_UNTRACED OR'd
 * into the caller's flags.  The parent returns clone()'s result; the
 * child builds a fresh top-level stack frame, calls fn(arg), and
 * invokes exit(0) if fn ever returns.
 */
_GLOBAL(kernel_thread)
	stwu	r1,-16(r1)	/* frame; r30/r31 must survive the syscall */
	stw	r30,8(r1)
	stw	r31,12(r1)
	mr	r30,r3		/* function */
	mr	r31,r4		/* argument */
	ori	r3,r5,CLONE_VM	/* flags */
	oris	r3,r3,CLONE_UNTRACED>>16
	li	r4,0		/* new sp (unused) */
	li	r0,__NR_clone
	sc
	cmpwi	0,r3,0		/* parent or child? */
	bne	1f		/* return if parent */
	li	r0,0		/* make top-level stack frame */
	stwu	r0,-16(r1)
	mtlr	r30		/* fn addr in lr */
	mr	r3,r31		/* load arg and call fn */
	blrl
	li	r0,__NR_exit	/* exit if function returns */
	li	r3,0
	sc
1:	lwz	r30,8(r1)	/* parent: restore and return child pid */
	lwz	r31,12(r1)
	addi	r1,r1,16
	blr
1158 | |||
/*
 * This routine is just here to keep GCC happy - sigh...
 * (gcc emits a call to __main at the start of main() on some targets
 * to run constructors; the kernel has none, so this is an empty stub.)
 */
_GLOBAL(__main)
	blr
1164 | |||
/*
 * SYSCALL(name) emits a libc-style user-callable syscall stub:
 * issue the syscall; if the summary-overflow bit is clear (no error),
 * bnslr returns directly with the result in r3.  Otherwise r3 holds
 * the error value, which is stored into the global `errno' and -1 is
 * returned instead.
 */
#define SYSCALL(name) \
_GLOBAL(name) \
	li	r0,__NR_##name; \
	sc; \
	bnslr; \
	lis	r4,errno@ha; \
	stw	r3,errno@l(r4); \
	li	r3,-1; \
	blr

SYSCALL(execve)
1176 | |||
/* Why isn't this a) automatic, b) written in 'C'? */
/*
 * The PPC32 system call dispatch table: one .long handler address per
 * syscall number, indexed by the number the caller puts in r0.
 * Slot order must match include/asm-ppc/unistd.h exactly; unused or
 * removed numbers keep their slot as sys_ni_syscall so later entries
 * do not shift.  The /* N */-style comments mark every fifth number.
 */
	.data
	.align 4
_GLOBAL(sys_call_table)
	.long sys_restart_syscall /* 0 */
	.long sys_exit
	.long ppc_fork
	.long sys_read
	.long sys_write
	.long sys_open		/* 5 */
	.long sys_close
	.long sys_waitpid
	.long sys_creat
	.long sys_link
	.long sys_unlink	/* 10 */
	.long sys_execve
	.long sys_chdir
	.long sys_time
	.long sys_mknod
	.long sys_chmod		/* 15 */
	.long sys_lchown
	.long sys_ni_syscall	/* old break syscall holder */
	.long sys_stat
	.long sys_lseek
	.long sys_getpid	/* 20 */
	.long sys_mount
	.long sys_oldumount
	.long sys_setuid
	.long sys_getuid
	.long sys_stime		/* 25 */
	.long sys_ptrace
	.long sys_alarm
	.long sys_fstat
	.long sys_pause
	.long sys_utime		/* 30 */
	.long sys_ni_syscall	/* old stty syscall holder */
	.long sys_ni_syscall	/* old gtty syscall holder */
	.long sys_access
	.long sys_nice
	.long sys_ni_syscall	/* 35 */	/* old ftime syscall holder */
	.long sys_sync
	.long sys_kill
	.long sys_rename
	.long sys_mkdir
	.long sys_rmdir		/* 40 */
	.long sys_dup
	.long sys_pipe
	.long sys_times
	.long sys_ni_syscall	/* old prof syscall holder */
	.long sys_brk		/* 45 */
	.long sys_setgid
	.long sys_getgid
	.long sys_signal
	.long sys_geteuid
	.long sys_getegid	/* 50 */
	.long sys_acct
	.long sys_umount	/* recycled never used phys() */
	.long sys_ni_syscall	/* old lock syscall holder */
	.long sys_ioctl
	.long sys_fcntl		/* 55 */
	.long sys_ni_syscall	/* old mpx syscall holder */
	.long sys_setpgid
	.long sys_ni_syscall	/* old ulimit syscall holder */
	.long sys_olduname
	.long sys_umask		/* 60 */
	.long sys_chroot
	.long sys_ustat
	.long sys_dup2
	.long sys_getppid
	.long sys_getpgrp	/* 65 */
	.long sys_setsid
	.long sys_sigaction
	.long sys_sgetmask
	.long sys_ssetmask
	.long sys_setreuid	/* 70 */
	.long sys_setregid
	.long ppc_sigsuspend
	.long sys_sigpending
	.long sys_sethostname
	.long sys_setrlimit	/* 75 */
	.long sys_old_getrlimit
	.long sys_getrusage
	.long sys_gettimeofday
	.long sys_settimeofday
	.long sys_getgroups	/* 80 */
	.long sys_setgroups
	.long ppc_select
	.long sys_symlink
	.long sys_lstat
	.long sys_readlink	/* 85 */
	.long sys_uselib
	.long sys_swapon
	.long sys_reboot
	.long old_readdir
	.long sys_mmap		/* 90 */
	.long sys_munmap
	.long sys_truncate
	.long sys_ftruncate
	.long sys_fchmod
	.long sys_fchown	/* 95 */
	.long sys_getpriority
	.long sys_setpriority
	.long sys_ni_syscall	/* old profil syscall holder */
	.long sys_statfs
	.long sys_fstatfs	/* 100 */
	.long sys_ni_syscall
	.long sys_socketcall
	.long sys_syslog
	.long sys_setitimer
	.long sys_getitimer	/* 105 */
	.long sys_newstat
	.long sys_newlstat
	.long sys_newfstat
	.long sys_uname
	.long sys_ni_syscall	/* 110 */
	.long sys_vhangup
	.long sys_ni_syscall	/* old 'idle' syscall */
	.long sys_ni_syscall
	.long sys_wait4
	.long sys_swapoff	/* 115 */
	.long sys_sysinfo
	.long sys_ipc
	.long sys_fsync
	.long sys_sigreturn
	.long ppc_clone		/* 120 */
	.long sys_setdomainname
	.long sys_newuname
	.long sys_ni_syscall
	.long sys_adjtimex
	.long sys_mprotect	/* 125 */
	.long sys_sigprocmask
	.long sys_ni_syscall	/* old sys_create_module */
	.long sys_init_module
	.long sys_delete_module
	.long sys_ni_syscall	/* old sys_get_kernel_syms */	/* 130 */
	.long sys_quotactl
	.long sys_getpgid
	.long sys_fchdir
	.long sys_bdflush
	.long sys_sysfs		/* 135 */
	.long sys_personality
	.long sys_ni_syscall	/* for afs_syscall */
	.long sys_setfsuid
	.long sys_setfsgid
	.long sys_llseek	/* 140 */
	.long sys_getdents
	.long ppc_select
	.long sys_flock
	.long sys_msync
	.long sys_readv		/* 145 */
	.long sys_writev
	.long sys_getsid
	.long sys_fdatasync
	.long sys_sysctl
	.long sys_mlock		/* 150 */
	.long sys_munlock
	.long sys_mlockall
	.long sys_munlockall
	.long sys_sched_setparam
	.long sys_sched_getparam	/* 155 */
	.long sys_sched_setscheduler
	.long sys_sched_getscheduler
	.long sys_sched_yield
	.long sys_sched_get_priority_max
	.long sys_sched_get_priority_min  /* 160 */
	.long sys_sched_rr_get_interval
	.long sys_nanosleep
	.long sys_mremap
	.long sys_setresuid
	.long sys_getresuid	/* 165 */
	.long sys_ni_syscall		/* old sys_query_module */
	.long sys_poll
	.long sys_nfsservctl
	.long sys_setresgid
	.long sys_getresgid	/* 170 */
	.long sys_prctl
	.long sys_rt_sigreturn
	.long sys_rt_sigaction
	.long sys_rt_sigprocmask
	.long sys_rt_sigpending	/* 175 */
	.long sys_rt_sigtimedwait
	.long sys_rt_sigqueueinfo
	.long ppc_rt_sigsuspend
	.long sys_pread64
	.long sys_pwrite64	/* 180 */
	.long sys_chown
	.long sys_getcwd
	.long sys_capget
	.long sys_capset
	.long sys_sigaltstack	/* 185 */
	.long sys_sendfile
	.long sys_ni_syscall		/* streams1 */
	.long sys_ni_syscall		/* streams2 */
	.long ppc_vfork
	.long sys_getrlimit	/* 190 */
	.long sys_readahead
	.long sys_mmap2
	.long sys_truncate64
	.long sys_ftruncate64
	.long sys_stat64	/* 195 */
	.long sys_lstat64
	.long sys_fstat64
	.long sys_pciconfig_read
	.long sys_pciconfig_write
	.long sys_pciconfig_iobase 	/* 200 */
	.long sys_ni_syscall	/* 201 - reserved - MacOnLinux - new */
	.long sys_getdents64
	.long sys_pivot_root
	.long sys_fcntl64
	.long sys_madvise	/* 205 */
	.long sys_mincore
	.long sys_gettid
	.long sys_tkill
	.long sys_setxattr
	.long sys_lsetxattr	/* 210 */
	.long sys_fsetxattr
	.long sys_getxattr
	.long sys_lgetxattr
	.long sys_fgetxattr
	.long sys_listxattr	/* 215 */
	.long sys_llistxattr
	.long sys_flistxattr
	.long sys_removexattr
	.long sys_lremovexattr
	.long sys_fremovexattr	/* 220 */
	.long sys_futex
	.long sys_sched_setaffinity
	.long sys_sched_getaffinity
	.long sys_ni_syscall
	.long sys_ni_syscall	/* 225 - reserved for Tux */
	.long sys_sendfile64
	.long sys_io_setup
	.long sys_io_destroy
	.long sys_io_getevents
	.long sys_io_submit	/* 230 */
	.long sys_io_cancel
	.long sys_set_tid_address
	.long sys_fadvise64
	.long sys_exit_group
	.long sys_lookup_dcookie /* 235 */
	.long sys_epoll_create
	.long sys_epoll_ctl
	.long sys_epoll_wait
	.long sys_remap_file_pages
	.long sys_timer_create	/* 240 */
	.long sys_timer_settime
	.long sys_timer_gettime
	.long sys_timer_getoverrun
	.long sys_timer_delete
	.long sys_clock_settime	/* 245 */
	.long sys_clock_gettime
	.long sys_clock_getres
	.long sys_clock_nanosleep
	.long ppc_swapcontext
	.long sys_tgkill	/* 250 */
	.long sys_utimes
	.long sys_statfs64
	.long sys_fstatfs64
	.long ppc_fadvise64_64
	.long sys_ni_syscall		/* 255 - rtas (used on ppc64) */
	.long sys_debug_setcontext
	.long sys_ni_syscall		/* 257 reserved for vserver */
	.long sys_ni_syscall		/* 258 reserved for new sys_remap_file_pages */
	.long sys_ni_syscall		/* 259 reserved for new sys_mbind */
	.long sys_ni_syscall		/* 260 reserved for new sys_get_mempolicy */
	.long sys_ni_syscall		/* 261 reserved for new sys_set_mempolicy */
	.long sys_mq_open
	.long sys_mq_unlink
	.long sys_mq_timedsend
	.long sys_mq_timedreceive	/* 265 */
	.long sys_mq_notify
	.long sys_mq_getsetattr
	.long sys_ni_syscall		/* 268 reserved for sys_kexec_load */
	.long sys_add_key
	.long sys_request_key	/* 270 */
	.long sys_keyctl
	.long sys_waitid
diff --git a/arch/ppc/kernel/module.c b/arch/ppc/kernel/module.c new file mode 100644 index 000000000000..92f4e5f64f02 --- /dev/null +++ b/arch/ppc/kernel/module.c | |||
@@ -0,0 +1,320 @@ | |||
1 | /* Kernel module help for PPC. | ||
2 | Copyright (C) 2001 Rusty Russell. | ||
3 | |||
4 | This program is free software; you can redistribute it and/or modify | ||
5 | it under the terms of the GNU General Public License as published by | ||
6 | the Free Software Foundation; either version 2 of the License, or | ||
7 | (at your option) any later version. | ||
8 | |||
9 | This program is distributed in the hope that it will be useful, | ||
10 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | GNU General Public License for more details. | ||
13 | |||
14 | You should have received a copy of the GNU General Public License | ||
15 | along with this program; if not, write to the Free Software | ||
16 | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | */ | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/moduleloader.h> | ||
20 | #include <linux/elf.h> | ||
21 | #include <linux/vmalloc.h> | ||
22 | #include <linux/fs.h> | ||
23 | #include <linux/string.h> | ||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/cache.h> | ||
26 | |||
/* Compile-time debug switch: change "#if 0" to "#if 1" to route DEBUGP
   through printk; otherwise DEBUGP() expands to nothing (args unevaluated
   at runtime). */
#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt , ...)
#endif

/* Global list of every loaded module's bug-table bookkeeping; entries are
   added in module_finalize() and walked (locklessly) by module_find_bug(). */
LIST_HEAD(module_bug_list);
34 | |||
35 | void *module_alloc(unsigned long size) | ||
36 | { | ||
37 | if (size == 0) | ||
38 | return NULL; | ||
39 | return vmalloc(size); | ||
40 | } | ||
41 | |||
/* Release memory previously obtained from module_alloc(). */
void module_free(struct module *mod, void *module_region)
{
	/* FIXME: If module_region == mod->init_region, trim exception
	   table entries. */
	vfree(module_region);
}
49 | |||
/* Return how many distinct (symbol, addend) pairs appear among NUM RELA
   entries -- each distinct pair may need its own PLT trampoline.
   O(n^2), but relocation lists are short and this is not a hot path. */
static unsigned int count_relocs(const Elf32_Rela *rela, unsigned int num)
{
	unsigned int cur, prev, distinct = 0;

	for (cur = 0; cur < num; cur++) {
		/* Scan every earlier entry for the same (sym, addend). */
		for (prev = 0; prev < cur; prev++)
			if (ELF32_R_SYM(rela[prev].r_info)
			    == ELF32_R_SYM(rela[cur].r_info)
			    && rela[prev].r_addend == rela[cur].r_addend)
				break;
		/* No earlier duplicate found: this pair is new. */
		if (prev == cur)
			distinct++;
	}
	return distinct;
}
71 | |||
72 | /* Get the potential trampolines size required of the init and | ||
73 | non-init sections */ | ||
74 | static unsigned long get_plt_size(const Elf32_Ehdr *hdr, | ||
75 | const Elf32_Shdr *sechdrs, | ||
76 | const char *secstrings, | ||
77 | int is_init) | ||
78 | { | ||
79 | unsigned long ret = 0; | ||
80 | unsigned i; | ||
81 | |||
82 | /* Everything marked ALLOC (this includes the exported | ||
83 | symbols) */ | ||
84 | for (i = 1; i < hdr->e_shnum; i++) { | ||
85 | /* If it's called *.init*, and we're not init, we're | ||
86 | not interested */ | ||
87 | if ((strstr(secstrings + sechdrs[i].sh_name, ".init") != 0) | ||
88 | != is_init) | ||
89 | continue; | ||
90 | |||
91 | /* We don't want to look at debug sections. */ | ||
92 | if (strstr(secstrings + sechdrs[i].sh_name, ".debug") != 0) | ||
93 | continue; | ||
94 | |||
95 | if (sechdrs[i].sh_type == SHT_RELA) { | ||
96 | DEBUGP("Found relocations in section %u\n", i); | ||
97 | DEBUGP("Ptr: %p. Number: %u\n", | ||
98 | (void *)hdr + sechdrs[i].sh_offset, | ||
99 | sechdrs[i].sh_size / sizeof(Elf32_Rela)); | ||
100 | ret += count_relocs((void *)hdr | ||
101 | + sechdrs[i].sh_offset, | ||
102 | sechdrs[i].sh_size | ||
103 | / sizeof(Elf32_Rela)) | ||
104 | * sizeof(struct ppc_plt_entry); | ||
105 | } | ||
106 | } | ||
107 | |||
108 | return ret; | ||
109 | } | ||
110 | |||
111 | int module_frob_arch_sections(Elf32_Ehdr *hdr, | ||
112 | Elf32_Shdr *sechdrs, | ||
113 | char *secstrings, | ||
114 | struct module *me) | ||
115 | { | ||
116 | unsigned int i; | ||
117 | |||
118 | /* Find .plt and .init.plt sections */ | ||
119 | for (i = 0; i < hdr->e_shnum; i++) { | ||
120 | if (strcmp(secstrings + sechdrs[i].sh_name, ".init.plt") == 0) | ||
121 | me->arch.init_plt_section = i; | ||
122 | else if (strcmp(secstrings + sechdrs[i].sh_name, ".plt") == 0) | ||
123 | me->arch.core_plt_section = i; | ||
124 | } | ||
125 | if (!me->arch.core_plt_section || !me->arch.init_plt_section) { | ||
126 | printk("Module doesn't contain .plt or .init.plt sections.\n"); | ||
127 | return -ENOEXEC; | ||
128 | } | ||
129 | |||
130 | /* Override their sizes */ | ||
131 | sechdrs[me->arch.core_plt_section].sh_size | ||
132 | = get_plt_size(hdr, sechdrs, secstrings, 0); | ||
133 | sechdrs[me->arch.init_plt_section].sh_size | ||
134 | = get_plt_size(hdr, sechdrs, secstrings, 1); | ||
135 | return 0; | ||
136 | } | ||
137 | |||
/* PPC modules are built with RELA relocations only; a plain (addend-less)
   REL section is a malformed module, so reject it outright. */
int apply_relocate(Elf32_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *module)
{
	printk(KERN_ERR "%s: Non-ADD RELOCATION unsupported\n",
	       module->name);
	return -ENOEXEC;
}
148 | |||
149 | static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val) | ||
150 | { | ||
151 | if (entry->jump[0] == 0x3d600000 + ((val + 0x8000) >> 16) | ||
152 | && entry->jump[1] == 0x396b0000 + (val & 0xffff)) | ||
153 | return 1; | ||
154 | return 0; | ||
155 | } | ||
156 | |||
/* Set up a trampoline in the PLT to bounce us to the distant function.
   Used when a R_PPC_REL24 target is outside direct-branch range; returns
   the address of a (possibly pre-existing) trampoline for VAL. */
static uint32_t do_plt_call(void *location,
			    Elf32_Addr val,
			    Elf32_Shdr *sechdrs,
			    struct module *mod)
{
	struct ppc_plt_entry *entry;

	DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
	/* Init, or core PLT?  Use the PLT belonging to the region that
	   contains the instruction being fixed up. */
	if (location >= mod->module_core
	    && location < mod->module_core + mod->core_size)
		entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
	else
		entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;

	/* Find this entry, or if that fails, the next avail. entry.
	   Slots are filled in order; jump[0] == 0 marks the first free one. */
	while (entry->jump[0]) {
		if (entry_matches(entry, val)) return (uint32_t)entry;
		entry++;
	}

	/* Stolen from Paul Mackerras as well... */
	entry->jump[0] = 0x3d600000+((val+0x8000)>>16);	/* lis r11,sym@ha */
	entry->jump[1] = 0x396b0000 + (val&0xffff);	/* addi r11,r11,sym@l*/
	entry->jump[2] = 0x7d6903a6;			/* mtctr r11 */
	entry->jump[3] = 0x4e800420;			/* bctr */

	DEBUGP("Initialized plt for 0x%x at %p\n", val, entry);
	return (uint32_t)entry;
}
188 | |||
/*
 * Apply one section's worth of RELA relocations to a module image.
 * For each entry: locate the word to patch, resolve the target symbol
 * (all undefined symbols were resolved earlier by the generic loader),
 * compute value = st_value + addend, and patch according to the
 * relocation type.  Returns 0, or -ENOEXEC on an unsupported type.
 */
int apply_relocate_add(Elf32_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *module)
{
	unsigned int i;
	Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
	Elf32_Sym *sym;
	uint32_t *location;
	uint32_t value;

	DEBUGP("Applying ADD relocate section %u to %u\n", relsec,
	       sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rela[i].r_offset;
		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
			+ ELF32_R_SYM(rela[i].r_info);
		/* `Everything is relative'. */
		value = sym->st_value + rela[i].r_addend;

		switch (ELF32_R_TYPE(rela[i].r_info)) {
		case R_PPC_ADDR32:
			/* Simply set it */
			*(uint32_t *)location = value;
			break;

		case R_PPC_ADDR16_LO:
			/* Low half of the symbol */
			*(uint16_t *)location = value;
			break;

		case R_PPC_ADDR16_HA:
			/* Sign-adjusted lower 16 bits: PPC ELF ABI says:
			   (((x >> 16) + ((x & 0x8000) ? 1 : 0))) & 0xFFFF.
			   This is the same, only sane.
			 */
			*(uint16_t *)location = (value + 0x8000) >> 16;
			break;

		case R_PPC_REL24:
			/* 24-bit PC-relative branch: if the displacement
			   doesn't fit in the signed 26-bit branch field
			   (+/- 32MB), bounce through a PLT trampoline. */
			if ((int)(value - (uint32_t)location) < -0x02000000
			    || (int)(value - (uint32_t)location) >= 0x02000000)
				value = do_plt_call(location, value,
						    sechdrs, module);

			/* Only replace bits 2 through 26 */
			DEBUGP("REL24 value = %08X. location = %08X\n",
			       value, (uint32_t)location);
			DEBUGP("Location before: %08X.\n",
			       *(uint32_t *)location);
			*(uint32_t *)location
				= (*(uint32_t *)location & ~0x03fffffc)
				| ((value - (uint32_t)location)
				   & 0x03fffffc);
			DEBUGP("Location after: %08X.\n",
			       *(uint32_t *)location);
			DEBUGP("ie. jump to %08X+%08X = %08X\n",
			       *(uint32_t *)location & 0x03fffffc,
			       (uint32_t)location,
			       (*(uint32_t *)location & 0x03fffffc)
			       + (uint32_t)location);
			break;

		case R_PPC_REL32:
			/* 32-bit relative jump. */
			*(uint32_t *)location = value - (uint32_t)location;
			break;

		default:
			printk("%s: unknown ADD relocation: %u\n",
			       module->name,
			       ELF32_R_TYPE(rela[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}
271 | |||
272 | int module_finalize(const Elf_Ehdr *hdr, | ||
273 | const Elf_Shdr *sechdrs, | ||
274 | struct module *me) | ||
275 | { | ||
276 | char *secstrings; | ||
277 | unsigned int i; | ||
278 | |||
279 | me->arch.bug_table = NULL; | ||
280 | me->arch.num_bugs = 0; | ||
281 | |||
282 | /* Find the __bug_table section, if present */ | ||
283 | secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; | ||
284 | for (i = 1; i < hdr->e_shnum; i++) { | ||
285 | if (strcmp(secstrings+sechdrs[i].sh_name, "__bug_table")) | ||
286 | continue; | ||
287 | me->arch.bug_table = (void *) sechdrs[i].sh_addr; | ||
288 | me->arch.num_bugs = sechdrs[i].sh_size / sizeof(struct bug_entry); | ||
289 | break; | ||
290 | } | ||
291 | |||
292 | /* | ||
293 | * Strictly speaking this should have a spinlock to protect against | ||
294 | * traversals, but since we only traverse on BUG()s, a spinlock | ||
295 | * could potentially lead to deadlock and thus be counter-productive. | ||
296 | */ | ||
297 | list_add(&me->arch.bug_list, &module_bug_list); | ||
298 | |||
299 | return 0; | ||
300 | } | ||
301 | |||
/* Unload-time hook: unhook this module's bug table from the global list
   (counterpart of the list_add in module_finalize). */
void module_arch_cleanup(struct module *mod)
{
	list_del(&mod->arch.bug_list);
}
306 | |||
307 | struct bug_entry *module_find_bug(unsigned long bugaddr) | ||
308 | { | ||
309 | struct mod_arch_specific *mod; | ||
310 | unsigned int i; | ||
311 | struct bug_entry *bug; | ||
312 | |||
313 | list_for_each_entry(mod, &module_bug_list, bug_list) { | ||
314 | bug = mod->bug_table; | ||
315 | for (i = 0; i < mod->num_bugs; ++i, ++bug) | ||
316 | if (bugaddr == bug->bug_addr) | ||
317 | return bug; | ||
318 | } | ||
319 | return NULL; | ||
320 | } | ||
diff --git a/arch/ppc/kernel/pci.c b/arch/ppc/kernel/pci.c new file mode 100644 index 000000000000..98f94b60204c --- /dev/null +++ b/arch/ppc/kernel/pci.c | |||
@@ -0,0 +1,1849 @@ | |||
1 | /* | ||
2 | * Common pmac/prep/chrp pci routines. -- Cort | ||
3 | */ | ||
4 | |||
5 | #include <linux/config.h> | ||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/pci.h> | ||
8 | #include <linux/delay.h> | ||
9 | #include <linux/string.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/capability.h> | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/errno.h> | ||
14 | #include <linux/bootmem.h> | ||
15 | |||
16 | #include <asm/processor.h> | ||
17 | #include <asm/io.h> | ||
18 | #include <asm/prom.h> | ||
19 | #include <asm/sections.h> | ||
20 | #include <asm/pci-bridge.h> | ||
21 | #include <asm/byteorder.h> | ||
22 | #include <asm/irq.h> | ||
23 | #include <asm/uaccess.h> | ||
24 | |||
#undef DEBUG

/* Change the #undef above to "#define DEBUG" to get verbose PCI
   resource-fixup traces; otherwise DBG() compiles to nothing. */
#ifdef DEBUG
#define DBG(x...) printk(x)
#else
#define DBG(x...)
#endif
32 | |||
/* ISA-compatible I/O and memory window bases and the global PCI<->DRAM
   offset; left at 0 here and filled in by platform setup code. */
unsigned long isa_io_base     = 0;
unsigned long isa_mem_base    = 0;
unsigned long pci_dram_offset = 0;
/* NOTE(review): presumably controls bus-number offsetting during
   assignment -- confirm against the users further down the file. */
int pcibios_assign_bus_offset = 1;

void pcibios_make_OF_bus_map(void);

/* Forward declarations for static helpers defined later in this file. */
static int pci_relocate_bridge_resource(struct pci_bus *bus, int i);
static int probe_resource(struct pci_bus *parent, struct resource *pr,
			  struct resource *res, struct resource **conflict);
static void update_bridge_base(struct pci_bus *bus, int i);
static void pcibios_fixup_resources(struct pci_dev* dev);
static void fixup_broken_pcnet32(struct pci_dev* dev);
static int reparent_resources(struct resource *parent, struct resource *res);
static void fixup_rev1_53c810(struct pci_dev* dev);
static void fixup_cpc710_pci64(struct pci_dev* dev);
#ifdef CONFIG_PPC_OF
/* Maps kernel PCI bus numbers to Open Firmware bus numbers. */
static u8* pci_to_OF_bus_map;
#endif

/* By default, we don't re-assign bus numbers. We do this only on
 * some pmacs
 */
int pci_assign_all_busses;

/* Singly-linked list of all PCI host bridges ("hoses") in the system. */
struct pci_controller* hose_head;
struct pci_controller** hose_tail = &hose_head;

static int pci_bus_count;
62 | |||
63 | static void | ||
64 | fixup_rev1_53c810(struct pci_dev* dev) | ||
65 | { | ||
66 | /* rev 1 ncr53c810 chips don't set the class at all which means | ||
67 | * they don't get their resources remapped. Fix that here. | ||
68 | */ | ||
69 | |||
70 | if ((dev->class == PCI_CLASS_NOT_DEFINED)) { | ||
71 | printk("NCR 53c810 rev 1 detected, setting PCI class.\n"); | ||
72 | dev->class = PCI_CLASS_STORAGE_SCSI; | ||
73 | } | ||
74 | } | ||
75 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810); | ||
76 | |||
77 | static void | ||
78 | fixup_broken_pcnet32(struct pci_dev* dev) | ||
79 | { | ||
80 | if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) { | ||
81 | dev->vendor = PCI_VENDOR_ID_AMD; | ||
82 | pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD); | ||
83 | pci_name_device(dev); | ||
84 | } | ||
85 | } | ||
86 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32); | ||
87 | |||
88 | static void | ||
89 | fixup_cpc710_pci64(struct pci_dev* dev) | ||
90 | { | ||
91 | /* Hide the PCI64 BARs from the kernel as their content doesn't | ||
92 | * fit well in the resource management | ||
93 | */ | ||
94 | dev->resource[0].start = dev->resource[0].end = 0; | ||
95 | dev->resource[0].flags = 0; | ||
96 | dev->resource[1].start = dev->resource[1].end = 0; | ||
97 | dev->resource[1].flags = 0; | ||
98 | } | ||
99 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CPC710_PCI64, fixup_cpc710_pci64); | ||
100 | |||
/*
 * Per-device resource fixup, run for every PCI device:
 *  - resources the firmware left unassigned (end == 0xffffffff) are
 *    reset to a size-only [0, len] range and flagged IORESOURCE_UNSET
 *    so the allocator will place them later;
 *  - assigned resources get the hose's bus->CPU offset added (memory
 *    offset, or virtual I/O base relative to isa_io_base) so they hold
 *    CPU-view addresses from here on.
 * Finally defers to the platform-specific fixup hook, if any.
 */
static void
pcibios_fixup_resources(struct pci_dev *dev)
{
	struct pci_controller* hose = (struct pci_controller *)dev->sysdata;
	int i;
	unsigned long offset;

	if (!hose) {
		printk(KERN_ERR "No hose for PCI dev %s!\n", pci_name(dev));
		return;
	}
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		struct resource *res = dev->resource + i;
		if (!res->flags)
			continue;
		if (res->end == 0xffffffff) {
			DBG("PCI:%s Resource %d [%08lx-%08lx] is unassigned\n",
			    pci_name(dev), i, res->start, res->end);
			/* Keep only the length; allocator assigns later. */
			res->end -= res->start;
			res->start = 0;
			res->flags |= IORESOURCE_UNSET;
			continue;
		}
		offset = 0;
		if (res->flags & IORESOURCE_MEM) {
			offset = hose->pci_mem_offset;
		} else if (res->flags & IORESOURCE_IO) {
			offset = (unsigned long) hose->io_base_virt
				- isa_io_base;
		}
		if (offset != 0) {
			res->start += offset;
			res->end += offset;
#ifdef DEBUG
			printk("Fixup res %d (%lx) of dev %s: %lx -> %lx\n",
			       i, res->flags, pci_name(dev),
			       res->start - offset, res->start);
#endif
		}
	}

	/* Call machine specific resource fixup */
	if (ppc_md.pcibios_fixup_resources)
		ppc_md.pcibios_fixup_resources(dev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
147 | |||
148 | void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, | ||
149 | struct resource *res) | ||
150 | { | ||
151 | unsigned long offset = 0; | ||
152 | struct pci_controller *hose = dev->sysdata; | ||
153 | |||
154 | if (hose && res->flags & IORESOURCE_IO) | ||
155 | offset = (unsigned long)hose->io_base_virt - isa_io_base; | ||
156 | else if (hose && res->flags & IORESOURCE_MEM) | ||
157 | offset = hose->pci_mem_offset; | ||
158 | region->start = res->start - offset; | ||
159 | region->end = res->end - offset; | ||
160 | } | ||
161 | EXPORT_SYMBOL(pcibios_resource_to_bus); | ||
162 | |||
163 | /* | ||
164 | * We need to avoid collisions with `mirrored' VGA ports | ||
165 | * and other strange ISA hardware, so we always want the | ||
166 | * addresses to be allocated in the 0x000-0x0ff region | ||
167 | * modulo 0x400. | ||
168 | * | ||
169 | * Why? Because some silly external IO cards only decode | ||
170 | * the low 10 bits of the IO address. The 0x00-0xff region | ||
171 | * is reserved for motherboard devices that decode all 16 | ||
172 | * bits, so it's ok to allocate at, say, 0x2800-0x28ff, | ||
173 | * but we want to try to avoid allocating at 0x2900-0x2bff | ||
174 | * which might have be mirrored at 0x0100-0x03ff.. | ||
175 | */ | ||
176 | void pcibios_align_resource(void *data, struct resource *res, unsigned long size, | ||
177 | unsigned long align) | ||
178 | { | ||
179 | struct pci_dev *dev = data; | ||
180 | |||
181 | if (res->flags & IORESOURCE_IO) { | ||
182 | unsigned long start = res->start; | ||
183 | |||
184 | if (size > 0x100) { | ||
185 | printk(KERN_ERR "PCI: I/O Region %s/%d too large" | ||
186 | " (%ld bytes)\n", pci_name(dev), | ||
187 | dev->resource - res, size); | ||
188 | } | ||
189 | |||
190 | if (start & 0x300) { | ||
191 | start = (start + 0x3ff) & ~0x3ff; | ||
192 | res->start = start; | ||
193 | } | ||
194 | } | ||
195 | } | ||
196 | EXPORT_SYMBOL(pcibios_align_resource); | ||
197 | |||
198 | /* | ||
199 | * Handle resources of PCI devices. If the world were perfect, we could | ||
200 | * just allocate all the resource regions and do nothing more. It isn't. | ||
201 | * On the other hand, we cannot just re-allocate all devices, as it would | ||
202 | * require us to know lots of host bridge internals. So we attempt to | ||
203 | * keep as much of the original configuration as possible, but tweak it | ||
204 | * when it's found to be wrong. | ||
205 | * | ||
206 | * Known BIOS problems we have to work around: | ||
207 | * - I/O or memory regions not configured | ||
208 | * - regions configured, but not enabled in the command register | ||
209 | * - bogus I/O addresses above 64K used | ||
210 | * - expansion ROMs left enabled (this may sound harmless, but given | ||
211 | * the fact the PCI specs explicitly allow address decoders to be | ||
212 | * shared between expansion ROMs and other resource regions, it's | ||
213 | * at least dangerous) | ||
214 | * | ||
215 | * Our solution: | ||
216 | * (1) Allocate resources for all buses behind PCI-to-PCI bridges. | ||
217 | * This gives us fixed barriers on where we can allocate. | ||
218 | * (2) Allocate resources for all enabled devices. If there is | ||
219 | * a collision, just mark the resource as unallocated. Also | ||
220 | * disable expansion ROMs during this step. | ||
221 | * (3) Try to allocate resources for disabled devices. If the | ||
222 | * resources were assigned correctly, everything goes well, | ||
223 | * if they weren't, they won't disturb allocation of other | ||
224 | * resources. | ||
225 | * (4) Assign new addresses to resources which were either | ||
226 | * not configured at all or misconfigured. If explicitly | ||
227 | * requested by the user, configure expansion ROM address | ||
228 | * as well. | ||
229 | */ | ||
230 | |||
/*
 * Walk the bus tree depth-first and claim each bridge window
 * (bus->resource[0..3]) from its parent resource.  On conflict we
 * first try to move the conflicting entries under the window
 * (reparent_resources) and then to move the window itself
 * (pci_relocate_bridge_resource); if all of that fails the window
 * is dropped from the bus.
 */
static void __init
pcibios_allocate_bus_resources(struct list_head *bus_list)
{
	struct pci_bus *bus;
	int i;
	struct resource *res, *pr;

	/* Depth-First Search on bus tree */
	list_for_each_entry(bus, bus_list, node) {
		for (i = 0; i < 4; ++i) {
			/* skip absent, flagless, or inverted (unset) windows */
			if ((res = bus->resource[i]) == NULL || !res->flags
			    || res->start > res->end)
				continue;
			if (bus->parent == NULL)
				/* host bridge: claim from the root resources */
				pr = (res->flags & IORESOURCE_IO)?
					&ioport_resource: &iomem_resource;
			else {
				pr = pci_find_parent_resource(bus->self, res);
				if (pr == res) {
					/* this happens when the generic PCI
					 * code (wrongly) decides that this
					 * bridge is transparent -- paulus
					 */
					continue;
				}
			}

			DBG("PCI: bridge rsrc %lx..%lx (%lx), parent %p\n",
			    res->start, res->end, res->flags, pr);
			if (pr) {
				if (request_resource(pr, res) == 0)
					continue;
				/*
				 * Must be a conflict with an existing entry.
				 * Move that entry (or entries) under the
				 * bridge resource and try again.
				 */
				if (reparent_resources(pr, res) == 0)
					continue;
			}
			printk(KERN_ERR "PCI: Cannot allocate resource region "
			       "%d of PCI bridge %d\n", i, bus->number);
			if (pci_relocate_bridge_resource(bus, i))
				bus->resource[i] = NULL;
		}
		/* recurse into the buses below this one */
		pcibios_allocate_bus_resources(&bus->children);
	}
}
279 | |||
/*
 * Reparent resource children of pr that conflict with res
 * under res, and make res replace those children.
 *
 * Returns 0 on success.  Returns -1 when res is not completely
 * contained within one contiguous run of parent's children, or when
 * no conflicting child exists at all.
 */
static int __init
reparent_resources(struct resource *parent, struct resource *res)
{
	struct resource *p, **pp;
	struct resource **firstpp = NULL;

	/* Find the run of children that overlap res (the sibling list
	 * is kept sorted by address, so the run is contiguous).
	 */
	for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
		if (p->end < res->start)
			continue;
		if (res->end < p->start)
			break;
		if (p->start < res->start || p->end > res->end)
			return -1;	/* not completely contained */
		if (firstpp == NULL)
			firstpp = pp;
	}
	if (firstpp == NULL)
		return -1;	/* didn't find any conflicting entries? */
	/* Splice res into the child list in place of the run ... */
	res->parent = parent;
	res->child = *firstpp;
	res->sibling = *pp;
	*firstpp = res;
	*pp = NULL;
	/* ... and adopt the displaced children under res. */
	for (p = res->child; p != NULL; p = p->sibling) {
		p->parent = res;
		DBG(KERN_INFO "PCI: reparented %s [%lx..%lx] under %s\n",
		    p->name, p->start, p->end, res->name);
	}
	return 0;
}
314 | |||
/*
 * A bridge has been allocated a range which is outside the range
 * of its parent bridge, so it needs to be moved.
 *
 * Picks a compatible window on the parent bus (same I/O vs memory
 * type; a prefetchable range may fall back to a non-prefetchable
 * window), scans downward from the top of that window for a
 * conflict-free slot, then reprograms the bridge registers.
 * Returns 0 on success, -1 on any failure.
 */
static int __init
pci_relocate_bridge_resource(struct pci_bus *bus, int i)
{
	struct resource *res, *pr, *conflict;
	unsigned long try, size;
	int j;
	struct pci_bus *parent = bus->parent;

	if (parent == NULL) {
		/* shouldn't ever happen */
		printk(KERN_ERR "PCI: can't move host bridge resource\n");
		return -1;
	}
	res = bus->resource[i];
	if (res == NULL)
		return -1;
	pr = NULL;
	/* Choose which parent window to allocate from. */
	for (j = 0; j < 4; j++) {
		struct resource *r = parent->resource[j];
		if (!r)
			continue;
		/* must agree on I/O vs memory */
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH)) {
			pr = r;		/* exact prefetch match -- take it */
			break;
		}
		/* a prefetchable range may use a non-prefetch window */
		if (res->flags & IORESOURCE_PREFETCH)
			pr = r;
	}
	if (pr == NULL)
		return -1;
	size = res->end - res->start;
	if (pr->start > pr->end || size > pr->end - pr->start)
		return -1;
	/* Scan downward from the top of the window for a free slot. */
	try = pr->end;
	for (;;) {
		res->start = try - size;
		res->end = try;
		if (probe_resource(bus->parent, pr, res, &conflict) == 0)
			break;
		if (conflict->start <= pr->start + size)
			return -1;	/* no room left below the conflict */
		try = conflict->start - 1;
	}
	if (request_resource(pr, res)) {
		DBG(KERN_ERR "PCI: huh? couldn't move to %lx..%lx\n",
		    res->start, res->end);
		return -1; /* "can't happen" */
	}
	update_bridge_base(bus, i);
	printk(KERN_INFO "PCI: bridge %d resource %d moved to %lx..%lx\n",
	       bus->number, i, res->start, res->end);
	return 0;
}
374 | |||
375 | static int __init | ||
376 | probe_resource(struct pci_bus *parent, struct resource *pr, | ||
377 | struct resource *res, struct resource **conflict) | ||
378 | { | ||
379 | struct pci_bus *bus; | ||
380 | struct pci_dev *dev; | ||
381 | struct resource *r; | ||
382 | int i; | ||
383 | |||
384 | for (r = pr->child; r != NULL; r = r->sibling) { | ||
385 | if (r->end >= res->start && res->end >= r->start) { | ||
386 | *conflict = r; | ||
387 | return 1; | ||
388 | } | ||
389 | } | ||
390 | list_for_each_entry(bus, &parent->children, node) { | ||
391 | for (i = 0; i < 4; ++i) { | ||
392 | if ((r = bus->resource[i]) == NULL) | ||
393 | continue; | ||
394 | if (!r->flags || r->start > r->end || r == res) | ||
395 | continue; | ||
396 | if (pci_find_parent_resource(bus->self, r) != pr) | ||
397 | continue; | ||
398 | if (r->end >= res->start && res->end >= r->start) { | ||
399 | *conflict = r; | ||
400 | return 1; | ||
401 | } | ||
402 | } | ||
403 | } | ||
404 | list_for_each_entry(dev, &parent->devices, bus_list) { | ||
405 | for (i = 0; i < 6; ++i) { | ||
406 | r = &dev->resource[i]; | ||
407 | if (!r->flags || (r->flags & IORESOURCE_UNSET)) | ||
408 | continue; | ||
409 | if (pci_find_parent_resource(dev, r) != pr) | ||
410 | continue; | ||
411 | if (r->end >= res->start && res->end >= r->start) { | ||
412 | *conflict = r; | ||
413 | return 1; | ||
414 | } | ||
415 | } | ||
416 | } | ||
417 | return 0; | ||
418 | } | ||
419 | |||
/*
 * Write the (possibly relocated) window i of bus back into the
 * base/limit registers of its upstream P2P bridge.  I/O and memory
 * decode are disabled in the command register while the window
 * registers are inconsistent and restored afterwards.
 */
static void __init
update_bridge_base(struct pci_bus *bus, int i)
{
	struct resource *res = bus->resource[i];
	u8 io_base_lo, io_limit_lo;
	u16 mem_base, mem_limit;
	u16 cmd;
	unsigned long start, end, off;
	struct pci_dev *dev = bus->self;
	struct pci_controller *hose = dev->sysdata;

	if (!hose) {
		printk("update_bridge_base: no hose?\n");
		return;
	}
	/* disable decode while we rewrite the window registers */
	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	pci_write_config_word(dev, PCI_COMMAND,
			      cmd & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY));
	if (res->flags & IORESOURCE_IO) {
		/* translate CPU addresses back to bus I/O addresses */
		off = (unsigned long) hose->io_base_virt - isa_io_base;
		start = res->start - off;
		end = res->end - off;
		io_base_lo = (start >> 8) & PCI_IO_RANGE_MASK;
		io_limit_lo = (end >> 8) & PCI_IO_RANGE_MASK;
		if (end > 0xffff) {
			/* need a 32-bit I/O window: program upper halves */
			pci_write_config_word(dev, PCI_IO_BASE_UPPER16,
					      start >> 16);
			pci_write_config_word(dev, PCI_IO_LIMIT_UPPER16,
					      end >> 16);
			io_base_lo |= PCI_IO_RANGE_TYPE_32;
		} else
			io_base_lo |= PCI_IO_RANGE_TYPE_16;
		pci_write_config_byte(dev, PCI_IO_BASE, io_base_lo);
		pci_write_config_byte(dev, PCI_IO_LIMIT, io_limit_lo);

	} else if ((res->flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH))
		   == IORESOURCE_MEM) {
		/* non-prefetchable memory window */
		off = hose->pci_mem_offset;
		mem_base = ((res->start - off) >> 16) & PCI_MEMORY_RANGE_MASK;
		mem_limit = ((res->end - off) >> 16) & PCI_MEMORY_RANGE_MASK;
		pci_write_config_word(dev, PCI_MEMORY_BASE, mem_base);
		pci_write_config_word(dev, PCI_MEMORY_LIMIT, mem_limit);

	} else if ((res->flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH))
		   == (IORESOURCE_MEM | IORESOURCE_PREFETCH)) {
		/* prefetchable memory window */
		off = hose->pci_mem_offset;
		mem_base = ((res->start - off) >> 16) & PCI_PREF_RANGE_MASK;
		mem_limit = ((res->end - off) >> 16) & PCI_PREF_RANGE_MASK;
		pci_write_config_word(dev, PCI_PREF_MEMORY_BASE, mem_base);
		pci_write_config_word(dev, PCI_PREF_MEMORY_LIMIT, mem_limit);

	} else {
		DBG(KERN_ERR "PCI: ugh, bridge %s res %d has flags=%lx\n",
		    pci_name(dev), i, res->flags);
	}
	/* restore the original decode enables */
	pci_write_config_word(dev, PCI_COMMAND, cmd);
}
477 | |||
478 | static inline void alloc_resource(struct pci_dev *dev, int idx) | ||
479 | { | ||
480 | struct resource *pr, *r = &dev->resource[idx]; | ||
481 | |||
482 | DBG("PCI:%s: Resource %d: %08lx-%08lx (f=%lx)\n", | ||
483 | pci_name(dev), idx, r->start, r->end, r->flags); | ||
484 | pr = pci_find_parent_resource(dev, r); | ||
485 | if (!pr || request_resource(pr, r) < 0) { | ||
486 | printk(KERN_ERR "PCI: Cannot allocate resource region %d" | ||
487 | " of device %s\n", idx, pci_name(dev)); | ||
488 | if (pr) | ||
489 | DBG("PCI: parent is %p: %08lx-%08lx (f=%lx)\n", | ||
490 | pr, pr->start, pr->end, pr->flags); | ||
491 | /* We'll assign a new address later */ | ||
492 | r->flags |= IORESOURCE_UNSET; | ||
493 | r->end -= r->start; | ||
494 | r->start = 0; | ||
495 | } | ||
496 | } | ||
497 | |||
/*
 * Claim the BARs of every PCI device from their parent resources.
 *
 * pass 0: BARs of devices whose matching decode (I/O or memory) is
 *	   enabled in the command register; also switches off any
 *	   enabled expansion ROMs.
 * pass 1: BARs of currently disabled devices, so a misconfigured
 *	   one cannot disturb the pass-0 allocations.
 */
static void __init
pcibios_allocate_resources(int pass)
{
	struct pci_dev *dev = NULL;
	int idx, disabled;
	u16 command;
	struct resource *r;

	while ((dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
		pci_read_config_word(dev, PCI_COMMAND, &command);
		for (idx = 0; idx < 6; idx++) {
			r = &dev->resource[idx];
			if (r->parent)		/* Already allocated */
				continue;
			if (!r->flags || (r->flags & IORESOURCE_UNSET))
				continue;	/* Not assigned at all */
			if (r->flags & IORESOURCE_IO)
				disabled = !(command & PCI_COMMAND_IO);
			else
				disabled = !(command & PCI_COMMAND_MEMORY);
			/* pass 0 handles enabled BARs, pass 1 disabled ones */
			if (pass == disabled)
				alloc_resource(dev, idx);
		}
		if (pass)
			continue;
		r = &dev->resource[PCI_ROM_RESOURCE];
		if (r->flags & IORESOURCE_ROM_ENABLE) {
			/* Turn the ROM off, leave the resource region, but keep it unregistered. */
			u32 reg;
			DBG("PCI: Switching off ROM of %s\n", pci_name(dev));
			r->flags &= ~IORESOURCE_ROM_ENABLE;
			pci_read_config_dword(dev, dev->rom_base_reg, &reg);
			pci_write_config_dword(dev, dev->rom_base_reg,
					       reg & ~PCI_ROM_ADDRESS_ENABLE);
		}
	}
}
535 | |||
/*
 * Assign fresh addresses to BARs that are still marked
 * IORESOURCE_UNSET (never configured by the firmware, or rejected
 * during allocation) — unless the platform hook vetoes touching
 * this device.  Classless devices and host bridges are skipped.
 */
static void __init
pcibios_assign_resources(void)
{
	struct pci_dev *dev = NULL;
	int idx;
	struct resource *r;

	while ((dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
		int class = dev->class >> 8;

		/* Don't touch classless devices and host bridges */
		if (!class || class == PCI_CLASS_BRIDGE_HOST)
			continue;

		for (idx = 0; idx < 6; idx++) {
			r = &dev->resource[idx];

			/*
			 * We shall assign a new address to this resource,
			 * either because the BIOS (sic) forgot to do so
			 * or because we have decided the old address was
			 * unusable for some reason.
			 */
			if ((r->flags & IORESOURCE_UNSET) && r->end &&
			    (!ppc_md.pcibios_enable_device_hook ||
			     !ppc_md.pcibios_enable_device_hook(dev, 1))) {
				r->flags &= ~IORESOURCE_UNSET;
				pci_assign_resource(dev, idx);
			}
		}

#if 0 /* don't assign ROMs */
		r = &dev->resource[PCI_ROM_RESOURCE];
		r->end -= r->start;
		r->start = 0;
		if (r->end)
			pci_assign_resource(dev, PCI_ROM_RESOURCE);
#endif
	}
}
576 | |||
577 | |||
578 | int | ||
579 | pcibios_enable_resources(struct pci_dev *dev, int mask) | ||
580 | { | ||
581 | u16 cmd, old_cmd; | ||
582 | int idx; | ||
583 | struct resource *r; | ||
584 | |||
585 | pci_read_config_word(dev, PCI_COMMAND, &cmd); | ||
586 | old_cmd = cmd; | ||
587 | for (idx=0; idx<6; idx++) { | ||
588 | /* Only set up the requested stuff */ | ||
589 | if (!(mask & (1<<idx))) | ||
590 | continue; | ||
591 | |||
592 | r = &dev->resource[idx]; | ||
593 | if (r->flags & IORESOURCE_UNSET) { | ||
594 | printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev)); | ||
595 | return -EINVAL; | ||
596 | } | ||
597 | if (r->flags & IORESOURCE_IO) | ||
598 | cmd |= PCI_COMMAND_IO; | ||
599 | if (r->flags & IORESOURCE_MEM) | ||
600 | cmd |= PCI_COMMAND_MEMORY; | ||
601 | } | ||
602 | if (dev->resource[PCI_ROM_RESOURCE].start) | ||
603 | cmd |= PCI_COMMAND_MEMORY; | ||
604 | if (cmd != old_cmd) { | ||
605 | printk("PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd); | ||
606 | pci_write_config_word(dev, PCI_COMMAND, cmd); | ||
607 | } | ||
608 | return 0; | ||
609 | } | ||
610 | |||
/* Sequential index handed out to each new PCI host controller. */
static int next_controller_index;

/*
 * Allocate (from bootmem) and zero a new pci_controller, append it
 * to the global hose list and give it the next free index.
 */
struct pci_controller * __init
pcibios_alloc_controller(void)
{
	struct pci_controller *hose;

	hose = (struct pci_controller *)alloc_bootmem(sizeof(*hose));
	memset(hose, 0, sizeof(struct pci_controller));

	/* link onto the tail of the hose list */
	*hose_tail = hose;
	hose_tail = &hose->next;

	hose->index = next_controller_index++;

	return hose;
}
628 | |||
629 | #ifdef CONFIG_PPC_OF | ||
630 | /* | ||
631 | * Functions below are used on OpenFirmware machines. | ||
632 | */ | ||
/*
 * Record, for PCI bus pci_bus, the bus number OF believes the
 * corresponding bridge node has (from its "bus-range" property),
 * then recurse into any PCI-PCI / CardBus bridge children.
 */
static void __openfirmware
make_one_node_map(struct device_node* node, u8 pci_bus)
{
	int *bus_range;
	int len;

	if (pci_bus >= pci_bus_count)
		return;
	bus_range = (int *) get_property(node, "bus-range", &len);
	if (bus_range == NULL || len < 2 * sizeof(int)) {
		printk(KERN_WARNING "Can't get bus-range for %s, "
		       "assuming it starts at 0\n", node->full_name);
		pci_to_OF_bus_map[pci_bus] = 0;
	} else
		pci_to_OF_bus_map[pci_bus] = bus_range[0];

	for (node=node->child; node != 0;node = node->sibling) {
		struct pci_dev* dev;
		unsigned int *class_code, *reg;

		/* only recurse below PCI-PCI or CardBus bridges */
		class_code = (unsigned int *) get_property(node, "class-code", NULL);
		if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
			(*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
			continue;
		reg = (unsigned int *)get_property(node, "reg", NULL);
		if (!reg)
			continue;
		/* devfn is encoded in bits 8-15 of the first "reg" cell */
		dev = pci_find_slot(pci_bus, ((reg[0] >> 8) & 0xff));
		if (!dev || !dev->subordinate)
			continue;
		make_one_node_map(node, dev->subordinate->number);
	}
}
666 | |||
/*
 * Build the global pci_to_OF_bus_map (kernel PCI bus number -> OF
 * bus number) by walking the bridge nodes under every hose, and
 * mirror it into the root node's "pci-OF-bus-map" property when
 * one exists.
 */
void __openfirmware
pcibios_make_OF_bus_map(void)
{
	int i;
	struct pci_controller* hose;
	u8* of_prop_map;

	pci_to_OF_bus_map = (u8*)kmalloc(pci_bus_count, GFP_KERNEL);
	if (!pci_to_OF_bus_map) {
		printk(KERN_ERR "Can't allocate OF bus map !\n");
		return;
	}

	/* We fill the bus map with invalid values, that helps
	 * debugging.
	 */
	for (i=0; i<pci_bus_count; i++)
		pci_to_OF_bus_map[i] = 0xff;

	/* For each hose, we begin searching bridges */
	for(hose=hose_head; hose; hose=hose->next) {
		struct device_node* node;
		node = (struct device_node *)hose->arch_data;
		if (!node)
			continue;
		make_one_node_map(node, hose->first_busno);
	}
	/* expose the map through the device-tree property, if present */
	of_prop_map = get_property(find_path_device("/"), "pci-OF-bus-map", NULL);
	if (of_prop_map)
		memcpy(of_prop_map, pci_to_OF_bus_map, pci_bus_count);
#ifdef DEBUG
	printk("PCI->OF bus map:\n");
	for (i=0; i<pci_bus_count; i++) {
		if (pci_to_OF_bus_map[i] == 0xff)
			continue;
		printk("%d -> %d\n", i, pci_to_OF_bus_map[i]);
	}
#endif
}
706 | |||
/* Predicate applied to each node visited by scan_OF_pci_childs(). */
typedef int (*pci_OF_scan_iterator)(struct device_node* node, void* data);

/*
 * Depth-first scan of an OF subtree (node and its siblings),
 * descending through PCI-PCI / CardBus bridges and the fake
 * "multifunc-device" parents some OFs create.  Returns the first
 * node for which filter() is true, or NULL if none matches.
 */
static struct device_node* __openfirmware
scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void* data)
{
	struct device_node* sub_node;

	for (; node != 0;node = node->sibling) {
		unsigned int *class_code;

		if (filter(node, data))
			return node;

		/* For PCI<->PCI bridges or CardBus bridges, we go down
		 * Note: some OFs create a parent node "multifunc-device" as
		 * a fake root for all functions of a multi-function device,
		 * we go down them as well.
		 */
		class_code = (unsigned int *) get_property(node, "class-code", NULL);
		if ((!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
			(*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) &&
			strcmp(node->name, "multifunc-device"))
			continue;
		sub_node = scan_OF_pci_childs(node->child, filter, data);
		if (sub_node)
			return sub_node;
	}
	return NULL;
}
736 | |||
737 | static int | ||
738 | scan_OF_pci_childs_iterator(struct device_node* node, void* data) | ||
739 | { | ||
740 | unsigned int *reg; | ||
741 | u8* fdata = (u8*)data; | ||
742 | |||
743 | reg = (unsigned int *) get_property(node, "reg", NULL); | ||
744 | if (reg && ((reg[0] >> 8) & 0xff) == fdata[1] | ||
745 | && ((reg[0] >> 16) & 0xff) == fdata[0]) | ||
746 | return 1; | ||
747 | return 0; | ||
748 | } | ||
749 | |||
/*
 * Find, under node, the OF node whose "reg" property matches the
 * given OF bus number and devfn; returns NULL if not found.
 */
static struct device_node* __openfirmware
scan_OF_childs_for_device(struct device_node* node, u8 bus, u8 dev_fn)
{
	u8 filter_data[2] = {bus, dev_fn};

	return scan_OF_pci_childs(node, scan_OF_pci_childs_iterator, filter_data);
}
757 | |||
/*
 * Scans the OF tree for a device node matching a PCI device
 * (given by its bus and devfn).  Returns NULL when there is no OF,
 * no hose for the bus, no OF node attached to the hose, or no
 * matching child node.
 */
struct device_node *
pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
{
	struct pci_controller *hose;
	struct device_node *node;
	int busnr;

	if (!have_of)
		return NULL;

	/* Lookup the hose */
	busnr = bus->number;
	hose = pci_bus_to_hose(busnr);
	if (!hose)
		return NULL;

	/* Check it has an OF node associated */
	node = (struct device_node *) hose->arch_data;
	if (!node)
		return NULL;

	/* Fixup bus number according to what OF think it is. */
#ifdef CONFIG_PPC_PMAC
	/* The G5 need a special case here. Basically, we don't remap all
	 * busses on it so we don't create the pci-OF-map. However, we do
	 * remap the AGP bus and so have to deal with it. A future better
	 * fix has to be done by making the remapping per-host and always
	 * filling the pci_to_OF map. --BenH
	 */
	if (_machine == _MACH_Pmac && busnr >= 0xf0)
		busnr -= 0xf0;
	else
#endif
	if (pci_to_OF_bus_map)
		busnr = pci_to_OF_bus_map[busnr];
	if (busnr == 0xff)
		return NULL;	/* 0xff marks an unmapped bus */

	/* Now, look up children of the hose */
	return scan_OF_childs_for_device(node->child, busnr, devfn);
}
802 | |||
803 | struct device_node* | ||
804 | pci_device_to_OF_node(struct pci_dev *dev) | ||
805 | { | ||
806 | return pci_busdev_to_OF_node(dev->bus, dev->devfn); | ||
807 | } | ||
808 | |||
809 | /* This routine is meant to be used early during boot, when the | ||
810 | * PCI bus numbers have not yet been assigned, and you need to | ||
811 | * issue PCI config cycles to an OF device. | ||
812 | * It could also be used to "fix" RTAS config cycles if you want | ||
813 | * to set pci_assign_all_busses to 1 and still use RTAS for PCI | ||
814 | * config cycles. | ||
815 | */ | ||
816 | struct pci_controller* | ||
817 | pci_find_hose_for_OF_device(struct device_node* node) | ||
818 | { | ||
819 | if (!have_of) | ||
820 | return NULL; | ||
821 | while(node) { | ||
822 | struct pci_controller* hose; | ||
823 | for (hose=hose_head;hose;hose=hose->next) | ||
824 | if (hose->arch_data == node) | ||
825 | return hose; | ||
826 | node=node->parent; | ||
827 | } | ||
828 | return NULL; | ||
829 | } | ||
830 | |||
831 | static int __openfirmware | ||
832 | find_OF_pci_device_filter(struct device_node* node, void* data) | ||
833 | { | ||
834 | return ((void *)node == data); | ||
835 | } | ||
836 | |||
/*
 * Returns the PCI device matching a given OF node
 *
 * Fills in *bus and *devfn from the node's "reg" property after
 * verifying the node really sits under a PCI hose.  If the kernel
 * has renumbered buses (pci_to_OF_bus_map exists), *bus is
 * translated from the OF bus number back to the kernel one by
 * scanning all devices.  Returns 0 on success, -ENODEV otherwise.
 */
int
pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn)
{
	unsigned int *reg;
	struct pci_controller* hose;
	struct pci_dev* dev = NULL;

	if (!have_of)
		return -ENODEV;
	/* Make sure it's really a PCI device */
	hose = pci_find_hose_for_OF_device(node);
	if (!hose || !hose->arch_data)
		return -ENODEV;
	if (!scan_OF_pci_childs(((struct device_node*)hose->arch_data)->child,
			find_OF_pci_device_filter, (void *)node))
		return -ENODEV;
	reg = (unsigned int *) get_property(node, "reg", NULL);
	if (!reg)
		return -ENODEV;
	/* "reg" cell 0: bits 16-23 = bus, bits 8-15 = devfn */
	*bus = (reg[0] >> 16) & 0xff;
	*devfn = ((reg[0] >> 8) & 0xff);

	/* Ok, here we need some tweak. If we have already renumbered
	 * all busses, we can't rely on the OF bus number any more.
	 * the pci_to_OF_bus_map is not enough as several PCI busses
	 * may match the same OF bus number.
	 */
	if (!pci_to_OF_bus_map)
		return 0;
	while ((dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
		if (pci_to_OF_bus_map[dev->bus->number] != *bus)
			continue;
		if (dev->devfn != *devfn)
			continue;
		*bus = dev->bus->number;
		return 0;
	}
	return -ENODEV;
}
879 | |||
/*
 * Parse the "ranges" property of a host bridge OF node and set up
 * the hose's io_base_phys/io_base_virt, pci_mem_offset and I/O /
 * memory resources from it.  If primary, also records the ISA I/O
 * and memory bases.  Adjacent ranges are merged first to cope with
 * pmacs that export more than 3 (fortunately contiguous) ranges.
 */
void __init
pci_process_bridge_OF_ranges(struct pci_controller *hose,
			   struct device_node *dev, int primary)
{
	static unsigned int static_lc_ranges[256] __initdata;
	unsigned int *dt_ranges, *lc_ranges, *ranges, *prev;
	unsigned int size;
	int rlen = 0, orig_rlen;
	int memno = 0;
	struct resource *res;
	int np, na = prom_n_addr_cells(dev);
	np = na + 5;	/* cells per "ranges" element */

	/* First we try to merge ranges to fix a problem with some pmacs
	 * that can have more than 3 ranges, fortunately using contiguous
	 * addresses -- BenH
	 */
	dt_ranges = (unsigned int *) get_property(dev, "ranges", &rlen);
	if (!dt_ranges)
		return;
	/* Sanity check, though hopefully that never happens */
	if (rlen > sizeof(static_lc_ranges)) {
		printk(KERN_WARNING "OF ranges property too large !\n");
		rlen = sizeof(static_lc_ranges);
	}
	lc_ranges = static_lc_ranges;
	memcpy(lc_ranges, dt_ranges, rlen);
	orig_rlen = rlen;

	/* Let's work on a copy of the "ranges" property instead of damaging
	 * the device-tree image in memory
	 */
	ranges = lc_ranges;
	prev = NULL;
	while ((rlen -= np * sizeof(unsigned int)) >= 0) {
		if (prev) {
			/* Merge a range that continues exactly where the
			 * previous one ends (same space, contiguous PCI and
			 * CPU addresses); the merged-away entry is marked
			 * dead by zeroing its first cell.
			 */
			if (prev[0] == ranges[0] && prev[1] == ranges[1] &&
				(prev[2] + prev[na+4]) == ranges[2] &&
				(prev[na+2] + prev[na+4]) == ranges[na+2]) {
				prev[na+4] += ranges[na+4];
				ranges[0] = 0;
				ranges += np;
				continue;
			}
		}
		prev = ranges;
		ranges += np;
	}

	/*
	 * The ranges property is laid out as an array of elements,
	 * each of which comprises:
	 *   cells 0 - 2:	a PCI address
	 *   cells 3 or 3+4:	a CPU physical address
	 *			(size depending on dev->n_addr_cells)
	 *   cells 4+5 or 5+6:	the size of the range
	 */
	ranges = lc_ranges;
	rlen = orig_rlen;
	while (ranges && (rlen -= np * sizeof(unsigned int)) >= 0) {
		res = NULL;
		size = ranges[na+4];
		/* the top byte of the PCI address selects the space type */
		switch (ranges[0] >> 24) {
		case 1:		/* I/O space */
			if (ranges[2] != 0)
				break;
			hose->io_base_phys = ranges[na+2];
			/* limit I/O space to 16MB */
			if (size > 0x01000000)
				size = 0x01000000;
			hose->io_base_virt = ioremap(ranges[na+2], size);
			if (primary)
				isa_io_base = (unsigned long) hose->io_base_virt;
			res = &hose->io_resource;
			res->flags = IORESOURCE_IO;
			res->start = ranges[2];
			break;
		case 2:		/* memory space */
			memno = 0;
			if (ranges[1] == 0 && ranges[2] == 0
			    && ranges[na+4] <= (16 << 20)) {
				/* 1st 16MB, i.e. ISA memory area */
				if (primary)
					isa_mem_base = ranges[na+2];
				memno = 1;
			}
			/* find the first unused memory resource slot */
			while (memno < 3 && hose->mem_resources[memno].flags)
				++memno;
			if (memno == 0)
				hose->pci_mem_offset = ranges[na+2] - ranges[2];
			if (memno < 3) {
				res = &hose->mem_resources[memno];
				res->flags = IORESOURCE_MEM;
				res->start = ranges[na+2];
			}
			break;
		}
		if (res != NULL) {
			res->name = dev->full_name;
			res->end = res->start + size - 1;
			res->parent = NULL;
			res->sibling = NULL;
			res->child = NULL;
		}
		ranges += np;
	}
}
987 | |||
988 | /* We create the "pci-OF-bus-map" property now so it appears in the | ||
989 | * /proc device tree | ||
990 | */ | ||
991 | void __init | ||
992 | pci_create_OF_bus_map(void) | ||
993 | { | ||
994 | struct property* of_prop; | ||
995 | |||
996 | of_prop = (struct property*) alloc_bootmem(sizeof(struct property) + 256); | ||
997 | if (of_prop && find_path_device("/")) { | ||
998 | memset(of_prop, -1, sizeof(struct property) + 256); | ||
999 | of_prop->name = "pci-OF-bus-map"; | ||
1000 | of_prop->length = 256; | ||
1001 | of_prop->value = (unsigned char *)&of_prop[1]; | ||
1002 | prom_add_property(find_path_device("/"), of_prop); | ||
1003 | } | ||
1004 | } | ||
1005 | |||
/* sysfs "devspec" show method: prints the full OF path of the PCI
 * device, or nothing if the device has no OF node.
 */
static ssize_t pci_show_devspec(struct device *dev, char *buf)
{
	struct pci_dev *pdev;
	struct device_node *np;

	pdev = to_pci_dev (dev);
	np = pci_device_to_OF_node(pdev);
	if (np == NULL || np->full_name == NULL)
		return 0;
	/* NOTE(review): assumes full_name fits the sysfs page buffer;
	 * no bound is enforced here -- confirm against callers.
	 */
	return sprintf(buf, "%s", np->full_name);
}
static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
1018 | |||
1019 | #endif /* CONFIG_PPC_OF */ | ||
1020 | |||
/* Add sysfs properties */
void pcibios_add_platform_entries(struct pci_dev *pdev)
{
#ifdef CONFIG_PPC_OF
	/* expose the OF "devspec" path for this device */
	device_create_file(&pdev->dev, &dev_attr_devspec);
#endif /* CONFIG_PPC_OF */
}
1028 | |||
1029 | |||
1030 | #ifdef CONFIG_PPC_PMAC | ||
1031 | /* | ||
1032 | * This set of routines checks for PCI<->PCI bridges that have closed | ||
1033 | * IO resources and have child devices. It tries to re-open an IO | ||
1034 | * window on them. | ||
1035 | * | ||
1036 | * This is a _temporary_ fix to workaround a problem with Apple's OF | ||
1037 | * closing IO windows on P2P bridges when the OF drivers of cards | ||
1038 | * below this bridge don't claim any IO range (typically ATI or | ||
1039 | * Adaptec). | ||
1040 | * | ||
1041 | * A more complete fix would be to use drivers/pci/setup-bus.c, which | ||
1042 | * involves a working pcibios_fixup_pbus_ranges(), some more care about | ||
1043 | * ordering when creating the host bus resources, and maybe a few more | ||
1044 | * minor tweaks | ||
1045 | */ | ||
1046 | |||
/* Initialize bridges with base/limit values we have collected.
 * Re-opens the I/O window of a P2P bridge from bus->resource[0],
 * converting CPU addresses back to bus I/O addresses, and then
 * re-enables I/O decode on the bridge.
 */
static void __init
do_update_p2p_io_resource(struct pci_bus *bus, int enable_vga)
{
	struct pci_dev *bridge = bus->self;
	struct pci_controller* hose = (struct pci_controller *)bridge->sysdata;
	u32 l;
	u16 w;
	struct resource res;

	if (bus->resource[0] == NULL)
		return;
	res = *(bus->resource[0]);	/* local copy; adjusted below */

	DBG("Remapping Bus %d, bridge: %s\n", bus->number, pci_name(bridge));
	/* translate CPU addresses back into PCI I/O addresses */
	res.start -= ((unsigned long) hose->io_base_virt - isa_io_base);
	res.end -= ((unsigned long) hose->io_base_virt - isa_io_base);
	DBG(" IO window: %08lx-%08lx\n", res.start, res.end);

	/* Set up the top and bottom of the PCI I/O segment for this bus. */
	pci_read_config_dword(bridge, PCI_IO_BASE, &l);
	l &= 0xffff000f;
	l |= (res.start >> 8) & 0x00f0;
	l |= res.end & 0xf000;
	pci_write_config_dword(bridge, PCI_IO_BASE, l);

	if ((l & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		/* 32-bit I/O window: program the upper 16 bits too */
		l = (res.start >> 16) | (res.end & 0xffff0000);
		pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, l);
	}

	pci_read_config_word(bridge, PCI_COMMAND, &w);
	w |= PCI_COMMAND_IO;
	pci_write_config_word(bridge, PCI_COMMAND, w);

#if 0 /* Enabling this causes XFree 4.2.0 to hang during PCI probe */
	if (enable_vga) {
		pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &w);
		w |= PCI_BRIDGE_CTL_VGA;
		pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, w);
	}
#endif
}
1090 | |||
/* This function is pretty basic and actually quite broken for the
 * general case, it's enough for us right now though. It's supposed
 * to tell us if we need to open an IO range at all or not and what
 * size.
 *
 * Recursively walks the devices under @bus (and any subordinate
 * buses), setting *found_vga when a VGA-class device is seen and
 * growing res->end for every I/O BAR encountered. Returns nonzero
 * when at least one I/O resource was found below this bus.
 */
static int __init
check_for_io_childs(struct pci_bus *bus, struct resource* res, int *found_vga)
{
	struct pci_dev *dev;
	int i;
	int rc = 0;

/* Round res->end up to the next (size+1) boundary, then extend it by
 * size — i.e. reserve an aligned window of (size+1) bytes. */
#define push_end(res, size) do { unsigned long __sz = (size) ; \
	res->end = ((res->end + __sz) / (__sz + 1)) * (__sz + 1) + __sz; \
} while (0)

	list_for_each_entry(dev, &bus->devices, bus_list) {
		/* class here is base class + sub-class (prog-if dropped) */
		u16 class = dev->class >> 8;

		if (class == PCI_CLASS_DISPLAY_VGA ||
		    class == PCI_CLASS_NOT_DEFINED_VGA)
			*found_vga = 1;
		/* Recurse into any bridge that already has a subordinate bus */
		if (class >> 8 == PCI_BASE_CLASS_BRIDGE && dev->subordinate)
			rc |= check_for_io_childs(dev->subordinate, res, found_vga);
		/* CardBus bridges get a fixed 4K I/O window */
		if (class == PCI_CLASS_BRIDGE_CARDBUS)
			push_end(res, 0xfff);

		for (i=0; i<PCI_NUM_RESOURCES; i++) {
			struct resource *r;
			unsigned long r_size;

			/* Skip the bridge windows of P2P bridges; their
			 * children were already accounted for above */
			if (dev->class >> 8 == PCI_CLASS_BRIDGE_PCI
			    && i >= PCI_BRIDGE_RESOURCES)
				continue;
			r = &dev->resource[i];
			/* Each I/O BAR is granted at least a 4K window.
			 * NOTE(review): after this clamp r_size is never 0,
			 * so the "(r_size) != 0" test below is always true;
			 * presumably it was meant to run before the clamp. */
			r_size = r->end - r->start;
			if (r_size < 0xfff)
				r_size = 0xfff;
			if (r->flags & IORESOURCE_IO && (r_size) != 0) {
				rc = 1;
				push_end(res, r_size);
			}
		}
	}

	return rc;
}
1138 | |||
/* Here we scan all P2P bridges of a given level that have a closed
 * IO window. Note that the test for the presence of a VGA card should
 * be improved to take into account already configured P2P bridges,
 * currently, we don't see them and might end up configuring 2 bridges
 * with VGA pass through enabled
 *
 * For each child bus whose bridge has no I/O window but does have
 * children needing I/O, a window is sized (check_for_io_childs),
 * allocated out of the parent bus's I/O resource, and programmed
 * into the bridge (do_update_p2p_io_resource). Recurses depth-first
 * into every child bus regardless of whether a fixup was applied.
 */
static void __init
do_fixup_p2p_level(struct pci_bus *bus)
{
	struct pci_bus *b;
	int i, parent_io;
	int has_vga = 0;

	/* Find the first I/O resource of the parent bus; without one
	 * there is nothing to allocate child windows from. */
	for (parent_io=0; parent_io<4; parent_io++)
		if (bus->resource[parent_io]
		    && bus->resource[parent_io]->flags & IORESOURCE_IO)
			break;
	if (parent_io >= 4)
		return;

	list_for_each_entry(b, &bus->children, node) {
		struct pci_dev *d = b->self;
		struct pci_controller* hose = (struct pci_controller *)d->sysdata;
		struct resource *res = b->resource[0];
		struct resource tmp_res;
		unsigned long max;
		int found_vga = 0;

		/* tmp_res accumulates the size the children need; it
		 * starts degenerate (end == 0) and push_end grows it. */
		memset(&tmp_res, 0, sizeof(tmp_res));
		tmp_res.start = bus->resource[parent_io]->start;

		/* We don't let low addresses go through that closed P2P bridge, well,
		 * that may not be necessary but I feel safer that way
		 */
		if (tmp_res.start == 0)
			tmp_res.start = 0x1000;

		/* Only fix up a P2P bridge whose I/O window is closed
		 * (res->flags == 0) and which has children that want I/O */
		if (!list_empty(&b->devices) && res && res->flags == 0 &&
		    res != bus->resource[parent_io] &&
		    (d->class >> 8) == PCI_CLASS_BRIDGE_PCI &&
		    check_for_io_childs(b, &tmp_res, &found_vga)) {
			u8 io_base_lo;

			printk(KERN_INFO "Fixing up IO bus %s\n", b->name);

			/* Only one bridge per segment may pass VGA through */
			if (found_vga) {
				if (has_vga) {
					printk(KERN_WARNING "Skipping VGA, already active"
					       " on bus segment\n");
					found_vga = 0;
				} else
					has_vga = 1;
			}
			pci_read_config_byte(d, PCI_IO_BASE, &io_base_lo);

			/* Upper bound for the allocation: 16-bit or 32-bit
			 * I/O space depending on the bridge's decode type */
			if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32)
				max = ((unsigned long) hose->io_base_virt
					- isa_io_base) + 0xffffffff;
			else
				max = ((unsigned long) hose->io_base_virt
					- isa_io_base) + 0xffff;

			*res = tmp_res;
			res->flags = IORESOURCE_IO;
			res->name = b->name;

			/* Find a resource in the parent where we can allocate */
			for (i = 0 ; i < 4; i++) {
				struct resource *r = bus->resource[i];
				if (!r)
					continue;
				if ((r->flags & IORESOURCE_IO) == 0)
					continue;
				DBG("Trying to allocate from %08lx, size %08lx from parent"
				    " res %d: %08lx -> %08lx\n",
				    res->start, res->end, i, r->start, r->end);

				/* res->end + 1 is both the window size and
				 * its alignment requirement here */
				if (allocate_resource(r, res, res->end + 1, res->start, max,
					res->end + 1, NULL, NULL) < 0) {
					DBG("Failed !\n");
					continue;
				}
				do_update_p2p_io_resource(b, found_vga);
				break;
			}
		}
		do_fixup_p2p_level(b);
	}
}
1228 | |||
1229 | static void | ||
1230 | pcibios_fixup_p2p_bridges(void) | ||
1231 | { | ||
1232 | struct pci_bus *b; | ||
1233 | |||
1234 | list_for_each_entry(b, &pci_root_buses, node) | ||
1235 | do_fixup_p2p_level(b); | ||
1236 | } | ||
1237 | |||
1238 | #endif /* CONFIG_PPC_PMAC */ | ||
1239 | |||
/* Main PCI initialization entry point (subsys_initcall): scans every
 * recorded controller, builds the OF bus map when bus numbers were
 * reassigned, routes interrupts, runs platform fixups, and finally
 * allocates and assigns all resources. Ordering here matters: the
 * platform fixup must run before resource allocation.
 */
static int __init
pcibios_init(void)
{
	struct pci_controller *hose;
	struct pci_bus *bus;
	int next_busno;

	printk(KERN_INFO "PCI: Probing PCI hardware\n");

	/* Scan all of the recorded PCI controllers. */
	for (next_busno = 0, hose = hose_head; hose; hose = hose->next) {
		/* When renumbering, give each hose the next free bus */
		if (pci_assign_all_busses)
			hose->first_busno = next_busno;
		hose->last_busno = 0xff;
		bus = pci_scan_bus(hose->first_busno, hose->ops, hose);
		hose->last_busno = bus->subordinate;
		if (pci_assign_all_busses || next_busno <= hose->last_busno)
			next_busno = hose->last_busno + pcibios_assign_bus_offset;
	}
	pci_bus_count = next_busno;

	/* OpenFirmware based machines need a map of OF bus
	 * numbers vs. kernel bus numbers since we may have to
	 * remap them.
	 */
	if (pci_assign_all_busses && have_of)
		pcibios_make_OF_bus_map();

	/* Do machine dependent PCI interrupt routing */
	if (ppc_md.pci_swizzle && ppc_md.pci_map_irq)
		pci_fixup_irqs(ppc_md.pci_swizzle, ppc_md.pci_map_irq);

	/* Call machine dependent fixup */
	if (ppc_md.pcibios_fixup)
		ppc_md.pcibios_fixup();

	/* Allocate and assign resources */
	pcibios_allocate_bus_resources(&pci_root_buses);
	pcibios_allocate_resources(0);
	pcibios_allocate_resources(1);
#ifdef CONFIG_PPC_PMAC
	pcibios_fixup_p2p_bridges();
#endif /* CONFIG_PPC_PMAC */
	pcibios_assign_resources();

	/* Call machine dependent post-init code */
	if (ppc_md.pcibios_after_init)
		ppc_md.pcibios_after_init();

	return 0;
}

subsys_initcall(pcibios_init);
1293 | |||
/* Default interrupt-pin swizzle for devices behind P2P bridges: walk
 * up the bridge chain swizzling the pin at each hop, leave the
 * effective pin in *pinp, and return the slot (idsel) of the
 * top-most bridge — or of the device itself when it already sits on
 * the hose's root bus.
 */
unsigned char __init
common_swizzle(struct pci_dev *dev, unsigned char *pinp)
{
	struct pci_controller *hose = dev->sysdata;

	if (dev->bus->number != hose->first_busno) {
		u8 pin = *pinp;
		do {
			pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
			/* Move up the chain of bridges. */
			dev = dev->bus->self;
		} while (dev->bus->self);
		*pinp = pin;

		/* The slot is the idsel of the last bridge. */
	}
	return PCI_SLOT(dev->devfn);
}
1312 | |||
/* Arch hook called during resource assignment to adjust a proposed
 * resource placement; no adjustment is needed on PPC, so the proposed
 * start address is returned unchanged.
 */
unsigned long resource_fixup(struct pci_dev * dev, struct resource * res,
			unsigned long start, unsigned long size)
{
	return start;
}
1318 | |||
/* Called by the generic PCI core for every scanned bus. For a root
 * bus, publish the hose's I/O and memory resources (shifted into
 * CPU address space); for a subordinate bus, read the bridge windows
 * and translate them by the hose's I/O and memory offsets. Ends with
 * the machine-specific per-bus fixup, if any.
 */
void __init pcibios_fixup_bus(struct pci_bus *bus)
{
	struct pci_controller *hose = (struct pci_controller *) bus->sysdata;
	unsigned long io_offset;
	struct resource *res;
	int i;

	/* Offset from bus I/O addresses to CPU-visible addresses */
	io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
	if (bus->parent == NULL) {
		/* This is a host bridge - fill in its resources */
		hose->bus = bus;

		bus->resource[0] = res = &hose->io_resource;
		if (!res->flags) {
			/* Fall back to the full I/O space when the
			 * platform did not describe one */
			if (io_offset)
				printk(KERN_ERR "I/O resource not set for host"
				       " bridge %d\n", hose->index);
			res->start = 0;
			res->end = IO_SPACE_LIMIT;
			res->flags = IORESOURCE_IO;
		}
		res->start += io_offset;
		res->end += io_offset;

		for (i = 0; i < 3; ++i) {
			res = &hose->mem_resources[i];
			if (!res->flags) {
				/* Only the first memory resource gets a
				 * default; extra ones are optional */
				if (i > 0)
					continue;
				printk(KERN_ERR "Memory resource not set for "
				       "host bridge %d\n", hose->index);
				res->start = hose->pci_mem_offset;
				res->end = ~0U;
				res->flags = IORESOURCE_MEM;
			}
			bus->resource[i+1] = res;
		}
	} else {
		/* This is a subordinate bridge */
		pci_read_bridge_bases(bus);

		for (i = 0; i < 4; ++i) {
			if ((res = bus->resource[i]) == NULL)
				continue;
			if (!res->flags)
				continue;
			/* Translate bridge windows from bus to CPU space */
			if (io_offset && (res->flags & IORESOURCE_IO)) {
				res->start += io_offset;
				res->end += io_offset;
			} else if (hose->pci_mem_offset
				   && (res->flags & IORESOURCE_MEM)) {
				res->start += hose->pci_mem_offset;
				res->end += hose->pci_mem_offset;
			}
		}
	}

	if (ppc_md.pcibios_fixup_bus)
		ppc_md.pcibios_fixup_bus(bus);
}
1379 | |||
/* Parse arch-specific "pci=" boot options; none are recognized here,
 * so the option string is returned unconsumed to the generic code.
 */
char __init *pcibios_setup(char *str)
{
	return str;
}
1384 | |||
/* the next one is stolen from the alpha port... */
/* Record the routed IRQ number in the device's PCI_INTERRUPT_LINE
 * config register so drivers and firmware can read it back. */
void __init
pcibios_update_irq(struct pci_dev *dev, int irq)
{
	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
	/* XXX FIXME - update OF device tree node interrupt property */
}
1392 | |||
1393 | int pcibios_enable_device(struct pci_dev *dev, int mask) | ||
1394 | { | ||
1395 | u16 cmd, old_cmd; | ||
1396 | int idx; | ||
1397 | struct resource *r; | ||
1398 | |||
1399 | if (ppc_md.pcibios_enable_device_hook) | ||
1400 | if (ppc_md.pcibios_enable_device_hook(dev, 0)) | ||
1401 | return -EINVAL; | ||
1402 | |||
1403 | pci_read_config_word(dev, PCI_COMMAND, &cmd); | ||
1404 | old_cmd = cmd; | ||
1405 | for (idx=0; idx<6; idx++) { | ||
1406 | r = &dev->resource[idx]; | ||
1407 | if (r->flags & IORESOURCE_UNSET) { | ||
1408 | printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev)); | ||
1409 | return -EINVAL; | ||
1410 | } | ||
1411 | if (r->flags & IORESOURCE_IO) | ||
1412 | cmd |= PCI_COMMAND_IO; | ||
1413 | if (r->flags & IORESOURCE_MEM) | ||
1414 | cmd |= PCI_COMMAND_MEMORY; | ||
1415 | } | ||
1416 | if (cmd != old_cmd) { | ||
1417 | printk("PCI: Enabling device %s (%04x -> %04x)\n", | ||
1418 | pci_name(dev), old_cmd, cmd); | ||
1419 | pci_write_config_word(dev, PCI_COMMAND, cmd); | ||
1420 | } | ||
1421 | return 0; | ||
1422 | } | ||
1423 | |||
1424 | struct pci_controller* | ||
1425 | pci_bus_to_hose(int bus) | ||
1426 | { | ||
1427 | struct pci_controller* hose = hose_head; | ||
1428 | |||
1429 | for (; hose; hose = hose->next) | ||
1430 | if (bus >= hose->first_busno && bus <= hose->last_busno) | ||
1431 | return hose; | ||
1432 | return NULL; | ||
1433 | } | ||
1434 | |||
1435 | void* | ||
1436 | pci_bus_io_base(unsigned int bus) | ||
1437 | { | ||
1438 | struct pci_controller *hose; | ||
1439 | |||
1440 | hose = pci_bus_to_hose(bus); | ||
1441 | if (!hose) | ||
1442 | return NULL; | ||
1443 | return hose->io_base_virt; | ||
1444 | } | ||
1445 | |||
1446 | unsigned long | ||
1447 | pci_bus_io_base_phys(unsigned int bus) | ||
1448 | { | ||
1449 | struct pci_controller *hose; | ||
1450 | |||
1451 | hose = pci_bus_to_hose(bus); | ||
1452 | if (!hose) | ||
1453 | return 0; | ||
1454 | return hose->io_base_phys; | ||
1455 | } | ||
1456 | |||
1457 | unsigned long | ||
1458 | pci_bus_mem_base_phys(unsigned int bus) | ||
1459 | { | ||
1460 | struct pci_controller *hose; | ||
1461 | |||
1462 | hose = pci_bus_to_hose(bus); | ||
1463 | if (!hose) | ||
1464 | return 0; | ||
1465 | return hose->pci_mem_offset; | ||
1466 | } | ||
1467 | |||
1468 | unsigned long | ||
1469 | pci_resource_to_bus(struct pci_dev *pdev, struct resource *res) | ||
1470 | { | ||
1471 | /* Hack alert again ! See comments in chrp_pci.c | ||
1472 | */ | ||
1473 | struct pci_controller* hose = | ||
1474 | (struct pci_controller *)pdev->sysdata; | ||
1475 | if (hose && res->flags & IORESOURCE_MEM) | ||
1476 | return res->start - hose->pci_mem_offset; | ||
1477 | /* We may want to do something with IOs here... */ | ||
1478 | return res->start; | ||
1479 | } | ||
1480 | |||
1481 | |||
/* Validate an mmap offset against @dev's BARs: adds the hose's memory
 * or I/O offset to *offset, then searches the device's resources for
 * one of the matching type that contains it. On success returns the
 * matching resource and leaves *offset holding the final physical
 * address; returns NULL when the offset hits no resource.
 */
static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
					       unsigned long *offset,
					       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
	unsigned long io_offset = 0;
	int i, res_bit;

	if (hose == 0)
		return NULL;		/* should never happen */

	/* If memory, add on the PCI bridge address offset */
	if (mmap_state == pci_mmap_mem) {
		*offset += hose->pci_mem_offset;
		res_bit = IORESOURCE_MEM;
	} else {
		io_offset = (unsigned long)hose->io_base_virt;
		*offset += io_offset;
		res_bit = IORESOURCE_IO;
	}

	/*
	 * Check that the offset requested corresponds to one of the
	 * resources of the device.
	 */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *rp = &dev->resource[i];
		int flags = rp->flags;

		/* treat ROM as memory (should be already) */
		if (i == PCI_ROM_RESOURCE)
			flags |= IORESOURCE_MEM;

		/* Active and same type? */
		if ((flags & res_bit) == 0)
			continue;

		/* In the range of this resource? */
		if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
			continue;

		/* found it! construct the final physical address */
		if (mmap_state == pci_mmap_io)
			*offset += hose->io_base_phys - _IO_BASE;
		return rp;
	}

	return NULL;
}
1531 | |||
1532 | /* | ||
1533 | * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci | ||
1534 | * device mapping. | ||
1535 | */ | ||
1536 | static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp, | ||
1537 | pgprot_t protection, | ||
1538 | enum pci_mmap_state mmap_state, | ||
1539 | int write_combine) | ||
1540 | { | ||
1541 | unsigned long prot = pgprot_val(protection); | ||
1542 | |||
1543 | /* Write combine is always 0 on non-memory space mappings. On | ||
1544 | * memory space, if the user didn't pass 1, we check for a | ||
1545 | * "prefetchable" resource. This is a bit hackish, but we use | ||
1546 | * this to workaround the inability of /sysfs to provide a write | ||
1547 | * combine bit | ||
1548 | */ | ||
1549 | if (mmap_state != pci_mmap_mem) | ||
1550 | write_combine = 0; | ||
1551 | else if (write_combine == 0) { | ||
1552 | if (rp->flags & IORESOURCE_PREFETCH) | ||
1553 | write_combine = 1; | ||
1554 | } | ||
1555 | |||
1556 | /* XXX would be nice to have a way to ask for write-through */ | ||
1557 | prot |= _PAGE_NO_CACHE; | ||
1558 | if (write_combine) | ||
1559 | prot &= ~_PAGE_GUARDED; | ||
1560 | else | ||
1561 | prot |= _PAGE_GUARDED; | ||
1562 | |||
1563 | printk("PCI map for %s:%lx, prot: %lx\n", pci_name(dev), rp->start, | ||
1564 | prot); | ||
1565 | |||
1566 | return __pgprot(prot); | ||
1567 | } | ||
1568 | |||
1569 | /* | ||
1570 | * This one is used by /dev/mem and fbdev who have no clue about the | ||
1571 | * PCI device, it tries to find the PCI device first and calls the | ||
1572 | * above routine | ||
1573 | */ | ||
1574 | pgprot_t pci_phys_mem_access_prot(struct file *file, | ||
1575 | unsigned long offset, | ||
1576 | unsigned long size, | ||
1577 | pgprot_t protection) | ||
1578 | { | ||
1579 | struct pci_dev *pdev = NULL; | ||
1580 | struct resource *found = NULL; | ||
1581 | unsigned long prot = pgprot_val(protection); | ||
1582 | int i; | ||
1583 | |||
1584 | if (page_is_ram(offset >> PAGE_SHIFT)) | ||
1585 | return prot; | ||
1586 | |||
1587 | prot |= _PAGE_NO_CACHE | _PAGE_GUARDED; | ||
1588 | |||
1589 | for_each_pci_dev(pdev) { | ||
1590 | for (i = 0; i <= PCI_ROM_RESOURCE; i++) { | ||
1591 | struct resource *rp = &pdev->resource[i]; | ||
1592 | int flags = rp->flags; | ||
1593 | |||
1594 | /* Active and same type? */ | ||
1595 | if ((flags & IORESOURCE_MEM) == 0) | ||
1596 | continue; | ||
1597 | /* In the range of this resource? */ | ||
1598 | if (offset < (rp->start & PAGE_MASK) || | ||
1599 | offset > rp->end) | ||
1600 | continue; | ||
1601 | found = rp; | ||
1602 | break; | ||
1603 | } | ||
1604 | if (found) | ||
1605 | break; | ||
1606 | } | ||
1607 | if (found) { | ||
1608 | if (found->flags & IORESOURCE_PREFETCH) | ||
1609 | prot &= ~_PAGE_GUARDED; | ||
1610 | pci_dev_put(pdev); | ||
1611 | } | ||
1612 | |||
1613 | DBG("non-PCI map for %lx, prot: %lx\n", offset, prot); | ||
1614 | |||
1615 | return __pgprot(prot); | ||
1616 | } | ||
1617 | |||
1618 | |||
1619 | /* | ||
1620 | * Perform the actual remap of the pages for a PCI device mapping, as | ||
1621 | * appropriate for this architecture. The region in the process to map | ||
1622 | * is described by vm_start and vm_end members of VMA, the base physical | ||
1623 | * address is found in vm_pgoff. | ||
1624 | * The pci device structure is provided so that architectures may make mapping | ||
1625 | * decisions on a per-device or per-bus basis. | ||
1626 | * | ||
1627 | * Returns a negative error code on failure, zero on success. | ||
1628 | */ | ||
1629 | int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | ||
1630 | enum pci_mmap_state mmap_state, | ||
1631 | int write_combine) | ||
1632 | { | ||
1633 | unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; | ||
1634 | struct resource *rp; | ||
1635 | int ret; | ||
1636 | |||
1637 | rp = __pci_mmap_make_offset(dev, &offset, mmap_state); | ||
1638 | if (rp == NULL) | ||
1639 | return -EINVAL; | ||
1640 | |||
1641 | vma->vm_pgoff = offset >> PAGE_SHIFT; | ||
1642 | vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO; | ||
1643 | vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp, | ||
1644 | vma->vm_page_prot, | ||
1645 | mmap_state, write_combine); | ||
1646 | |||
1647 | ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, | ||
1648 | vma->vm_end - vma->vm_start, vma->vm_page_prot); | ||
1649 | |||
1650 | return ret; | ||
1651 | } | ||
1652 | |||
1653 | /* Obsolete functions. Should be removed once the symbios driver | ||
1654 | * is fixed | ||
1655 | */ | ||
1656 | unsigned long | ||
1657 | phys_to_bus(unsigned long pa) | ||
1658 | { | ||
1659 | struct pci_controller *hose; | ||
1660 | int i; | ||
1661 | |||
1662 | for (hose = hose_head; hose; hose = hose->next) { | ||
1663 | for (i = 0; i < 3; ++i) { | ||
1664 | if (pa >= hose->mem_resources[i].start | ||
1665 | && pa <= hose->mem_resources[i].end) { | ||
1666 | /* | ||
1667 | * XXX the hose->pci_mem_offset really | ||
1668 | * only applies to mem_resources[0]. | ||
1669 | * We need a way to store an offset for | ||
1670 | * the others. -- paulus | ||
1671 | */ | ||
1672 | if (i == 0) | ||
1673 | pa -= hose->pci_mem_offset; | ||
1674 | return pa; | ||
1675 | } | ||
1676 | } | ||
1677 | } | ||
1678 | /* hmmm, didn't find it */ | ||
1679 | return 0; | ||
1680 | } | ||
1681 | |||
1682 | unsigned long | ||
1683 | pci_phys_to_bus(unsigned long pa, int busnr) | ||
1684 | { | ||
1685 | struct pci_controller* hose = pci_bus_to_hose(busnr); | ||
1686 | if (!hose) | ||
1687 | return pa; | ||
1688 | return pa - hose->pci_mem_offset; | ||
1689 | } | ||
1690 | |||
1691 | unsigned long | ||
1692 | pci_bus_to_phys(unsigned int ba, int busnr) | ||
1693 | { | ||
1694 | struct pci_controller* hose = pci_bus_to_hose(busnr); | ||
1695 | if (!hose) | ||
1696 | return ba; | ||
1697 | return ba + hose->pci_mem_offset; | ||
1698 | } | ||
1699 | |||
/* Provide information on locations of various I/O regions in physical
 * memory. Do this on a per-card basis so that we choose the right
 * root bridge.
 * Note that the returned IO or memory base is a physical address
 */

long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
{
	struct pci_controller* hose;
	long result = -EOPNOTSUPP;

	/* Argh ! Please forgive me for that hack, but that's the
	 * simplest way to get existing XFree to not lockup on some
	 * G5 machines... So when something asks for bus 0 io base
	 * (bus 0 is HT root), we return the AGP one instead.
	 */
#ifdef CONFIG_PPC_PMAC
	if (_machine == _MACH_Pmac && machine_is_compatible("MacRISC4"))
		if (bus == 0)
			bus = 0xf0;
#endif /* CONFIG_PPC_PMAC */

	hose = pci_bus_to_hose(bus);
	if (!hose)
		return -ENODEV;

	/* Answer the specific query; anything unrecognized falls
	 * through to -EOPNOTSUPP below */
	switch (which) {
	case IOBASE_BRIDGE_NUMBER:
		return (long)hose->first_busno;
	case IOBASE_MEMORY:
		return (long)hose->pci_mem_offset;
	case IOBASE_IO:
		return (long)hose->io_base_phys;
	case IOBASE_ISA_IO:
		return (long)isa_io_base;
	case IOBASE_ISA_MEM:
		return (long)isa_mem_base;
	}

	return result;
}
1741 | |||
1742 | void __init | ||
1743 | pci_init_resource(struct resource *res, unsigned long start, unsigned long end, | ||
1744 | int flags, char *name) | ||
1745 | { | ||
1746 | res->start = start; | ||
1747 | res->end = end; | ||
1748 | res->flags = flags; | ||
1749 | res->name = name; | ||
1750 | res->parent = NULL; | ||
1751 | res->sibling = NULL; | ||
1752 | res->child = NULL; | ||
1753 | } | ||
1754 | |||
1755 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max) | ||
1756 | { | ||
1757 | unsigned long start = pci_resource_start(dev, bar); | ||
1758 | unsigned long len = pci_resource_len(dev, bar); | ||
1759 | unsigned long flags = pci_resource_flags(dev, bar); | ||
1760 | |||
1761 | if (!len) | ||
1762 | return NULL; | ||
1763 | if (max && len > max) | ||
1764 | len = max; | ||
1765 | if (flags & IORESOURCE_IO) | ||
1766 | return ioport_map(start, len); | ||
1767 | if (flags & IORESOURCE_MEM) | ||
1768 | /* Not checking IORESOURCE_CACHEABLE because PPC does | ||
1769 | * not currently distinguish between ioremap and | ||
1770 | * ioremap_nocache. | ||
1771 | */ | ||
1772 | return ioremap(start, len); | ||
1773 | /* What? */ | ||
1774 | return NULL; | ||
1775 | } | ||
1776 | |||
/* Counterpart of pci_iomap(); intentionally a no-op on this platform
 * (mappings are not released per-device here).
 */
void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
{
	/* Nothing to do */
}
EXPORT_SYMBOL(pci_iomap);
EXPORT_SYMBOL(pci_iounmap);
1783 | |||
1784 | |||
1785 | /* | ||
1786 | * Null PCI config access functions, for the case when we can't | ||
1787 | * find a hose. | ||
1788 | */ | ||
1789 | #define NULL_PCI_OP(rw, size, type) \ | ||
1790 | static int \ | ||
1791 | null_##rw##_config_##size(struct pci_dev *dev, int offset, type val) \ | ||
1792 | { \ | ||
1793 | return PCIBIOS_DEVICE_NOT_FOUND; \ | ||
1794 | } | ||
1795 | |||
/* Bus-level null config read: always fails with DEVICE_NOT_FOUND;
 * installed via null_pci_ops when no hose matches a bus. */
static int
null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
		 int len, u32 *val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}
1802 | |||
/* Bus-level null config write: always fails with DEVICE_NOT_FOUND;
 * installed via null_pci_ops when no hose matches a bus. */
static int
null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
		  int len, u32 val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}
1809 | |||
/* Fallback pci_ops (positional init: .read, .write) used by
 * fake_pci_bus() when a bus has no controller behind it. */
static struct pci_ops null_pci_ops =
{
	null_read_config,
	null_write_config
};
1815 | |||
1816 | /* | ||
1817 | * These functions are used early on before PCI scanning is done | ||
1818 | * and all of the pci_dev and pci_bus structures have been created. | ||
1819 | */ | ||
1820 | static struct pci_bus * | ||
1821 | fake_pci_bus(struct pci_controller *hose, int busnr) | ||
1822 | { | ||
1823 | static struct pci_bus bus; | ||
1824 | |||
1825 | if (hose == 0) { | ||
1826 | hose = pci_bus_to_hose(busnr); | ||
1827 | if (hose == 0) | ||
1828 | printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr); | ||
1829 | } | ||
1830 | bus.number = busnr; | ||
1831 | bus.sysdata = hose; | ||
1832 | bus.ops = hose? hose->ops: &null_pci_ops; | ||
1833 | return &bus; | ||
1834 | } | ||
1835 | |||
/* Generate early_{read,write}_config_{byte,word,dword}(): wrappers
 * that let code touch config space before pci_dev/pci_bus structures
 * exist, by routing through a fake bus built from the hose. */
#define EARLY_PCI_OP(rw, size, type)					\
int early_##rw##_config_##size(struct pci_controller *hose, int bus,	\
			       int devfn, int offset, type value)	\
{									\
	return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus),	\
					    devfn, offset, value);	\
}

EARLY_PCI_OP(read, byte, u8 *)
EARLY_PCI_OP(read, word, u16 *)
EARLY_PCI_OP(read, dword, u32 *)
EARLY_PCI_OP(write, byte, u8)
EARLY_PCI_OP(write, word, u16)
EARLY_PCI_OP(write, dword, u32)
diff --git a/arch/ppc/kernel/perfmon.c b/arch/ppc/kernel/perfmon.c new file mode 100644 index 000000000000..918f6b252e45 --- /dev/null +++ b/arch/ppc/kernel/perfmon.c | |||
@@ -0,0 +1,93 @@ | |||
1 | /* kernel/perfmon.c | ||
2 | * PPC 32 Performance Monitor Infrastructure | ||
3 | * | ||
4 | * Author: Andy Fleming | ||
5 | * Copyright (c) 2004 Freescale Semiconductor, Inc | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version | ||
10 | * 2 of the License, or (at your option) any later version. | ||
11 | */ | ||
12 | |||
13 | #include <linux/errno.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/mm.h> | ||
17 | #include <linux/stddef.h> | ||
18 | #include <linux/unistd.h> | ||
19 | #include <linux/ptrace.h> | ||
20 | #include <linux/slab.h> | ||
21 | #include <linux/user.h> | ||
22 | #include <linux/a.out.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/config.h> | ||
25 | #include <linux/init.h> | ||
26 | #include <linux/module.h> | ||
27 | #include <linux/prctl.h> | ||
28 | |||
29 | #include <asm/pgtable.h> | ||
30 | #include <asm/uaccess.h> | ||
31 | #include <asm/system.h> | ||
32 | #include <asm/io.h> | ||
33 | #include <asm/reg.h> | ||
34 | #include <asm/xmon.h> | ||
35 | |||
/* A lock to regulate grabbing the interrupt */
DEFINE_SPINLOCK(perfmon_lock);

#ifdef CONFIG_FSL_BOOKE
/* Default PM interrupt handler for Freescale Book-E: mask further
 * performance monitor interrupts by clearing the interrupt-enable
 * bit in global control register PMGC0. */
static void dummy_perf(struct pt_regs *regs)
{
	unsigned int pmgc0 = mfpmr(PMRN_PMGC0);

	pmgc0 &= ~PMGC0_PMIE;
	mtpmr(PMRN_PMGC0, pmgc0);
}

#else
/* Ensure exceptions are disabled */

/* Default PM interrupt handler for classic PPC: mask further
 * performance monitor exceptions by clearing MMCR0[PMXE]. */
static void dummy_perf(struct pt_regs *regs)
{
	unsigned int mmcr0 = mfspr(SPRN_MMCR0);

	mmcr0 &= ~MMCR0_PMXE;
	mtspr(SPRN_MMCR0, mmcr0);
}
#endif
59 | |||
/* Currently installed performance monitor interrupt handler; defaults
 * to dummy_perf, which just masks further PM interrupts. */
void (*perf_irq)(struct pt_regs *) = dummy_perf;
61 | |||
62 | /* Grab the interrupt, if it's free. | ||
63 | * Returns 0 on success, -1 if the interrupt is taken already */ | ||
64 | int request_perfmon_irq(void (*handler)(struct pt_regs *)) | ||
65 | { | ||
66 | int err = 0; | ||
67 | |||
68 | spin_lock(&perfmon_lock); | ||
69 | |||
70 | if (perf_irq == dummy_perf) | ||
71 | perf_irq = handler; | ||
72 | else { | ||
73 | pr_info("perfmon irq already handled by %p\n", perf_irq); | ||
74 | err = -1; | ||
75 | } | ||
76 | |||
77 | spin_unlock(&perfmon_lock); | ||
78 | |||
79 | return err; | ||
80 | } | ||
81 | |||
/* Release the performance monitor interrupt by re-installing the
 * dummy handler; safe to call even if the caller never owned it. */
void free_perfmon_irq(void)
{
	spin_lock(&perfmon_lock);

	perf_irq = dummy_perf;

	spin_unlock(&perfmon_lock);
}

EXPORT_SYMBOL(perf_irq);
EXPORT_SYMBOL(request_perfmon_irq);
EXPORT_SYMBOL(free_perfmon_irq);
diff --git a/arch/ppc/kernel/perfmon_fsl_booke.c b/arch/ppc/kernel/perfmon_fsl_booke.c new file mode 100644 index 000000000000..03526bfb0840 --- /dev/null +++ b/arch/ppc/kernel/perfmon_fsl_booke.c | |||
@@ -0,0 +1,222 @@ | |||
1 | /* kernel/perfmon_fsl_booke.c | ||
2 | * Freescale Book-E Performance Monitor code | ||
3 | * | ||
4 | * Author: Andy Fleming | ||
5 | * Copyright (c) 2004 Freescale Semiconductor, Inc | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version | ||
10 | * 2 of the License, or (at your option) any later version. | ||
11 | */ | ||
12 | |||
13 | #include <linux/errno.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/mm.h> | ||
17 | #include <linux/stddef.h> | ||
18 | #include <linux/unistd.h> | ||
19 | #include <linux/ptrace.h> | ||
20 | #include <linux/slab.h> | ||
21 | #include <linux/user.h> | ||
22 | #include <linux/a.out.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/config.h> | ||
25 | #include <linux/init.h> | ||
26 | #include <linux/module.h> | ||
27 | #include <linux/prctl.h> | ||
28 | |||
29 | #include <asm/pgtable.h> | ||
30 | #include <asm/uaccess.h> | ||
31 | #include <asm/system.h> | ||
32 | #include <asm/io.h> | ||
33 | #include <asm/reg.h> | ||
34 | #include <asm/xmon.h> | ||
35 | #include <asm/perfmon.h> | ||
36 | |||
37 | static inline u32 get_pmlca(int ctr); | ||
38 | static inline void set_pmlca(int ctr, u32 pmlca); | ||
39 | |||
/* Read back the local control A (PMLCA) register of performance
 * monitor counter 'ctr'.  Only counters 0-3 exist; any other value
 * panics.  Each counter has its own named PMR, hence the switch. */
static inline u32 get_pmlca(int ctr)
{
	u32 pmlca;

	switch (ctr) {
		case 0:
			pmlca = mfpmr(PMRN_PMLCA0);
			break;
		case 1:
			pmlca = mfpmr(PMRN_PMLCA1);
			break;
		case 2:
			pmlca = mfpmr(PMRN_PMLCA2);
			break;
		case 3:
			pmlca = mfpmr(PMRN_PMLCA3);
			break;
		default:
			panic("Bad ctr number\n");
	}

	return pmlca;
}
63 | |||
/* Write 'pmlca' into the local control A register of performance
 * monitor counter 'ctr' (0-3); any other counter number panics. */
static inline void set_pmlca(int ctr, u32 pmlca)
{
	switch (ctr) {
		case 0:
			mtpmr(PMRN_PMLCA0, pmlca);
			break;
		case 1:
			mtpmr(PMRN_PMLCA1, pmlca);
			break;
		case 2:
			mtpmr(PMRN_PMLCA2, pmlca);
			break;
		case 3:
			mtpmr(PMRN_PMLCA3, pmlca);
			break;
		default:
			panic("Bad ctr number\n");
	}
}
83 | |||
/* Put counter 'ctr' into a fully frozen state: set PMLCA_FC plus all
 * the conditional freeze bits (supervisor, user, mark0, mark1) so it
 * counts in no state, and zero its PMLCB register.  Panics on a
 * counter number other than 0-3. */
void init_pmc_stop(int ctr)
{
	u32 pmlca = (PMLCA_FC | PMLCA_FCS | PMLCA_FCU |
			PMLCA_FCM1 | PMLCA_FCM0);
	u32 pmlcb = 0;

	switch (ctr) {
		case 0:
			mtpmr(PMRN_PMLCA0, pmlca);
			mtpmr(PMRN_PMLCB0, pmlcb);
			break;
		case 1:
			mtpmr(PMRN_PMLCA1, pmlca);
			mtpmr(PMRN_PMLCB1, pmlcb);
			break;
		case 2:
			mtpmr(PMRN_PMLCA2, pmlca);
			mtpmr(PMRN_PMLCB2, pmlcb);
			break;
		case 3:
			mtpmr(PMRN_PMLCA3, pmlca);
			mtpmr(PMRN_PMLCB3, pmlcb);
			break;
		default:
			panic("Bad ctr number!\n");
	}
}
111 | |||
112 | void set_pmc_event(int ctr, int event) | ||
113 | { | ||
114 | u32 pmlca; | ||
115 | |||
116 | pmlca = get_pmlca(ctr); | ||
117 | |||
118 | pmlca = (pmlca & ~PMLCA_EVENT_MASK) | | ||
119 | ((event << PMLCA_EVENT_SHIFT) & | ||
120 | PMLCA_EVENT_MASK); | ||
121 | |||
122 | set_pmlca(ctr, pmlca); | ||
123 | } | ||
124 | |||
125 | void set_pmc_user_kernel(int ctr, int user, int kernel) | ||
126 | { | ||
127 | u32 pmlca; | ||
128 | |||
129 | pmlca = get_pmlca(ctr); | ||
130 | |||
131 | if(user) | ||
132 | pmlca &= ~PMLCA_FCU; | ||
133 | else | ||
134 | pmlca |= PMLCA_FCU; | ||
135 | |||
136 | if(kernel) | ||
137 | pmlca &= ~PMLCA_FCS; | ||
138 | else | ||
139 | pmlca |= PMLCA_FCS; | ||
140 | |||
141 | set_pmlca(ctr, pmlca); | ||
142 | } | ||
143 | |||
144 | void set_pmc_marked(int ctr, int mark0, int mark1) | ||
145 | { | ||
146 | u32 pmlca = get_pmlca(ctr); | ||
147 | |||
148 | if(mark0) | ||
149 | pmlca &= ~PMLCA_FCM0; | ||
150 | else | ||
151 | pmlca |= PMLCA_FCM0; | ||
152 | |||
153 | if(mark1) | ||
154 | pmlca &= ~PMLCA_FCM1; | ||
155 | else | ||
156 | pmlca |= PMLCA_FCM1; | ||
157 | |||
158 | set_pmlca(ctr, pmlca); | ||
159 | } | ||
160 | |||
161 | void pmc_start_ctr(int ctr, int enable) | ||
162 | { | ||
163 | u32 pmlca = get_pmlca(ctr); | ||
164 | |||
165 | pmlca &= ~PMLCA_FC; | ||
166 | |||
167 | if (enable) | ||
168 | pmlca |= PMLCA_CE; | ||
169 | else | ||
170 | pmlca &= ~PMLCA_CE; | ||
171 | |||
172 | set_pmlca(ctr, pmlca); | ||
173 | } | ||
174 | |||
175 | void pmc_start_ctrs(int enable) | ||
176 | { | ||
177 | u32 pmgc0 = mfpmr(PMRN_PMGC0); | ||
178 | |||
179 | pmgc0 &= ~PMGC0_FAC; | ||
180 | pmgc0 |= PMGC0_FCECE; | ||
181 | |||
182 | if (enable) | ||
183 | pmgc0 |= PMGC0_PMIE; | ||
184 | else | ||
185 | pmgc0 &= ~PMGC0_PMIE; | ||
186 | |||
187 | mtpmr(PMRN_PMGC0, pmgc0); | ||
188 | } | ||
189 | |||
190 | void pmc_stop_ctrs(void) | ||
191 | { | ||
192 | u32 pmgc0 = mfpmr(PMRN_PMGC0); | ||
193 | |||
194 | pmgc0 |= PMGC0_FAC; | ||
195 | |||
196 | pmgc0 &= ~(PMGC0_PMIE | PMGC0_FCECE); | ||
197 | |||
198 | mtpmr(PMRN_PMGC0, pmgc0); | ||
199 | } | ||
200 | |||
/* Print the global control register plus each counter's value and its
 * two local control registers to the console, for debugging. */
void dump_pmcs(void)
{
	printk("pmgc0: %x\n", mfpmr(PMRN_PMGC0));
	printk("pmc\t\tpmlca\t\tpmlcb\n");
	printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC0),
			mfpmr(PMRN_PMLCA0), mfpmr(PMRN_PMLCB0));
	printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC1),
			mfpmr(PMRN_PMLCA1), mfpmr(PMRN_PMLCB1));
	printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC2),
			mfpmr(PMRN_PMLCA2), mfpmr(PMRN_PMLCB2));
	printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC3),
			mfpmr(PMRN_PMLCA3), mfpmr(PMRN_PMLCB3));
}
214 | |||
215 | EXPORT_SYMBOL(init_pmc_stop); | ||
216 | EXPORT_SYMBOL(set_pmc_event); | ||
217 | EXPORT_SYMBOL(set_pmc_user_kernel); | ||
218 | EXPORT_SYMBOL(set_pmc_marked); | ||
219 | EXPORT_SYMBOL(pmc_start_ctr); | ||
220 | EXPORT_SYMBOL(pmc_start_ctrs); | ||
221 | EXPORT_SYMBOL(pmc_stop_ctrs); | ||
222 | EXPORT_SYMBOL(dump_pmcs); | ||
diff --git a/arch/ppc/kernel/ppc-stub.c b/arch/ppc/kernel/ppc-stub.c new file mode 100644 index 000000000000..d61889c24046 --- /dev/null +++ b/arch/ppc/kernel/ppc-stub.c | |||
@@ -0,0 +1,867 @@ | |||
1 | /* | ||
2 | * ppc-stub.c: KGDB support for the Linux kernel. | ||
3 | * | ||
4 | * adapted from arch/sparc/kernel/sparc-stub.c for the PowerPC | ||
5 | * some stuff borrowed from Paul Mackerras' xmon | ||
6 | * Copyright (C) 1998 Michael AK Tesch (tesch@cs.wisc.edu) | ||
7 | * | ||
8 | * Modifications to run under Linux | ||
9 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) | ||
10 | * | ||
11 | * This file originally came from the gdb sources, and the | ||
12 | * copyright notices have been retained below. | ||
13 | */ | ||
14 | |||
15 | /**************************************************************************** | ||
16 | |||
17 | THIS SOFTWARE IS NOT COPYRIGHTED | ||
18 | |||
19 | HP offers the following for use in the public domain. HP makes no | ||
20 | warranty with regard to the software or its performance and the | ||
21 | user accepts the software "AS IS" with all faults. | ||
22 | |||
23 | HP DISCLAIMS ANY WARRANTIES, EXPRESS OR IMPLIED, WITH REGARD | ||
24 | TO THIS SOFTWARE INCLUDING BUT NOT LIMITED TO THE WARRANTIES | ||
25 | OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
26 | |||
27 | ****************************************************************************/ | ||
28 | |||
29 | /**************************************************************************** | ||
30 | * Header: remcom.c,v 1.34 91/03/09 12:29:49 glenne Exp $ | ||
31 | * | ||
32 | * Module name: remcom.c $ | ||
33 | * Revision: 1.34 $ | ||
34 | * Date: 91/03/09 12:29:49 $ | ||
35 | * Contributor: Lake Stevens Instrument Division$ | ||
36 | * | ||
37 | * Description: low level support for gdb debugger. $ | ||
38 | * | ||
39 | * Considerations: only works on target hardware $ | ||
40 | * | ||
41 | * Written by: Glenn Engel $ | ||
42 | * ModuleState: Experimental $ | ||
43 | * | ||
44 | * NOTES: See Below $ | ||
45 | * | ||
46 | * Modified for SPARC by Stu Grossman, Cygnus Support. | ||
47 | * | ||
48 | * This code has been extensively tested on the Fujitsu SPARClite demo board. | ||
49 | * | ||
50 | * To enable debugger support, two things need to happen. One, a | ||
51 | * call to set_debug_traps() is necessary in order to allow any breakpoints | ||
52 | * or error conditions to be properly intercepted and reported to gdb. | ||
53 | * Two, a breakpoint needs to be generated to begin communication. This | ||
54 | * is most easily accomplished by a call to breakpoint(). Breakpoint() | ||
55 | * simulates a breakpoint by executing a trap #1. | ||
56 | * | ||
57 | ************* | ||
58 | * | ||
59 | * The following gdb commands are supported: | ||
60 | * | ||
61 | * command function Return value | ||
62 | * | ||
63 | * g return the value of the CPU registers hex data or ENN | ||
64 | * G set the value of the CPU registers OK or ENN | ||
65 | * qOffsets Get section offsets. Reply is Text=xxx;Data=yyy;Bss=zzz | ||
66 | * | ||
67 | * mAA..AA,LLLL Read LLLL bytes at address AA..AA hex data or ENN | ||
68 | * MAA..AA,LLLL: Write LLLL bytes at address AA.AA OK or ENN | ||
69 | * | ||
70 | * c Resume at current address SNN ( signal NN) | ||
71 | * cAA..AA Continue at address AA..AA SNN | ||
72 | * | ||
73 | * s Step one instruction SNN | ||
74 | * sAA..AA Step one instruction from AA..AA SNN | ||
75 | * | ||
76 | * k kill | ||
77 | * | ||
78 | * ? What was the last sigval ? SNN (signal NN) | ||
79 | * | ||
80 | * bBB..BB Set baud rate to BB..BB OK or BNN, then sets | ||
81 | * baud rate | ||
82 | * | ||
83 | * All commands and responses are sent with a packet which includes a | ||
84 | * checksum. A packet consists of | ||
85 | * | ||
86 | * $<packet info>#<checksum>. | ||
87 | * | ||
88 | * where | ||
89 | * <packet info> :: <characters representing the command or response> | ||
90 | * <checksum> :: <two hex digits computed as modulo 256 sum of <packetinfo>> | ||
91 | * | ||
92 | * When a packet is received, it is first acknowledged with either '+' or '-'. | ||
93 | * '+' indicates a successful transfer. '-' indicates a failed transfer. | ||
94 | * | ||
95 | * Example: | ||
96 | * | ||
97 | * Host: Reply: | ||
98 | * $m0,10#2a +$00010203040506070809101112131415#42 | ||
99 | * | ||
100 | ****************************************************************************/ | ||
101 | |||
102 | #include <linux/config.h> | ||
103 | #include <linux/kernel.h> | ||
104 | #include <linux/string.h> | ||
105 | #include <linux/mm.h> | ||
106 | #include <linux/smp.h> | ||
107 | #include <linux/smp_lock.h> | ||
108 | #include <linux/init.h> | ||
109 | #include <linux/sysrq.h> | ||
110 | |||
111 | #include <asm/cacheflush.h> | ||
112 | #include <asm/system.h> | ||
113 | #include <asm/signal.h> | ||
114 | #include <asm/kgdb.h> | ||
115 | #include <asm/pgtable.h> | ||
116 | #include <asm/ptrace.h> | ||
117 | |||
118 | void breakinst(void); | ||
119 | |||
120 | /* | ||
121 | * BUFMAX defines the maximum number of characters in inbound/outbound buffers | ||
122 | * at least NUMREGBYTES*2 are needed for register packets | ||
123 | */ | ||
124 | #define BUFMAX 2048 | ||
125 | static char remcomInBuffer[BUFMAX]; | ||
126 | static char remcomOutBuffer[BUFMAX]; | ||
127 | |||
128 | static int initialized; | ||
129 | static int kgdb_active; | ||
130 | static int kgdb_started; | ||
131 | static u_int fault_jmp_buf[100]; | ||
132 | static int kdebug; | ||
133 | |||
134 | |||
135 | static const char hexchars[]="0123456789abcdef"; | ||
136 | |||
137 | /* Place where we save old trap entries for restoration - sparc*/ | ||
138 | /* struct tt_entry kgdb_savettable[256]; */ | ||
139 | /* typedef void (*trapfunc_t)(void); */ | ||
140 | |||
141 | static void kgdb_fault_handler(struct pt_regs *regs); | ||
142 | static int handle_exception (struct pt_regs *regs); | ||
143 | |||
144 | #if 0 | ||
145 | /* Install an exception handler for kgdb */ | ||
146 | static void exceptionHandler(int tnum, unsigned int *tfunc) | ||
147 | { | ||
148 | /* We are dorking with a live trap table, all irqs off */ | ||
149 | } | ||
150 | #endif | ||
151 | |||
/*
 * Minimal setjmp used for kgdb's fault recovery.  Per the asm below it
 * saves LR at buf[0], r1 (SP) at buf[1], r2 at buf[2], CR at buf[3]
 * and the non-volatile registers r13-r31 from buf[4] onward.  Returns
 * 0 on the direct call; a non-zero return arrives via kgdb_longjmp().
 */
int
kgdb_setjmp(long *buf)
{
	asm ("mflr 0; stw 0,0(%0);"
	     "stw 1,4(%0); stw 2,8(%0);"
	     "mfcr 0; stw 0,12(%0);"
	     "stmw 13,16(%0)"
	     : : "r" (buf));
	/* XXX should save fp regs as well */
	return 0;
}
/*
 * Counterpart to kgdb_setjmp(): restores r13-r31, CR, LR, r1 and r2
 * from buf and branches back via the saved LR with r3 = val, so the
 * matching kgdb_setjmp() appears to return val.  val == 0 is forced
 * to 1 so the caller can never mistake it for the direct return.
 */
void
kgdb_longjmp(long *buf, int val)
{
	if (val == 0)
		val = 1;
	asm ("lmw 13,16(%0);"
	     "lwz 0,12(%0); mtcrf 0x38,0;"
	     "lwz 0,0(%0); lwz 1,4(%0); lwz 2,8(%0);"
	     "mtlr 0; mr 3,%1"
	     : : "r" (buf), "r" (val));
}
/* Map an ASCII hex digit (upper or lower case) to its numeric value;
 * returns -1 for any character that is not a hex digit. */
static int
hex(unsigned char ch)
{
	if (ch >= '0' && ch <= '9')
		return ch - '0';
	if (ch >= 'a' && ch <= 'f')
		return ch - 'a' + 10;
	if (ch >= 'A' && ch <= 'F')
		return ch - 'A' + 10;
	return -1;
}
186 | |||
/* Convert the memory pointed to by mem into hex, placing result in buf.
 * Return a pointer to the last char put in buf (null), in case of mem fault,
 * return 0.
 *
 * NOTE(review): the fault path below does NOT actually return 0 -- after
 * the longjmp back, control falls through to the common exit and returns
 * buf just like the success path, so callers that test the result for a
 * fault (e.g. the 'm' packet handler) can never see one.  Flagged for
 * confirmation, behaviour left unchanged here.
 */
static unsigned char *
mem2hex(const char *mem, char *buf, int count)
{
	unsigned char ch;
	unsigned short tmp_s;
	unsigned long tmp_l;

	/* Arm the kgdb fault handler: a bad load longjmps back here with
	 * a non-zero setjmp return instead of re-entering the trap code. */
	if (kgdb_setjmp((long*)fault_jmp_buf) == 0) {
		debugger_fault_handler = kgdb_fault_handler;

		/* Accessing 16 bit and 32 bit objects in a single
		** load instruction is required to avoid bad side
		** effects for some IO registers.
		*/

		if ((count == 2) && (((long)mem & 1) == 0)) {
			/* aligned halfword: one 16-bit load, four digits */
			tmp_s = *(unsigned short *)mem;
			mem += 2;
			*buf++ = hexchars[(tmp_s >> 12) & 0xf];
			*buf++ = hexchars[(tmp_s >> 8) & 0xf];
			*buf++ = hexchars[(tmp_s >> 4) & 0xf];
			*buf++ = hexchars[tmp_s & 0xf];

		} else if ((count == 4) && (((long)mem & 3) == 0)) {
			/* aligned word: one 32-bit load, eight digits */
			tmp_l = *(unsigned int *)mem;
			mem += 4;
			*buf++ = hexchars[(tmp_l >> 28) & 0xf];
			*buf++ = hexchars[(tmp_l >> 24) & 0xf];
			*buf++ = hexchars[(tmp_l >> 20) & 0xf];
			*buf++ = hexchars[(tmp_l >> 16) & 0xf];
			*buf++ = hexchars[(tmp_l >> 12) & 0xf];
			*buf++ = hexchars[(tmp_l >> 8) & 0xf];
			*buf++ = hexchars[(tmp_l >> 4) & 0xf];
			*buf++ = hexchars[tmp_l & 0xf];

		} else {
			/* generic byte-at-a-time conversion */
			while (count-- > 0) {
				ch = *mem++;
				*buf++ = hexchars[ch >> 4];
				*buf++ = hexchars[ch & 0xf];
			}
		}

	} else {
		/* error condition */
	}
	debugger_fault_handler = NULL;
	*buf = 0;
	return buf;
}
241 | |||
/* convert the hex array pointed to by buf into binary to be placed in mem
 * return a pointer to the character AFTER the last byte written.
 *
 * NOTE(review): on a memory fault the function returns mem unchanged
 * (== orig_mem), which is still a non-NULL pointer -- so callers that
 * use the return value to detect a fault (e.g. the 'M' packet handler,
 * which replies "OK" on any non-zero return) cannot actually see the
 * failure.  Flagged for confirmation, behaviour left unchanged here.
 */
static char *
hex2mem(char *buf, char *mem, int count)
{
	unsigned char ch;
	int i;
	char *orig_mem;
	unsigned short tmp_s;
	unsigned long tmp_l;

	orig_mem = mem;

	/* Arm the kgdb fault handler: a bad store longjmps back here
	 * with a non-zero setjmp return instead of re-trapping. */
	if (kgdb_setjmp((long*)fault_jmp_buf) == 0) {
		debugger_fault_handler = kgdb_fault_handler;

		/* Accessing 16 bit and 32 bit objects in a single
		** store instruction is required to avoid bad side
		** effects for some IO registers.
		*/

		if ((count == 2) && (((long)mem & 1) == 0)) {
			/* aligned halfword: assemble four digits, one store */
			tmp_s = hex(*buf++) << 12;
			tmp_s |= hex(*buf++) << 8;
			tmp_s |= hex(*buf++) << 4;
			tmp_s |= hex(*buf++);

			*(unsigned short *)mem = tmp_s;
			mem += 2;

		} else if ((count == 4) && (((long)mem & 3) == 0)) {
			/* aligned word: assemble eight digits, one store */
			tmp_l = hex(*buf++) << 28;
			tmp_l |= hex(*buf++) << 24;
			tmp_l |= hex(*buf++) << 20;
			tmp_l |= hex(*buf++) << 16;
			tmp_l |= hex(*buf++) << 12;
			tmp_l |= hex(*buf++) << 8;
			tmp_l |= hex(*buf++) << 4;
			tmp_l |= hex(*buf++);

			*(unsigned long *)mem = tmp_l;
			mem += 4;

		} else {
			/* generic two-digits-per-byte conversion */
			for (i=0; i<count; i++) {
				ch = hex(*buf++) << 4;
				ch |= hex(*buf++);
				*mem++ = ch;
			}
		}


		/*
		** Flush the data cache, invalidate the instruction cache.
		*/
		flush_icache_range((int)orig_mem, (int)orig_mem + count - 1);

	} else {
		/* error condition */
	}
	debugger_fault_handler = NULL;
	return mem;
}
306 | |||
/*
 * While we find nice hex chars, build an int.
 * Return number of chars processed.  *ptr is advanced past every digit
 * consumed; *intValue receives the accumulated value (0 if no digits).
 * The scan runs under the kgdb fault handler so a bad pointer aborts
 * the loop instead of re-entering the trap code.
 */
static int
hexToInt(char **ptr, int *intValue)
{
	int numChars = 0;
	int hexValue;

	*intValue = 0;

	if (kgdb_setjmp((long*)fault_jmp_buf) == 0) {
		debugger_fault_handler = kgdb_fault_handler;
		while (**ptr) {
			hexValue = hex(**ptr);
			if (hexValue < 0)
				break;

			*intValue = (*intValue << 4) | hexValue;
			numChars ++;

			(*ptr)++;
		}
	} else {
		/* error condition */
	}
	debugger_fault_handler = NULL;

	return (numChars);
}
338 | |||
/* scan for the sequence $<data>#<checksum>
 * Blocks until a packet with a valid checksum has been received into
 * 'buffer' (NUL-terminated, '$'/'#' framing stripped), acking each
 * attempt with '+' (good) or '-' (bad, forces a retransmit). */
static void
getpacket(char *buffer)
{
	unsigned char checksum;
	unsigned char xmitcsum;
	int i;
	int count;
	unsigned char ch;

	do {
		/* wait around for the start character, ignore all other
		 * characters */
		while ((ch = (getDebugChar() & 0x7f)) != '$') ;

		checksum = 0;
		xmitcsum = -1;

		count = 0;

		/* now, read until a # or end of buffer is found */
		while (count < BUFMAX) {
			ch = getDebugChar() & 0x7f;
			if (ch == '#')
				break;
			checksum = checksum + ch;
			buffer[count] = ch;
			count = count + 1;
		}

		/* overlong packet: drop it; xmitcsum still differs from
		 * checksum so the outer loop waits for a retransmit */
		if (count >= BUFMAX)
			continue;

		buffer[count] = 0;

		if (ch == '#') {
			/* read and verify the two-hex-digit checksum */
			xmitcsum = hex(getDebugChar() & 0x7f) << 4;
			xmitcsum |= hex(getDebugChar() & 0x7f);
			if (checksum != xmitcsum)
				putDebugChar('-');	/* failed checksum */
			else {
				putDebugChar('+'); /* successful transfer */
				/* if a sequence char is present, reply the ID */
				if (buffer[2] == ':') {
					putDebugChar(buffer[0]);
					putDebugChar(buffer[1]);
					/* remove sequence chars from buffer */
					count = strlen(buffer);
					for (i=3; i <= count; i++)
						buffer[i-3] = buffer[i];
				}
			}
		}
	} while (checksum != xmitcsum);
}
394 | |||
/* send the packet in buffer.  Frames it as $<data>#<checksum> and
 * retransmits until the remote gdb acknowledges with '+'. */
static void putpacket(unsigned char *buffer)
{
	unsigned char checksum;
	int count;
	unsigned char ch, recv;

	/* $<packet info>#<checksum>. */
	do {
		putDebugChar('$');
		checksum = 0;
		count = 0;

		/* send the body; every byte feeds the mod-256 checksum */
		while ((ch = buffer[count])) {
			putDebugChar(ch);
			checksum += ch;
			count += 1;
		}

		putDebugChar('#');
		putDebugChar(hexchars[checksum >> 4]);
		putDebugChar(hexchars[checksum & 0xf]);
		recv = getDebugChar();
	} while ((recv & 0x7f) != '+');
}
420 | |||
/* Invalidate the instruction cache after kgdb has written breakpoint
 * or code bytes into memory behind the icache's back. */
static void kgdb_flush_cache_all(void)
{
	flush_instruction_cache();
}
425 | |||
426 | /* Set up exception handlers for tracing and breakpoints | ||
427 | * [could be called kgdb_init()] | ||
428 | */ | ||
void set_debug_traps(void)
{
#if 0
	unsigned char c;

	save_and_cli(flags);

	/* In case GDB is started before us, ack any packets (presumably
	 * "$?#xx") sitting there.
	 *
	 * I've found this code causes more problems than it solves,
	 * so that's why it's commented out.  GDB seems to work fine
	 * now starting either before or after the kernel -bwb
	 */

	while((c = getDebugChar()) != '$');
	while((c = getDebugChar()) != '#');
	c = getDebugChar(); /* eat first csum byte */
	c = getDebugChar(); /* eat second csum byte */
	putDebugChar('+'); /* ack it */
#endif
	/* Hook the ppc debugger entry points so breakpoint, single-step
	 * and match traps are routed into this stub. */
	debugger = kgdb;
	debugger_bpt = kgdb_bpt;
	debugger_sstep = kgdb_sstep;
	debugger_iabr_match = kgdb_iabr_match;
	debugger_dabr_match = kgdb_dabr_match;

	initialized = 1;
}
458 | |||
/* Installed in debugger_fault_handler while kgdb touches possibly-bad
 * memory; unwinds back to the matching kgdb_setjmp with value 1. */
static void kgdb_fault_handler(struct pt_regs *regs)
{
	kgdb_longjmp((long*)fault_jmp_buf, 1);
}

/* Breakpoint trap entry (hooked via debugger_bpt). */
int kgdb_bpt(struct pt_regs *regs)
{
	return handle_exception(regs);
}

/* Single-step trap entry (hooked via debugger_sstep). */
int kgdb_sstep(struct pt_regs *regs)
{
	return handle_exception(regs);
}

/* Generic debugger entry (hooked via debugger). */
void kgdb(struct pt_regs *regs)
{
	handle_exception(regs);
}

/* Instruction address breakpoint match -- not supported, but drop
 * into the stub anyway so the user isn't left hanging. */
int kgdb_iabr_match(struct pt_regs *regs)
{
	printk(KERN_ERR "kgdb doesn't support iabr, what?!?\n");
	return handle_exception(regs);
}

/* Data address breakpoint match -- likewise unsupported. */
int kgdb_dabr_match(struct pt_regs *regs)
{
	printk(KERN_ERR "kgdb doesn't support dabr, what?!?\n");
	return handle_exception(regs);
}
490 | |||
491 | /* Convert the hardware trap type code to a unix signal number. */ | ||
492 | /* | ||
493 | * This table contains the mapping between PowerPC hardware trap types, and | ||
494 | * signals, which are primarily what GDB understands. | ||
495 | */ | ||
static struct hard_trap_info
{
	unsigned int tt;		/* Trap type code for powerpc */
	unsigned char signo;		/* Signal that we map this trap into */
} hard_trap_info[] = {
	/* Two tables: 4xx/Book-E cores use a different exception layout
	 * than classic 6xx-style cores. */
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	{ 0x100, SIGINT  },		/* critical input interrupt */
	{ 0x200, SIGSEGV },		/* machine check */
	{ 0x300, SIGSEGV },		/* data storage */
	{ 0x400, SIGBUS  },		/* instruction storage */
	{ 0x500, SIGINT  },		/* interrupt */
	{ 0x600, SIGBUS  },		/* alignment */
	{ 0x700, SIGILL  },		/* program */
	{ 0x800, SIGILL  },		/* reserved */
	{ 0x900, SIGILL  },		/* reserved */
	{ 0xa00, SIGILL  },		/* reserved */
	{ 0xb00, SIGILL  },		/* reserved */
	{ 0xc00, SIGCHLD },		/* syscall */
	{ 0xd00, SIGILL  },		/* reserved */
	{ 0xe00, SIGILL  },		/* reserved */
	{ 0xf00, SIGILL  },		/* reserved */
	/*
	** 0x1000  PIT
	** 0x1010  FIT
	** 0x1020  watchdog
	** 0x1100  data TLB miss
	** 0x1200  instruction TLB miss
	*/
	{ 0x2002, SIGTRAP},		/* debug */
#else
	{ 0x200, SIGSEGV },		/* machine check */
	{ 0x300, SIGSEGV },		/* address error (store) */
	{ 0x400, SIGBUS },		/* instruction bus error */
	{ 0x500, SIGINT },		/* interrupt */
	{ 0x600, SIGBUS },		/* alingment */
	{ 0x700, SIGTRAP },		/* breakpoint trap */
	{ 0x800, SIGFPE },		/* fpu unavail */
	{ 0x900, SIGALRM },		/* decrementer */
	{ 0xa00, SIGILL },		/* reserved */
	{ 0xb00, SIGILL },		/* reserved */
	{ 0xc00, SIGCHLD },		/* syscall */
	{ 0xd00, SIGTRAP },		/* single-step/watch */
	{ 0xe00, SIGFPE },		/* fp assist */
#endif
	{ 0, 0}				/* Must be last: computeSignal()
					 * stops at the all-zero entry */

};
543 | |||
544 | static int computeSignal(unsigned int tt) | ||
545 | { | ||
546 | struct hard_trap_info *ht; | ||
547 | |||
548 | for (ht = hard_trap_info; ht->tt && ht->signo; ht++) | ||
549 | if (ht->tt == tt) | ||
550 | return ht->signo; | ||
551 | |||
552 | return SIGHUP; /* default for things we don't know about */ | ||
553 | } | ||
554 | |||
555 | #define PC_REGNUM 64 | ||
556 | #define SP_REGNUM 1 | ||
557 | |||
/*
 * This function does all command processing for interfacing to gdb.
 * Entered from the trap hooks above with the faulting pt_regs; loops
 * reading gdb packets and acting on them until a continue ('c'/'k')
 * or step ('s') command resumes the kernel.  Returns non-zero when
 * the trap was consumed by kgdb, 0 when it should be handled normally.
 */
static int
handle_exception (struct pt_regs *regs)
{
	int sigval;
	int addr;
	int length;
	char *ptr;
	unsigned int msr;

	/* We don't handle user-mode breakpoints. */
	if (user_mode(regs))
		return 0;

	/* A fault handler being installed means this trap was raised
	 * while kgdb itself was touching memory: bounce back to the
	 * setjmp point instead of recursing into the stub. */
	if (debugger_fault_handler) {
		debugger_fault_handler(regs);
		panic("kgdb longjump failed!\n");
	}
	if (kgdb_active) {
		printk(KERN_ERR "interrupt while in kgdb, returning\n");
		return 0;
	}

	kgdb_active = 1;
	kgdb_started = 1;

#ifdef KGDB_DEBUG
	printk("kgdb: entering handle_exception; trap [0x%x]\n",
			(unsigned int)regs->trap);
#endif

	kgdb_interruptible(0);
	lock_kernel();
	msr = mfmsr();
	mtmsr(msr & ~MSR_EE);	/* disable interrupts */

	if (regs->nip == (unsigned long)breakinst) {
		/* Skip over breakpoint trap insn */
		regs->nip += 4;
	}

	/* reply to host that an exception has occurred */
	sigval = computeSignal(regs->trap);
	ptr = remcomOutBuffer;

	/* Build a 'T' stop-reply carrying the signal number plus the
	 * PC and SP register values so gdb need not fetch them. */
	*ptr++ = 'T';
	*ptr++ = hexchars[sigval >> 4];
	*ptr++ = hexchars[sigval & 0xf];
	*ptr++ = hexchars[PC_REGNUM >> 4];
	*ptr++ = hexchars[PC_REGNUM & 0xf];
	*ptr++ = ':';
	ptr = mem2hex((char *)&regs->nip, ptr, 4);
	*ptr++ = ';';
	*ptr++ = hexchars[SP_REGNUM >> 4];
	*ptr++ = hexchars[SP_REGNUM & 0xf];
	*ptr++ = ':';
	ptr = mem2hex(((char *)regs) + SP_REGNUM*4, ptr, 4);
	*ptr++ = ';';
	*ptr++ = 0;

	putpacket(remcomOutBuffer);
	if (kdebug)
		printk("remcomOutBuffer: %s\n", remcomOutBuffer);

	/* XXX We may want to add some features dealing with poking the
	 * XXX page tables, ... (look at sparc-stub.c for more info)
	 * XXX also required hacking to the gdb sources directly...
	 */

	while (1) {
		remcomOutBuffer[0] = 0;

		getpacket(remcomInBuffer);
		switch (remcomInBuffer[0]) {
		case '?':		/* report most recent signal */
			remcomOutBuffer[0] = 'S';
			remcomOutBuffer[1] = hexchars[sigval >> 4];
			remcomOutBuffer[2] = hexchars[sigval & 0xf];
			remcomOutBuffer[3] = 0;
			break;
#if 0
		case 'q': /* this screws up gdb for some reason...*/
		{
			extern long _start, sdata, __bss_start;

			ptr = &remcomInBuffer[1];
			if (strncmp(ptr, "Offsets", 7) != 0)
				break;

			ptr = remcomOutBuffer;
			sprintf(ptr, "Text=%8.8x;Data=%8.8x;Bss=%8.8x",
				&_start, &sdata, &__bss_start);
			break;
		}
#endif
		case 'd':
			/* toggle debug flag */
			kdebug ^= 1;
			break;

		case 'g':	/* return the value of the CPU registers.
				 * some of them are non-PowerPC names :(
				 * they are stored in gdb like:
				 * struct {
				 *     u32 gpr[32];
				 *     f64 fpr[32];
				 *     u32 pc, ps, cnd, lr; (ps=msr)
				 *     u32 cnt, xer, mq;
				 * }
				 */
		{
			int i;
			ptr = remcomOutBuffer;
			/* General Purpose Regs */
			ptr = mem2hex((char *)regs, ptr, 32 * 4);
			/* Floating Point Regs - FIXME */
			/*ptr = mem2hex((char *), ptr, 32 * 8);*/
			for(i=0; i<(32*8*2); i++) { /* 2chars/byte */
				ptr[i] = '0';
			}
			ptr += 32*8*2;
			/* pc, msr, cr, lr, ctr, xer, (mq is unused) */
			ptr = mem2hex((char *)&regs->nip, ptr, 4);
			ptr = mem2hex((char *)&regs->msr, ptr, 4);
			ptr = mem2hex((char *)&regs->ccr, ptr, 4);
			ptr = mem2hex((char *)&regs->link, ptr, 4);
			ptr = mem2hex((char *)&regs->ctr, ptr, 4);
			ptr = mem2hex((char *)&regs->xer, ptr, 4);
		}
			break;

		case 'G':	/* set the value of the CPU registers */
		{
			ptr = &remcomInBuffer[1];

			/*
			 * If the stack pointer has moved, you should pray.
			 * (cause only god can help you).
			 */

			/* General Purpose Regs */
			hex2mem(ptr, (char *)regs, 32 * 4);

			/* Floating Point Regs - FIXME?? */
			/*ptr = hex2mem(ptr, ??, 32 * 8);*/
			ptr += 32*8*2;

			/* pc, msr, cr, lr, ctr, xer, (mq is unused) */
			ptr = hex2mem(ptr, (char *)&regs->nip, 4);
			ptr = hex2mem(ptr, (char *)&regs->msr, 4);
			ptr = hex2mem(ptr, (char *)&regs->ccr, 4);
			ptr = hex2mem(ptr, (char *)&regs->link, 4);
			ptr = hex2mem(ptr, (char *)&regs->ctr, 4);
			ptr = hex2mem(ptr, (char *)&regs->xer, 4);

			strcpy(remcomOutBuffer,"OK");
		}
			break;
		case 'H':
			/* don't do anything, yet, just acknowledge.
			 * NOTE(review): ptr is not reset to
			 * &remcomInBuffer[1] here, so hexToInt() scans
			 * from wherever ptr last pointed (the output
			 * buffer).  Harmless since the result is
			 * discarded, but looks unintended -- confirm. */
			hexToInt(&ptr, &addr);
			strcpy(remcomOutBuffer,"OK");
			break;

		case 'm':	/* mAA..AA,LLLL  Read LLLL bytes at address AA..AA */
				/* Try to read %x,%x.  */

			ptr = &remcomInBuffer[1];

			if (hexToInt(&ptr, &addr) && *ptr++ == ','
					&& hexToInt(&ptr, &length))	{
				/* mem2hex's non-NULL return is treated as
				 * success (see NOTE at mem2hex) */
				if (mem2hex((char *)addr, remcomOutBuffer,
							length))
					break;
				strcpy(remcomOutBuffer, "E03");
			} else
				strcpy(remcomOutBuffer, "E01");
			break;

		case 'M':	/* MAA..AA,LLLL: Write LLLL bytes at address AA.AA return OK */
			/* Try to read '%x,%x:'.  */

			ptr = &remcomInBuffer[1];

			if (hexToInt(&ptr, &addr) && *ptr++ == ','
					&& hexToInt(&ptr, &length)
					&& *ptr++ == ':') {
				if (hex2mem(ptr, (char *)addr, length))
					strcpy(remcomOutBuffer, "OK");
				else
					strcpy(remcomOutBuffer, "E03");
				flush_icache_range(addr, addr+length);
			} else
				strcpy(remcomOutBuffer, "E02");
			break;


		case 'k':	/* kill the program, actually just continue */
		case 'c':	/* cAA..AA  Continue; address AA..AA optional */
			/* try to read optional parameter, pc unchanged if no parm */

			ptr = &remcomInBuffer[1];
			if (hexToInt(&ptr, &addr))
				regs->nip = addr;

/* Need to flush the instruction cache here, as we may have deposited a
 * breakpoint, and the icache probably has no way of knowing that a data ref to
 * some location may have changed something that is in the instruction cache.
 */
			kgdb_flush_cache_all();
			mtmsr(msr);	/* restore saved MSR (re-enables EE) */

			kgdb_interruptible(1);
			unlock_kernel();
			kgdb_active = 0;
			if (kdebug) {
				printk("remcomInBuffer: %s\n", remcomInBuffer);
				printk("remcomOutBuffer: %s\n", remcomOutBuffer);
			}
			return 1;

		case 's':
			kgdb_flush_cache_all();
			/* set the single-step mechanism for this core family */
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
			mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC);
			regs->msr |= MSR_DE;
#else
			regs->msr |= MSR_SE;
#endif
			/* NOTE(review): unlike the 'c' path, this exit does
			 * not restore msr via mtmsr() nor call
			 * kgdb_interruptible(1) -- possibly deliberate for
			 * stepping with interrupts off, but confirm. */
			unlock_kernel();
			kgdb_active = 0;
			if (kdebug) {
				printk("remcomInBuffer: %s\n", remcomInBuffer);
				printk("remcomOutBuffer: %s\n", remcomOutBuffer);
			}
			return 1;

		case 'r':		/* Reset (if user process..exit ???)*/
			panic("kgdb reset.");
			break;
		}			/* switch */
		if (remcomOutBuffer[0] && kdebug) {
			printk("remcomInBuffer: %s\n", remcomInBuffer);
			printk("remcomOutBuffer: %s\n", remcomOutBuffer);
		}
		/* reply to the request */
		putpacket(remcomOutBuffer);
	} /* while(1) */
}
809 | |||
/* This function will generate a breakpoint exception. It is used at the
   beginning of a program to sync up with a debugger and can be used
   otherwise as a quick means to stop program execution and "break" into
   the debugger. */

void
breakpoint(void)
{
	/* Refuse to trap before the stub is ready -- nothing would field
	 * the exception. */
	if (!initialized) {
		printk("breakpoint() called b4 kgdb init\n");
		return;
	}

	/* Emit the break instruction word (0x7d821008) at the global label
	 * `breakinst` so the exception handler can recognize this exact
	 * address as the planted breakpoint.
	 * NOTE(review): the asm has no volatile qualifier; presumably the
	 * compiler keeps it because it has no outputs -- worth confirming. */
	asm("	.globl breakinst	\n\
	breakinst:	.long 0x7d821008");
}
826 | |||
827 | #ifdef CONFIG_KGDB_CONSOLE | ||
828 | /* Output string in GDB O-packet format if GDB has connected. If nothing | ||
829 | output, returns 0 (caller must then handle output). */ | ||
830 | int | ||
831 | kgdb_output_string (const char* s, unsigned int count) | ||
832 | { | ||
833 | char buffer[512]; | ||
834 | |||
835 | if (!kgdb_started) | ||
836 | return 0; | ||
837 | |||
838 | count = (count <= (sizeof(buffer) / 2 - 2)) | ||
839 | ? count : (sizeof(buffer) / 2 - 2); | ||
840 | |||
841 | buffer[0] = 'O'; | ||
842 | mem2hex (s, &buffer[1], count); | ||
843 | putpacket(buffer); | ||
844 | |||
845 | return 1; | ||
846 | } | ||
847 | #endif | ||
848 | |||
/* Magic-sysrq handler: announce and then plant a breakpoint so execution
 * drops into the kgdb stub at this point. */
static void sysrq_handle_gdb(int key, struct pt_regs *pt_regs,
			     struct tty_struct *tty)
{
	printk("Entering GDB stub\n");
	breakpoint();
}
/* Descriptor tying the handler above to a sysrq key (registered for 'g'
 * in gdb_register_sysrq() below). */
static struct sysrq_key_op sysrq_gdb_op = {
        .handler        = sysrq_handle_gdb,
        .help_msg       = "Gdb",
        .action_msg     = "GDB",
};
860 | |||
/* Boot-time hook: register sysrq-g so the GDB stub can be entered from
 * the console keyboard. */
static int gdb_register_sysrq(void)
{
	printk("Registering GDB sysrq handler\n");
	register_sysrq_key('g', &sysrq_gdb_op);
	return 0;
}
module_init(gdb_register_sysrq);
diff --git a/arch/ppc/kernel/ppc_htab.c b/arch/ppc/kernel/ppc_htab.c new file mode 100644 index 000000000000..ca810025993f --- /dev/null +++ b/arch/ppc/kernel/ppc_htab.c | |||
@@ -0,0 +1,467 @@ | |||
1 | /* | ||
2 | * PowerPC hash table management proc entry. Will show information | ||
3 | * about the current hash table and will allow changes to it. | ||
4 | * | ||
5 | * Written by Cort Dougan (cort@cs.nmt.edu) | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version | ||
10 | * 2 of the License, or (at your option) any later version. | ||
11 | */ | ||
12 | |||
13 | #include <linux/config.h> | ||
14 | #include <linux/errno.h> | ||
15 | #include <linux/sched.h> | ||
16 | #include <linux/proc_fs.h> | ||
17 | #include <linux/stat.h> | ||
18 | #include <linux/sysctl.h> | ||
19 | #include <linux/ctype.h> | ||
20 | #include <linux/threads.h> | ||
21 | #include <linux/smp_lock.h> | ||
22 | #include <linux/seq_file.h> | ||
23 | #include <linux/init.h> | ||
24 | #include <linux/bitops.h> | ||
25 | |||
26 | #include <asm/uaccess.h> | ||
27 | #include <asm/mmu.h> | ||
28 | #include <asm/residual.h> | ||
29 | #include <asm/io.h> | ||
30 | #include <asm/pgtable.h> | ||
31 | #include <asm/cputable.h> | ||
32 | #include <asm/system.h> | ||
33 | #include <asm/reg.h> | ||
34 | |||
35 | static int ppc_htab_show(struct seq_file *m, void *v); | ||
36 | static ssize_t ppc_htab_write(struct file * file, const char __user * buffer, | ||
37 | size_t count, loff_t *ppos); | ||
38 | extern PTE *Hash, *Hash_end; | ||
39 | extern unsigned long Hash_size, Hash_mask; | ||
40 | extern unsigned long _SDR1; | ||
41 | extern unsigned long htab_reloads; | ||
42 | extern unsigned long htab_preloads; | ||
43 | extern unsigned long htab_evicts; | ||
44 | extern unsigned long pte_misses; | ||
45 | extern unsigned long pte_errors; | ||
46 | extern unsigned int primary_pteg_full; | ||
47 | extern unsigned int htab_hash_searches; | ||
48 | |||
/* seq_file open hook for /proc/ppc_htab: bind ppc_htab_show() as the
 * single-record show routine. */
static int ppc_htab_open(struct inode *inode, struct file *file)
{
	return single_open(file, ppc_htab_show, NULL);
}
53 | |||
/* File operations for /proc/ppc_htab: reads go through the seq_file
 * machinery (ppc_htab_show), writes are parsed by ppc_htab_write(). */
struct file_operations ppc_htab_operations = {
	.open		= ppc_htab_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.write		= ppc_htab_write,
	.release	= single_release,
};
61 | |||
62 | static char *pmc1_lookup(unsigned long mmcr0) | ||
63 | { | ||
64 | switch ( mmcr0 & (0x7f<<7) ) | ||
65 | { | ||
66 | case 0x0: | ||
67 | return "none"; | ||
68 | case MMCR0_PMC1_CYCLES: | ||
69 | return "cycles"; | ||
70 | case MMCR0_PMC1_ICACHEMISS: | ||
71 | return "ic miss"; | ||
72 | case MMCR0_PMC1_DTLB: | ||
73 | return "dtlb miss"; | ||
74 | default: | ||
75 | return "unknown"; | ||
76 | } | ||
77 | } | ||
78 | |||
79 | static char *pmc2_lookup(unsigned long mmcr0) | ||
80 | { | ||
81 | switch ( mmcr0 & 0x3f ) | ||
82 | { | ||
83 | case 0x0: | ||
84 | return "none"; | ||
85 | case MMCR0_PMC2_CYCLES: | ||
86 | return "cycles"; | ||
87 | case MMCR0_PMC2_DCACHEMISS: | ||
88 | return "dc miss"; | ||
89 | case MMCR0_PMC2_ITLB: | ||
90 | return "itlb miss"; | ||
91 | case MMCR0_PMC2_LOADMISSTIME: | ||
92 | return "load miss time"; | ||
93 | default: | ||
94 | return "unknown"; | ||
95 | } | ||
96 | } | ||
97 | |||
/*
 * print some useful info about the hash table. This function
 * is _REALLY_ slow (see the nested for loops below) but nothing
 * in here should be really timing critical. -- Cort
 */
static int ppc_htab_show(struct seq_file *m, void *v)
{
	unsigned long mmcr0 = 0, pmc1 = 0, pmc2 = 0;
#if defined(CONFIG_PPC_STD_MMU) && !defined(CONFIG_PPC64BRIDGE)
	/* Tallies of valid kernel vs. user PTEs found by scanning the
	 * whole table below. */
	unsigned int kptes = 0, uptes = 0;
	PTE *ptr;
#endif /* CONFIG_PPC_STD_MMU */

	/* On CPUs with the 604-style performance monitor, dump the
	 * monitor registers and decode what each counter measures. */
	if (cpu_has_feature(CPU_FTR_604_PERF_MON)) {
		mmcr0 = mfspr(SPRN_MMCR0);
		pmc1 = mfspr(SPRN_PMC1);
		pmc2 = mfspr(SPRN_PMC2);
		seq_printf(m,
			   "604 Performance Monitoring\n"
			   "MMCR0\t\t: %08lx %s%s ",
			   mmcr0,
			   ( mmcr0>>28 & 0x2 ) ? "(user mode counted)" : "",
			   ( mmcr0>>28 & 0x4 ) ? "(kernel mode counted)" : "");
		seq_printf(m,
			   "\nPMC1\t\t: %08lx (%s)\n"
			   "PMC2\t\t: %08lx (%s)\n",
			   pmc1, pmc1_lookup(mmcr0),
			   pmc2, pmc2_lookup(mmcr0));
	}

#ifdef CONFIG_PPC_STD_MMU
	/* if we don't have a htab */
	if ( Hash_size == 0 ) {
		seq_printf(m, "No Hash Table used\n");
		return 0;
	}

#ifndef CONFIG_PPC64BRIDGE
	/* Walk every PTE slot and classify valid entries by MMU context;
	 * context 0 is counted as kernel, everything else as user. */
	for (ptr = Hash; ptr < Hash_end; ptr++) {
		unsigned int mctx, vsid;

		if (!ptr->v)
			continue;
		/* undo the esid skew */
		vsid = ptr->vsid;
		mctx = ((vsid - (vsid & 0xf) * 0x111) >> 4) & 0xfffff;
		if (mctx == 0)
			kptes++;
		else
			uptes++;
	}
#endif

	/* Table geometry plus (non-PPC64BRIDGE) occupancy statistics. */
	seq_printf(m,
		   "PTE Hash Table Information\n"
		   "Size\t\t: %luKb\n"
		   "Buckets\t\t: %lu\n"
		   "Address\t\t: %08lx\n"
		   "Entries\t\t: %lu\n"
#ifndef CONFIG_PPC64BRIDGE
		   "User ptes\t: %u\n"
		   "Kernel ptes\t: %u\n"
		   "Percent full\t: %lu%%\n"
#endif
		   , (unsigned long)(Hash_size>>10),
		   (Hash_size/(sizeof(PTE)*8)),
		   (unsigned long)Hash,
		   Hash_size/sizeof(PTE)
#ifndef CONFIG_PPC64BRIDGE
		   , uptes,
		   kptes,
		   ((kptes+uptes)*100) / (Hash_size/sizeof(PTE))
#endif
		   );

	/* Event counters maintained by the hash-table fault paths
	 * (declared extern at the top of this file). */
	seq_printf(m,
		   "Reloads\t\t: %lu\n"
		   "Preloads\t: %lu\n"
		   "Searches\t: %u\n"
		   "Overflows\t: %u\n"
		   "Evicts\t\t: %lu\n",
		   htab_reloads, htab_preloads, htab_hash_searches,
		   primary_pteg_full, htab_evicts);
#endif /* CONFIG_PPC_STD_MMU */

	seq_printf(m,
		   "Non-error misses: %lu\n"
		   "Error misses\t: %lu\n",
		   pte_misses, pte_errors);
	return 0;
}
189 | |||
190 | /* | ||
191 | * Allow user to define performance counters and resize the hash table | ||
192 | */ | ||
193 | static ssize_t ppc_htab_write(struct file * file, const char __user * ubuffer, | ||
194 | size_t count, loff_t *ppos) | ||
195 | { | ||
196 | #ifdef CONFIG_PPC_STD_MMU | ||
197 | unsigned long tmp; | ||
198 | char buffer[16]; | ||
199 | |||
200 | if (!capable(CAP_SYS_ADMIN)) | ||
201 | return -EACCES; | ||
202 | if (strncpy_from_user(buffer, ubuffer, 15)) | ||
203 | return -EFAULT; | ||
204 | buffer[15] = 0; | ||
205 | |||
206 | /* don't set the htab size for now */ | ||
207 | if ( !strncmp( buffer, "size ", 5) ) | ||
208 | return -EBUSY; | ||
209 | |||
210 | if ( !strncmp( buffer, "reset", 5) ) | ||
211 | { | ||
212 | if (cpu_has_feature(CPU_FTR_604_PERF_MON)) { | ||
213 | /* reset PMC1 and PMC2 */ | ||
214 | mtspr(SPRN_PMC1, 0); | ||
215 | mtspr(SPRN_PMC2, 0); | ||
216 | } | ||
217 | htab_reloads = 0; | ||
218 | htab_evicts = 0; | ||
219 | pte_misses = 0; | ||
220 | pte_errors = 0; | ||
221 | } | ||
222 | |||
223 | /* Everything below here requires the performance monitor feature. */ | ||
224 | if (!cpu_has_feature(CPU_FTR_604_PERF_MON)) | ||
225 | return count; | ||
226 | |||
227 | /* turn off performance monitoring */ | ||
228 | if ( !strncmp( buffer, "off", 3) ) | ||
229 | { | ||
230 | mtspr(SPRN_MMCR0, 0); | ||
231 | mtspr(SPRN_PMC1, 0); | ||
232 | mtspr(SPRN_PMC2, 0); | ||
233 | } | ||
234 | |||
235 | if ( !strncmp( buffer, "user", 4) ) | ||
236 | { | ||
237 | /* setup mmcr0 and clear the correct pmc */ | ||
238 | tmp = (mfspr(SPRN_MMCR0) & ~(0x60000000)) | 0x20000000; | ||
239 | mtspr(SPRN_MMCR0, tmp); | ||
240 | mtspr(SPRN_PMC1, 0); | ||
241 | mtspr(SPRN_PMC2, 0); | ||
242 | } | ||
243 | |||
244 | if ( !strncmp( buffer, "kernel", 6) ) | ||
245 | { | ||
246 | /* setup mmcr0 and clear the correct pmc */ | ||
247 | tmp = (mfspr(SPRN_MMCR0) & ~(0x60000000)) | 0x40000000; | ||
248 | mtspr(SPRN_MMCR0, tmp); | ||
249 | mtspr(SPRN_PMC1, 0); | ||
250 | mtspr(SPRN_PMC2, 0); | ||
251 | } | ||
252 | |||
253 | /* PMC1 values */ | ||
254 | if ( !strncmp( buffer, "dtlb", 4) ) | ||
255 | { | ||
256 | /* setup mmcr0 and clear the correct pmc */ | ||
257 | tmp = (mfspr(SPRN_MMCR0) & ~(0x7F << 7)) | MMCR0_PMC1_DTLB; | ||
258 | mtspr(SPRN_MMCR0, tmp); | ||
259 | mtspr(SPRN_PMC1, 0); | ||
260 | } | ||
261 | |||
262 | if ( !strncmp( buffer, "ic miss", 7) ) | ||
263 | { | ||
264 | /* setup mmcr0 and clear the correct pmc */ | ||
265 | tmp = (mfspr(SPRN_MMCR0) & ~(0x7F<<7)) | MMCR0_PMC1_ICACHEMISS; | ||
266 | mtspr(SPRN_MMCR0, tmp); | ||
267 | mtspr(SPRN_PMC1, 0); | ||
268 | } | ||
269 | |||
270 | /* PMC2 values */ | ||
271 | if ( !strncmp( buffer, "load miss time", 14) ) | ||
272 | { | ||
273 | /* setup mmcr0 and clear the correct pmc */ | ||
274 | asm volatile( | ||
275 | "mfspr %0,%1\n\t" /* get current mccr0 */ | ||
276 | "rlwinm %0,%0,0,0,31-6\n\t" /* clear bits [26-31] */ | ||
277 | "ori %0,%0,%2 \n\t" /* or in mmcr0 settings */ | ||
278 | "mtspr %1,%0 \n\t" /* set new mccr0 */ | ||
279 | "mtspr %3,%4 \n\t" /* reset the pmc */ | ||
280 | : "=r" (tmp) | ||
281 | : "i" (SPRN_MMCR0), | ||
282 | "i" (MMCR0_PMC2_LOADMISSTIME), | ||
283 | "i" (SPRN_PMC2), "r" (0) ); | ||
284 | } | ||
285 | |||
286 | if ( !strncmp( buffer, "itlb", 4) ) | ||
287 | { | ||
288 | /* setup mmcr0 and clear the correct pmc */ | ||
289 | asm volatile( | ||
290 | "mfspr %0,%1\n\t" /* get current mccr0 */ | ||
291 | "rlwinm %0,%0,0,0,31-6\n\t" /* clear bits [26-31] */ | ||
292 | "ori %0,%0,%2 \n\t" /* or in mmcr0 settings */ | ||
293 | "mtspr %1,%0 \n\t" /* set new mccr0 */ | ||
294 | "mtspr %3,%4 \n\t" /* reset the pmc */ | ||
295 | : "=r" (tmp) | ||
296 | : "i" (SPRN_MMCR0), "i" (MMCR0_PMC2_ITLB), | ||
297 | "i" (SPRN_PMC2), "r" (0) ); | ||
298 | } | ||
299 | |||
300 | if ( !strncmp( buffer, "dc miss", 7) ) | ||
301 | { | ||
302 | /* setup mmcr0 and clear the correct pmc */ | ||
303 | asm volatile( | ||
304 | "mfspr %0,%1\n\t" /* get current mccr0 */ | ||
305 | "rlwinm %0,%0,0,0,31-6\n\t" /* clear bits [26-31] */ | ||
306 | "ori %0,%0,%2 \n\t" /* or in mmcr0 settings */ | ||
307 | "mtspr %1,%0 \n\t" /* set new mccr0 */ | ||
308 | "mtspr %3,%4 \n\t" /* reset the pmc */ | ||
309 | : "=r" (tmp) | ||
310 | : "i" (SPRN_MMCR0), "i" (MMCR0_PMC2_DCACHEMISS), | ||
311 | "i" (SPRN_PMC2), "r" (0) ); | ||
312 | } | ||
313 | |||
314 | return count; | ||
315 | #else /* CONFIG_PPC_STD_MMU */ | ||
316 | return 0; | ||
317 | #endif /* CONFIG_PPC_STD_MMU */ | ||
318 | } | ||
319 | |||
320 | int proc_dol2crvec(ctl_table *table, int write, struct file *filp, | ||
321 | void __user *buffer_arg, size_t *lenp, loff_t *ppos) | ||
322 | { | ||
323 | int vleft, first=1, len, left, val; | ||
324 | char __user *buffer = (char __user *) buffer_arg; | ||
325 | #define TMPBUFLEN 256 | ||
326 | char buf[TMPBUFLEN], *p; | ||
327 | static const char *sizestrings[4] = { | ||
328 | "2MB", "256KB", "512KB", "1MB" | ||
329 | }; | ||
330 | static const char *clockstrings[8] = { | ||
331 | "clock disabled", "+1 clock", "+1.5 clock", "reserved(3)", | ||
332 | "+2 clock", "+2.5 clock", "+3 clock", "reserved(7)" | ||
333 | }; | ||
334 | static const char *typestrings[4] = { | ||
335 | "flow-through burst SRAM", "reserved SRAM", | ||
336 | "pipelined burst SRAM", "pipelined late-write SRAM" | ||
337 | }; | ||
338 | static const char *holdstrings[4] = { | ||
339 | "0.5", "1.0", "(reserved2)", "(reserved3)" | ||
340 | }; | ||
341 | |||
342 | if (!cpu_has_feature(CPU_FTR_L2CR)) | ||
343 | return -EFAULT; | ||
344 | |||
345 | if ( /*!table->maxlen ||*/ (*ppos && !write)) { | ||
346 | *lenp = 0; | ||
347 | return 0; | ||
348 | } | ||
349 | |||
350 | vleft = table->maxlen / sizeof(int); | ||
351 | left = *lenp; | ||
352 | |||
353 | for (; left /*&& vleft--*/; first=0) { | ||
354 | if (write) { | ||
355 | while (left) { | ||
356 | char c; | ||
357 | if(get_user(c, buffer)) | ||
358 | return -EFAULT; | ||
359 | if (!isspace(c)) | ||
360 | break; | ||
361 | left--; | ||
362 | buffer++; | ||
363 | } | ||
364 | if (!left) | ||
365 | break; | ||
366 | len = left; | ||
367 | if (len > TMPBUFLEN-1) | ||
368 | len = TMPBUFLEN-1; | ||
369 | if(copy_from_user(buf, buffer, len)) | ||
370 | return -EFAULT; | ||
371 | buf[len] = 0; | ||
372 | p = buf; | ||
373 | if (*p < '0' || *p > '9') | ||
374 | break; | ||
375 | val = simple_strtoul(p, &p, 0); | ||
376 | len = p-buf; | ||
377 | if ((len < left) && *p && !isspace(*p)) | ||
378 | break; | ||
379 | buffer += len; | ||
380 | left -= len; | ||
381 | _set_L2CR(val); | ||
382 | } else { | ||
383 | p = buf; | ||
384 | if (!first) | ||
385 | *p++ = '\t'; | ||
386 | val = _get_L2CR(); | ||
387 | p += sprintf(p, "0x%08x: ", val); | ||
388 | p += sprintf(p, " %s", (val >> 31) & 1 ? "enabled" : | ||
389 | "disabled"); | ||
390 | p += sprintf(p, ", %sparity", (val>>30)&1 ? "" : "no "); | ||
391 | p += sprintf(p, ", %s", sizestrings[(val >> 28) & 3]); | ||
392 | p += sprintf(p, ", %s", clockstrings[(val >> 25) & 7]); | ||
393 | p += sprintf(p, ", %s", typestrings[(val >> 23) & 2]); | ||
394 | p += sprintf(p, "%s", (val>>22)&1 ? ", data only" : ""); | ||
395 | p += sprintf(p, "%s", (val>>20)&1 ? ", ZZ enabled": ""); | ||
396 | p += sprintf(p, ", %s", (val>>19)&1 ? "write-through" : | ||
397 | "copy-back"); | ||
398 | p += sprintf(p, "%s", (val>>18)&1 ? ", testing" : ""); | ||
399 | p += sprintf(p, ", %sns hold",holdstrings[(val>>16)&3]); | ||
400 | p += sprintf(p, "%s", (val>>15)&1 ? ", DLL slow" : ""); | ||
401 | p += sprintf(p, "%s", (val>>14)&1 ? ", diff clock" :""); | ||
402 | p += sprintf(p, "%s", (val>>13)&1 ? ", DLL bypass" :""); | ||
403 | |||
404 | p += sprintf(p,"\n"); | ||
405 | |||
406 | len = strlen(buf); | ||
407 | if (len > left) | ||
408 | len = left; | ||
409 | if (copy_to_user(buffer, buf, len)) | ||
410 | return -EFAULT; | ||
411 | left -= len; | ||
412 | buffer += len; | ||
413 | break; | ||
414 | } | ||
415 | } | ||
416 | |||
417 | if (!write && !first && left) { | ||
418 | if(put_user('\n', (char __user *) buffer)) | ||
419 | return -EFAULT; | ||
420 | left--, buffer++; | ||
421 | } | ||
422 | if (write) { | ||
423 | char __user *s = (char __user *) buffer; | ||
424 | while (left) { | ||
425 | char c; | ||
426 | if(get_user(c, s++)) | ||
427 | return -EFAULT; | ||
428 | if (!isspace(c)) | ||
429 | break; | ||
430 | left--; | ||
431 | } | ||
432 | } | ||
433 | if (write && first) | ||
434 | return -EINVAL; | ||
435 | *lenp -= left; | ||
436 | *ppos += *lenp; | ||
437 | return 0; | ||
438 | } | ||
439 | |||
#ifdef CONFIG_SYSCTL
/*
 * Register our sysctl.
 */
/* kernel.l2cr entry: exposes the L2CR register via proc_dol2crvec(). */
static ctl_table htab_ctl_table[]={
	{
		.ctl_name	= KERN_PPC_L2CR,
		.procname	= "l2cr",
		.mode		= 0644,
		.proc_handler	= &proc_dol2crvec,
	},
	{ 0, },
};
/* Root table placing the entry above under /proc/sys/kernel. */
static ctl_table htab_sysctl_root[] = {
	{ 1, "kernel", NULL, 0, 0755, htab_ctl_table, },
	{ 0,},
};

/* Boot-time hook wiring the table into the sysctl tree. */
static int __init
register_ppc_htab_sysctl(void)
{
	register_sysctl_table(htab_sysctl_root, 0);

	return 0;
}

__initcall(register_ppc_htab_sysctl);
#endif
diff --git a/arch/ppc/kernel/ppc_ksyms.c b/arch/ppc/kernel/ppc_ksyms.c new file mode 100644 index 000000000000..2ccb58fe4fc3 --- /dev/null +++ b/arch/ppc/kernel/ppc_ksyms.c | |||
@@ -0,0 +1,350 @@ | |||
1 | #include <linux/config.h> | ||
2 | #include <linux/module.h> | ||
3 | #include <linux/threads.h> | ||
4 | #include <linux/smp.h> | ||
5 | #include <linux/sched.h> | ||
6 | #include <linux/elfcore.h> | ||
7 | #include <linux/string.h> | ||
8 | #include <linux/interrupt.h> | ||
9 | #include <linux/tty.h> | ||
10 | #include <linux/vt_kern.h> | ||
11 | #include <linux/nvram.h> | ||
12 | #include <linux/console.h> | ||
13 | #include <linux/irq.h> | ||
14 | #include <linux/pci.h> | ||
15 | #include <linux/delay.h> | ||
16 | #include <linux/ide.h> | ||
17 | #include <linux/pm.h> | ||
18 | #include <linux/bitops.h> | ||
19 | |||
20 | #include <asm/page.h> | ||
21 | #include <asm/semaphore.h> | ||
22 | #include <asm/processor.h> | ||
23 | #include <asm/uaccess.h> | ||
24 | #include <asm/io.h> | ||
25 | #include <asm/ide.h> | ||
26 | #include <asm/atomic.h> | ||
27 | #include <asm/checksum.h> | ||
28 | #include <asm/pgtable.h> | ||
29 | #include <asm/tlbflush.h> | ||
30 | #include <linux/adb.h> | ||
31 | #include <linux/cuda.h> | ||
32 | #include <linux/pmu.h> | ||
33 | #include <asm/prom.h> | ||
34 | #include <asm/system.h> | ||
35 | #include <asm/pci-bridge.h> | ||
36 | #include <asm/irq.h> | ||
37 | #include <asm/pmac_feature.h> | ||
38 | #include <asm/dma.h> | ||
39 | #include <asm/machdep.h> | ||
40 | #include <asm/hw_irq.h> | ||
41 | #include <asm/nvram.h> | ||
42 | #include <asm/mmu_context.h> | ||
43 | #include <asm/backlight.h> | ||
44 | #include <asm/time.h> | ||
45 | #include <asm/cputable.h> | ||
46 | #include <asm/btext.h> | ||
47 | #include <asm/div64.h> | ||
48 | #include <asm/xmon.h> | ||
49 | |||
50 | #ifdef CONFIG_8xx | ||
51 | #include <asm/commproc.h> | ||
52 | #endif | ||
53 | |||
54 | /* Tell string.h we don't want memcpy etc. as cpp defines */ | ||
55 | #define EXPORT_SYMTAB_STROPS | ||
56 | |||
57 | extern void transfer_to_handler(void); | ||
58 | extern void do_syscall_trace(void); | ||
59 | extern void do_IRQ(struct pt_regs *regs); | ||
60 | extern void MachineCheckException(struct pt_regs *regs); | ||
61 | extern void AlignmentException(struct pt_regs *regs); | ||
62 | extern void ProgramCheckException(struct pt_regs *regs); | ||
63 | extern void SingleStepException(struct pt_regs *regs); | ||
64 | extern int do_signal(sigset_t *, struct pt_regs *); | ||
65 | extern int pmac_newworld; | ||
66 | extern int sys_sigreturn(struct pt_regs *regs); | ||
67 | |||
68 | long long __ashrdi3(long long, int); | ||
69 | long long __ashldi3(long long, int); | ||
70 | long long __lshrdi3(long long, int); | ||
71 | |||
72 | extern unsigned long mm_ptov (unsigned long paddr); | ||
73 | |||
74 | EXPORT_SYMBOL(clear_pages); | ||
75 | EXPORT_SYMBOL(clear_user_page); | ||
76 | EXPORT_SYMBOL(do_signal); | ||
77 | EXPORT_SYMBOL(do_syscall_trace); | ||
78 | EXPORT_SYMBOL(transfer_to_handler); | ||
79 | EXPORT_SYMBOL(do_IRQ); | ||
80 | EXPORT_SYMBOL(MachineCheckException); | ||
81 | EXPORT_SYMBOL(AlignmentException); | ||
82 | EXPORT_SYMBOL(ProgramCheckException); | ||
83 | EXPORT_SYMBOL(SingleStepException); | ||
84 | EXPORT_SYMBOL(sys_sigreturn); | ||
85 | EXPORT_SYMBOL(ppc_n_lost_interrupts); | ||
86 | EXPORT_SYMBOL(ppc_lost_interrupts); | ||
87 | |||
88 | EXPORT_SYMBOL(ISA_DMA_THRESHOLD); | ||
89 | EXPORT_SYMBOL(DMA_MODE_READ); | ||
90 | EXPORT_SYMBOL(DMA_MODE_WRITE); | ||
91 | #if defined(CONFIG_PPC_PREP) | ||
92 | EXPORT_SYMBOL(_prep_type); | ||
93 | EXPORT_SYMBOL(ucSystemType); | ||
94 | #endif | ||
95 | |||
96 | #if !defined(__INLINE_BITOPS) | ||
97 | EXPORT_SYMBOL(set_bit); | ||
98 | EXPORT_SYMBOL(clear_bit); | ||
99 | EXPORT_SYMBOL(change_bit); | ||
100 | EXPORT_SYMBOL(test_and_set_bit); | ||
101 | EXPORT_SYMBOL(test_and_clear_bit); | ||
102 | EXPORT_SYMBOL(test_and_change_bit); | ||
103 | #endif /* __INLINE_BITOPS */ | ||
104 | |||
105 | EXPORT_SYMBOL(strcpy); | ||
106 | EXPORT_SYMBOL(strncpy); | ||
107 | EXPORT_SYMBOL(strcat); | ||
108 | EXPORT_SYMBOL(strncat); | ||
109 | EXPORT_SYMBOL(strchr); | ||
110 | EXPORT_SYMBOL(strrchr); | ||
111 | EXPORT_SYMBOL(strpbrk); | ||
112 | EXPORT_SYMBOL(strstr); | ||
113 | EXPORT_SYMBOL(strlen); | ||
114 | EXPORT_SYMBOL(strnlen); | ||
115 | EXPORT_SYMBOL(strcmp); | ||
116 | EXPORT_SYMBOL(strncmp); | ||
117 | EXPORT_SYMBOL(strcasecmp); | ||
118 | EXPORT_SYMBOL(__div64_32); | ||
119 | |||
120 | EXPORT_SYMBOL(csum_partial); | ||
121 | EXPORT_SYMBOL(csum_partial_copy_generic); | ||
122 | EXPORT_SYMBOL(ip_fast_csum); | ||
123 | EXPORT_SYMBOL(csum_tcpudp_magic); | ||
124 | |||
125 | EXPORT_SYMBOL(__copy_tofrom_user); | ||
126 | EXPORT_SYMBOL(__clear_user); | ||
127 | EXPORT_SYMBOL(__strncpy_from_user); | ||
128 | EXPORT_SYMBOL(__strnlen_user); | ||
129 | |||
130 | /* | ||
131 | EXPORT_SYMBOL(inb); | ||
132 | EXPORT_SYMBOL(inw); | ||
133 | EXPORT_SYMBOL(inl); | ||
134 | EXPORT_SYMBOL(outb); | ||
135 | EXPORT_SYMBOL(outw); | ||
136 | EXPORT_SYMBOL(outl); | ||
137 | EXPORT_SYMBOL(outsl);*/ | ||
138 | |||
139 | EXPORT_SYMBOL(_insb); | ||
140 | EXPORT_SYMBOL(_outsb); | ||
141 | EXPORT_SYMBOL(_insw); | ||
142 | EXPORT_SYMBOL(_outsw); | ||
143 | EXPORT_SYMBOL(_insl); | ||
144 | EXPORT_SYMBOL(_outsl); | ||
145 | EXPORT_SYMBOL(_insw_ns); | ||
146 | EXPORT_SYMBOL(_outsw_ns); | ||
147 | EXPORT_SYMBOL(_insl_ns); | ||
148 | EXPORT_SYMBOL(_outsl_ns); | ||
149 | EXPORT_SYMBOL(iopa); | ||
150 | EXPORT_SYMBOL(mm_ptov); | ||
151 | EXPORT_SYMBOL(ioremap); | ||
152 | #ifdef CONFIG_44x | ||
153 | EXPORT_SYMBOL(ioremap64); | ||
154 | #endif | ||
155 | EXPORT_SYMBOL(__ioremap); | ||
156 | EXPORT_SYMBOL(iounmap); | ||
157 | EXPORT_SYMBOL(ioremap_bot); /* aka VMALLOC_END */ | ||
158 | |||
159 | #if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE) | ||
160 | EXPORT_SYMBOL(ppc_ide_md); | ||
161 | #endif | ||
162 | |||
163 | #ifdef CONFIG_PCI | ||
164 | EXPORT_SYMBOL(isa_io_base); | ||
165 | EXPORT_SYMBOL(isa_mem_base); | ||
166 | EXPORT_SYMBOL(pci_dram_offset); | ||
167 | EXPORT_SYMBOL(pci_alloc_consistent); | ||
168 | EXPORT_SYMBOL(pci_free_consistent); | ||
169 | EXPORT_SYMBOL(pci_bus_io_base); | ||
170 | EXPORT_SYMBOL(pci_bus_io_base_phys); | ||
171 | EXPORT_SYMBOL(pci_bus_mem_base_phys); | ||
172 | EXPORT_SYMBOL(pci_bus_to_hose); | ||
173 | EXPORT_SYMBOL(pci_resource_to_bus); | ||
174 | EXPORT_SYMBOL(pci_phys_to_bus); | ||
175 | EXPORT_SYMBOL(pci_bus_to_phys); | ||
176 | #endif /* CONFIG_PCI */ | ||
177 | |||
178 | #ifdef CONFIG_NOT_COHERENT_CACHE | ||
179 | EXPORT_SYMBOL(flush_dcache_all); | ||
180 | #endif | ||
181 | |||
182 | EXPORT_SYMBOL(start_thread); | ||
183 | EXPORT_SYMBOL(kernel_thread); | ||
184 | |||
185 | EXPORT_SYMBOL(flush_instruction_cache); | ||
186 | EXPORT_SYMBOL(giveup_fpu); | ||
187 | EXPORT_SYMBOL(flush_icache_range); | ||
188 | EXPORT_SYMBOL(flush_dcache_range); | ||
189 | EXPORT_SYMBOL(flush_icache_user_range); | ||
190 | EXPORT_SYMBOL(flush_dcache_page); | ||
191 | EXPORT_SYMBOL(flush_tlb_kernel_range); | ||
192 | EXPORT_SYMBOL(flush_tlb_page); | ||
193 | EXPORT_SYMBOL(_tlbie); | ||
194 | #ifdef CONFIG_ALTIVEC | ||
195 | EXPORT_SYMBOL(last_task_used_altivec); | ||
196 | EXPORT_SYMBOL(giveup_altivec); | ||
197 | #endif /* CONFIG_ALTIVEC */ | ||
198 | #ifdef CONFIG_SPE | ||
199 | EXPORT_SYMBOL(last_task_used_spe); | ||
200 | EXPORT_SYMBOL(giveup_spe); | ||
201 | #endif /* CONFIG_SPE */ | ||
202 | #ifdef CONFIG_SMP | ||
203 | EXPORT_SYMBOL(smp_call_function); | ||
204 | EXPORT_SYMBOL(smp_hw_index); | ||
205 | #endif | ||
206 | |||
207 | EXPORT_SYMBOL(ppc_md); | ||
208 | |||
209 | #ifdef CONFIG_ADB | ||
210 | EXPORT_SYMBOL(adb_request); | ||
211 | EXPORT_SYMBOL(adb_register); | ||
212 | EXPORT_SYMBOL(adb_unregister); | ||
213 | EXPORT_SYMBOL(adb_poll); | ||
214 | EXPORT_SYMBOL(adb_try_handler_change); | ||
215 | #endif /* CONFIG_ADB */ | ||
216 | #ifdef CONFIG_ADB_CUDA | ||
217 | EXPORT_SYMBOL(cuda_request); | ||
218 | EXPORT_SYMBOL(cuda_poll); | ||
219 | #endif /* CONFIG_ADB_CUDA */ | ||
220 | #ifdef CONFIG_PPC_MULTIPLATFORM | ||
221 | EXPORT_SYMBOL(_machine); | ||
222 | #endif | ||
223 | #ifdef CONFIG_PPC_PMAC | ||
224 | EXPORT_SYMBOL(sys_ctrler); | ||
225 | EXPORT_SYMBOL(pmac_newworld); | ||
226 | #endif | ||
227 | #ifdef CONFIG_PPC_OF | ||
228 | EXPORT_SYMBOL(find_devices); | ||
229 | EXPORT_SYMBOL(find_type_devices); | ||
230 | EXPORT_SYMBOL(find_compatible_devices); | ||
231 | EXPORT_SYMBOL(find_path_device); | ||
232 | EXPORT_SYMBOL(device_is_compatible); | ||
233 | EXPORT_SYMBOL(machine_is_compatible); | ||
234 | EXPORT_SYMBOL(find_all_nodes); | ||
235 | EXPORT_SYMBOL(get_property); | ||
236 | EXPORT_SYMBOL(request_OF_resource); | ||
237 | EXPORT_SYMBOL(release_OF_resource); | ||
238 | EXPORT_SYMBOL(pci_busdev_to_OF_node); | ||
239 | EXPORT_SYMBOL(pci_device_to_OF_node); | ||
240 | EXPORT_SYMBOL(pci_device_from_OF_node); | ||
241 | EXPORT_SYMBOL(of_find_node_by_name); | ||
242 | EXPORT_SYMBOL(of_find_node_by_type); | ||
243 | EXPORT_SYMBOL(of_find_compatible_node); | ||
244 | EXPORT_SYMBOL(of_find_node_by_path); | ||
245 | EXPORT_SYMBOL(of_find_all_nodes); | ||
246 | EXPORT_SYMBOL(of_get_parent); | ||
247 | EXPORT_SYMBOL(of_get_next_child); | ||
248 | EXPORT_SYMBOL(of_node_get); | ||
249 | EXPORT_SYMBOL(of_node_put); | ||
250 | #endif /* CONFIG_PPC_OF */ | ||
251 | #if defined(CONFIG_BOOTX_TEXT) | ||
252 | EXPORT_SYMBOL(btext_update_display); | ||
253 | #endif | ||
254 | #if defined(CONFIG_SCSI) && defined(CONFIG_PPC_PMAC) | ||
255 | EXPORT_SYMBOL(note_scsi_host); | ||
256 | #endif | ||
257 | #ifdef CONFIG_VT | ||
258 | EXPORT_SYMBOL(kd_mksound); | ||
259 | #endif | ||
260 | EXPORT_SYMBOL(to_tm); | ||
261 | |||
262 | EXPORT_SYMBOL(pm_power_off); | ||
263 | |||
264 | EXPORT_SYMBOL(__ashrdi3); | ||
265 | EXPORT_SYMBOL(__ashldi3); | ||
266 | EXPORT_SYMBOL(__lshrdi3); | ||
267 | EXPORT_SYMBOL(memcpy); | ||
268 | EXPORT_SYMBOL(memset); | ||
269 | EXPORT_SYMBOL(memmove); | ||
270 | EXPORT_SYMBOL(memscan); | ||
271 | EXPORT_SYMBOL(memcmp); | ||
272 | EXPORT_SYMBOL(memchr); | ||
273 | |||
274 | #if defined(CONFIG_FB_VGA16_MODULE) | ||
275 | EXPORT_SYMBOL(screen_info); | ||
276 | #endif | ||
277 | |||
278 | EXPORT_SYMBOL(__delay); | ||
279 | #ifndef INLINE_IRQS | ||
280 | EXPORT_SYMBOL(local_irq_enable); | ||
281 | EXPORT_SYMBOL(local_irq_enable_end); | ||
282 | EXPORT_SYMBOL(local_irq_disable); | ||
283 | EXPORT_SYMBOL(local_irq_disable_end); | ||
284 | EXPORT_SYMBOL(local_save_flags_ptr); | ||
285 | EXPORT_SYMBOL(local_save_flags_ptr_end); | ||
286 | EXPORT_SYMBOL(local_irq_restore); | ||
287 | EXPORT_SYMBOL(local_irq_restore_end); | ||
288 | #endif | ||
289 | EXPORT_SYMBOL(timer_interrupt); | ||
290 | EXPORT_SYMBOL(irq_desc); | ||
291 | EXPORT_SYMBOL(tb_ticks_per_jiffy); | ||
292 | EXPORT_SYMBOL(get_wchan); | ||
293 | EXPORT_SYMBOL(console_drivers); | ||
294 | #ifdef CONFIG_XMON | ||
295 | EXPORT_SYMBOL(xmon); | ||
296 | EXPORT_SYMBOL(xmon_printf); | ||
297 | #endif | ||
298 | EXPORT_SYMBOL(__up); | ||
299 | EXPORT_SYMBOL(__down); | ||
300 | EXPORT_SYMBOL(__down_interruptible); | ||
301 | |||
302 | #if defined(CONFIG_KGDB) || defined(CONFIG_XMON) | ||
303 | extern void (*debugger)(struct pt_regs *regs); | ||
304 | extern int (*debugger_bpt)(struct pt_regs *regs); | ||
305 | extern int (*debugger_sstep)(struct pt_regs *regs); | ||
306 | extern int (*debugger_iabr_match)(struct pt_regs *regs); | ||
307 | extern int (*debugger_dabr_match)(struct pt_regs *regs); | ||
308 | extern void (*debugger_fault_handler)(struct pt_regs *regs); | ||
309 | |||
310 | EXPORT_SYMBOL(debugger); | ||
311 | EXPORT_SYMBOL(debugger_bpt); | ||
312 | EXPORT_SYMBOL(debugger_sstep); | ||
313 | EXPORT_SYMBOL(debugger_iabr_match); | ||
314 | EXPORT_SYMBOL(debugger_dabr_match); | ||
315 | EXPORT_SYMBOL(debugger_fault_handler); | ||
316 | #endif | ||
317 | |||
318 | #ifdef CONFIG_8xx | ||
319 | EXPORT_SYMBOL(cpm_install_handler); | ||
320 | EXPORT_SYMBOL(cpm_free_handler); | ||
321 | #endif /* CONFIG_8xx */ | ||
322 | #if defined(CONFIG_8xx) || defined(CONFIG_40x) || defined(CONFIG_85xx) ||\ | ||
323 | defined(CONFIG_83xx) | ||
324 | EXPORT_SYMBOL(__res); | ||
325 | #endif | ||
326 | |||
327 | EXPORT_SYMBOL(next_mmu_context); | ||
328 | EXPORT_SYMBOL(set_context); | ||
329 | EXPORT_SYMBOL(handle_mm_fault); /* For MOL */ | ||
330 | EXPORT_SYMBOL(disarm_decr); | ||
331 | #ifdef CONFIG_PPC_STD_MMU | ||
332 | extern long mol_trampoline; | ||
333 | EXPORT_SYMBOL(mol_trampoline); /* For MOL */ | ||
334 | EXPORT_SYMBOL(flush_hash_pages); /* For MOL */ | ||
335 | #ifdef CONFIG_SMP | ||
336 | extern int mmu_hash_lock; | ||
337 | EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */ | ||
338 | #endif /* CONFIG_SMP */ | ||
339 | extern long *intercept_table; | ||
340 | EXPORT_SYMBOL(intercept_table); | ||
341 | #endif /* CONFIG_PPC_STD_MMU */ | ||
342 | EXPORT_SYMBOL(cur_cpu_spec); | ||
343 | #ifdef CONFIG_PPC_PMAC | ||
344 | extern unsigned long agp_special_page; | ||
345 | EXPORT_SYMBOL(agp_special_page); | ||
346 | #endif | ||
347 | #if defined(CONFIG_40x) || defined(CONFIG_BOOKE) | ||
348 | EXPORT_SYMBOL(__mtdcr); | ||
349 | EXPORT_SYMBOL(__mfdcr); | ||
350 | #endif | ||
diff --git a/arch/ppc/kernel/process.c b/arch/ppc/kernel/process.c new file mode 100644 index 000000000000..82de66e4db6d --- /dev/null +++ b/arch/ppc/kernel/process.c | |||
@@ -0,0 +1,781 @@ | |||
1 | /* | ||
2 | * arch/ppc/kernel/process.c | ||
3 | * | ||
4 | * Derived from "arch/i386/kernel/process.c" | ||
5 | * Copyright (C) 1995 Linus Torvalds | ||
6 | * | ||
7 | * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and | ||
8 | * Paul Mackerras (paulus@cs.anu.edu.au) | ||
9 | * | ||
10 | * PowerPC version | ||
11 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or | ||
14 | * modify it under the terms of the GNU General Public License | ||
15 | * as published by the Free Software Foundation; either version | ||
16 | * 2 of the License, or (at your option) any later version. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #include <linux/config.h> | ||
21 | #include <linux/errno.h> | ||
22 | #include <linux/sched.h> | ||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/mm.h> | ||
25 | #include <linux/smp.h> | ||
26 | #include <linux/smp_lock.h> | ||
27 | #include <linux/stddef.h> | ||
28 | #include <linux/unistd.h> | ||
29 | #include <linux/ptrace.h> | ||
30 | #include <linux/slab.h> | ||
31 | #include <linux/user.h> | ||
32 | #include <linux/elf.h> | ||
33 | #include <linux/init.h> | ||
34 | #include <linux/prctl.h> | ||
35 | #include <linux/init_task.h> | ||
36 | #include <linux/module.h> | ||
37 | #include <linux/kallsyms.h> | ||
38 | #include <linux/mqueue.h> | ||
39 | #include <linux/hardirq.h> | ||
40 | |||
41 | #include <asm/pgtable.h> | ||
42 | #include <asm/uaccess.h> | ||
43 | #include <asm/system.h> | ||
44 | #include <asm/io.h> | ||
45 | #include <asm/processor.h> | ||
46 | #include <asm/mmu.h> | ||
47 | #include <asm/prom.h> | ||
48 | |||
/* Assembly helper; presumably returns the current stack pointer (r1) —
 * defined elsewhere, confirm against misc.S. */
extern unsigned long _get_SP(void);

/*
 * Lazy FP/AltiVec/SPE state tracking: the task whose registers are still
 * live in the corresponding unit (see __switch_to below, which only does
 * eager saving on SMP).
 */
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_spe = NULL;

/* Statically-initialized state for the initial (swapper) task. */
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);
EXPORT_SYMBOL(init_mm);

/* this is 8kB-aligned so we can get to the thread_info struct
   at the base of it from the stack pointer with 1 integer instruction. */
union thread_union init_thread_union
	__attribute__((__section__(".data.init_task"))) =
{ INIT_THREAD_INFO(init_task) };

/* initial task structure */
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);

/* only used to get secondary processor up */
struct task_struct *current_set[NR_CPUS] = {&init_task, };
74 | |||
/* Compile-time debug switches; both disabled by default. */
#undef SHOW_TASK_SWITCHES
#undef CHECK_STACK

#if defined(CHECK_STACK)
/* Address just past the highest byte of @tsk's kernel stack. */
unsigned long
kernel_stack_top(struct task_struct *tsk)
{
	return ((unsigned long)tsk) + sizeof(union task_union);
}

/* Lowest usable stack address: just above @tsk's thread_info. */
unsigned long
task_top(struct task_struct *tsk)
{
	return ((unsigned long)tsk) + sizeof(struct thread_info);
}

/* check to make sure the kernel stack is healthy */
int check_stack(struct task_struct *tsk)
{
	unsigned long stack_top, tsk_top;
	int ret = 0;

	/* Reject a NULL task before touching it: the previous code only
	 * printed a warning here and then dereferenced tsk->thread.ksp
	 * below anyway. */
	if ( !tsk )
	{
		printk("check_stack(): tsk bad tsk %p\n",tsk);
		return 0;
	}

	stack_top = kernel_stack_top(tsk);
	tsk_top = task_top(tsk);

#if 0
	/* check thread magic */
	if ( tsk->thread.magic != THREAD_MAGIC )
	{
		ret |= 1;
		printk("thread.magic bad: %08x\n", tsk->thread.magic);
	}
#endif

	/* check if stored ksp is bad */
	if ( (tsk->thread.ksp > stack_top) || (tsk->thread.ksp < tsk_top) )
	{
		printk("stack out of bounds: %s/%d\n"
		       " tsk_top %08lx ksp %08lx stack_top %08lx\n",
		       tsk->comm,tsk->pid,
		       tsk_top, tsk->thread.ksp, stack_top);
		ret |= 2;
	}

	/* check if stack ptr RIGHT NOW is bad */
	if ( (tsk == current) && ((_get_SP() > stack_top ) || (_get_SP() < tsk_top)) )
	{
		printk("current stack ptr out of bounds: %s/%d\n"
		       " tsk_top %08lx sp %08lx stack_top %08lx\n",
		       current->comm,current->pid,
		       tsk_top, _get_SP(), stack_top);
		ret |= 4;
	}

#if 0
	/* check amount of free stack */
	for ( i = (unsigned long *)task_top(tsk) ; i < kernel_stack_top(tsk) ; i++ )
	{
		if ( !i )
			printk("check_stack(): i = %p\n", i);
		if ( *i != 0 )
		{
			/* only notify if it's less than 900 bytes */
			if ( (i - (unsigned long *)task_top(tsk)) < 900 )
				printk("%d bytes free on stack\n",
				       i - task_top(tsk));
			break;
		}
	}
#endif

	if (ret)
	{
		panic("bad kernel stack");
	}
	return(ret);
}
#endif /* defined(CHECK_STACK) */
154 | |||
#ifdef CONFIG_ALTIVEC
/*
 * Copy the AltiVec register state of the current task into @vrregs
 * (core-dump helper).  Forces live hardware state back into the thread
 * struct first.  Always returns 1 (state present).
 */
int
dump_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
{
	if (regs->msr & MSR_VEC)
		giveup_altivec(current);
	memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
	return 1;
}

/*
 * Make the AltiVec unit usable by kernel code.  Caller must not be
 * preemptible: the lazy-state bookkeeping below is per-CPU.
 */
void
enable_kernel_altivec(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
		giveup_altivec(current);
	else
		giveup_altivec(NULL);	/* just enable AltiVec for kernel - force */
#else
	/* UP: flush whichever task still owns the unit. */
	giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);
#endif /* CONFIG_ALTIVEC */
181 | |||
#ifdef CONFIG_SPE
/*
 * Copy the SPE register state of the current task into @evrregs
 * (core-dump helper).  Forces live hardware state back into the thread
 * struct first.  Always returns 1 (state present).
 */
int
dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
{
	if (regs->msr & MSR_SPE)
		giveup_spe(current);
	/* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
	memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
	return 1;
}

/*
 * Make the SPE unit usable by kernel code.  Caller must not be
 * preemptible: the lazy-state bookkeeping below is per-CPU.
 */
void
enable_kernel_spe(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
		giveup_spe(current);
	else
		giveup_spe(NULL);	/* just enable SPE for kernel - force */
#else
	/* UP: flush whichever task still owns the unit. */
	giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);
#endif /* CONFIG_SPE */
209 | |||
210 | void | ||
211 | enable_kernel_fp(void) | ||
212 | { | ||
213 | WARN_ON(preemptible()); | ||
214 | |||
215 | #ifdef CONFIG_SMP | ||
216 | if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) | ||
217 | giveup_fpu(current); | ||
218 | else | ||
219 | giveup_fpu(NULL); /* just enables FP for kernel */ | ||
220 | #else | ||
221 | giveup_fpu(last_task_used_math); | ||
222 | #endif /* CONFIG_SMP */ | ||
223 | } | ||
224 | EXPORT_SYMBOL(enable_kernel_fp); | ||
225 | |||
226 | int | ||
227 | dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs) | ||
228 | { | ||
229 | preempt_disable(); | ||
230 | if (tsk->thread.regs && (tsk->thread.regs->msr & MSR_FP)) | ||
231 | giveup_fpu(tsk); | ||
232 | preempt_enable(); | ||
233 | memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs)); | ||
234 | return 1; | ||
235 | } | ||
236 | |||
/*
 * Architecture-specific half of a context switch: save lazy FP/vector
 * state as needed, then hand off to the assembly _switch() which swaps
 * kernel stacks and registers.  Runs with interrupts disabled for the
 * duration.  Returns the task we switched away from.
 */
struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	unsigned long s;
	struct task_struct *last;

	local_irq_save(s);
#ifdef CHECK_STACK
	check_stack(prev);
	check_stack(new);
#endif

#ifdef CONFIG_SMP
	/* avoid complexity of lazy save/restore of fpu
	 * by just saving it every time we switch out if
	 * this task used the fpu during the last quantum.
	 *
	 * If it tries to use the fpu again, it'll trap and
	 * reload its fp regs.  So we don't have to do a restore
	 * every switch, just a save.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
		giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
	/*
	 * If the previous thread used altivec in the last quantum
	 * (thus changing altivec regs) then save them.
	 * We used to check the VRSAVE register but not all apps
	 * set it, so we don't rely on it now (and in fact we need
	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
	 *
	 * On SMP we always save/restore altivec regs just to avoid the
	 * complexity of changing processors.
	 *  -- Cort
	 */
	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_VEC)))
		giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	/*
	 * If the previous thread used spe in the last quantum
	 * (thus changing spe regs) then save them.
	 *
	 * On SMP we always save/restore spe regs just to avoid the
	 * complexity of changing processors.
	 */
	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
		giveup_spe(prev);
#endif /* CONFIG_SPE */
#endif /* CONFIG_SMP */

	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_altivec -- Cort
	 */
	if (new->thread.regs && last_task_used_altivec == new)
		new->thread.regs->msr |= MSR_VEC;
#ifdef CONFIG_SPE
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_spe
	 */
	if (new->thread.regs && last_task_used_spe == new)
		new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */
	new_thread = &new->thread;
	old_thread = &current->thread;
	/* _switch() does the actual stack/register swap; it returns in the
	 * context of the new task, with 'last' = the task we came from. */
	last = _switch(old_thread, new_thread);
	local_irq_restore(s);
	return last;
}
308 | |||
/*
 * Dump the register state in @regs to the console (oops/debug output):
 * NIP/LR/SP, decoded MSR bits, fault registers for data-access traps,
 * and the GPRs.  Only GPR0-12 are printed when the frame is partial.
 */
void show_regs(struct pt_regs * regs)
{
	int i, trap;

	printk("NIP: %08lX LR: %08lX SP: %08lX REGS: %p TRAP: %04lx %s\n",
	       regs->nip, regs->link, regs->gpr[1], regs, regs->trap,
	       print_tainted());
	printk("MSR: %08lx EE: %01x PR: %01x FP: %01x ME: %01x IR/DR: %01x%01x\n",
	       regs->msr, regs->msr&MSR_EE ? 1 : 0, regs->msr&MSR_PR ? 1 : 0,
	       regs->msr & MSR_FP ? 1 : 0,regs->msr&MSR_ME ? 1 : 0,
	       regs->msr&MSR_IR ? 1 : 0,
	       regs->msr&MSR_DR ? 1 : 0);
	trap = TRAP(regs);
	/* 0x300/0x600: data access / alignment exceptions carry DAR/DSISR */
	if (trap == 0x300 || trap == 0x600)
		printk("DAR: %08lX, DSISR: %08lX\n", regs->dar, regs->dsisr);
	printk("TASK = %p[%d] '%s' THREAD: %p\n",
	       current, current->pid, current->comm, current->thread_info);
	printk("Last syscall: %ld ", current->thread.last_syscall);

#ifdef CONFIG_SMP
	printk(" CPU: %d", smp_processor_id());
#endif /* CONFIG_SMP */

	for (i = 0; i < 32; i++) {
		long r;
		if ((i % 8) == 0)
			printk("\n" KERN_INFO "GPR%02d: ", i);
		/* __get_user: the frame may be a faulting/partial one */
		if (__get_user(r, &regs->gpr[i]))
			break;
		printk("%08lX ", r);
		if (i == 12 && !FULL_REGS(regs))
			break;
	}
	printk("\n");
#ifdef CONFIG_KALLSYMS
	/*
	 * Lookup NIP late so we have the best chance of getting the
	 * above info out without failing
	 */
	printk("NIP [%08lx] ", regs->nip);
	print_symbol("%s\n", regs->nip);
	printk("LR [%08lx] ", regs->link);
	print_symbol("%s\n", regs->link);
#endif
	show_stack(current, (unsigned long *) regs->gpr[1]);
}
355 | |||
356 | void exit_thread(void) | ||
357 | { | ||
358 | if (last_task_used_math == current) | ||
359 | last_task_used_math = NULL; | ||
360 | if (last_task_used_altivec == current) | ||
361 | last_task_used_altivec = NULL; | ||
362 | #ifdef CONFIG_SPE | ||
363 | if (last_task_used_spe == current) | ||
364 | last_task_used_spe = NULL; | ||
365 | #endif | ||
366 | } | ||
367 | |||
368 | void flush_thread(void) | ||
369 | { | ||
370 | if (last_task_used_math == current) | ||
371 | last_task_used_math = NULL; | ||
372 | if (last_task_used_altivec == current) | ||
373 | last_task_used_altivec = NULL; | ||
374 | #ifdef CONFIG_SPE | ||
375 | if (last_task_used_spe == current) | ||
376 | last_task_used_spe = NULL; | ||
377 | #endif | ||
378 | } | ||
379 | |||
/*
 * Architecture hook called when @t's task struct is released.
 * Nothing to free on ppc32.
 */
void
release_thread(struct task_struct *t)
{
}
384 | |||
385 | /* | ||
386 | * This gets called before we allocate a new thread and copy | ||
387 | * the current task into it. | ||
388 | */ | ||
389 | void prepare_to_copy(struct task_struct *tsk) | ||
390 | { | ||
391 | struct pt_regs *regs = tsk->thread.regs; | ||
392 | |||
393 | if (regs == NULL) | ||
394 | return; | ||
395 | preempt_disable(); | ||
396 | if (regs->msr & MSR_FP) | ||
397 | giveup_fpu(current); | ||
398 | #ifdef CONFIG_ALTIVEC | ||
399 | if (regs->msr & MSR_VEC) | ||
400 | giveup_altivec(current); | ||
401 | #endif /* CONFIG_ALTIVEC */ | ||
402 | #ifdef CONFIG_SPE | ||
403 | if (regs->msr & MSR_SPE) | ||
404 | giveup_spe(current); | ||
405 | #endif /* CONFIG_SPE */ | ||
406 | preempt_enable(); | ||
407 | } | ||
408 | |||
409 | /* | ||
410 | * Copy a thread.. | ||
411 | */ | ||
412 | int | ||
413 | copy_thread(int nr, unsigned long clone_flags, unsigned long usp, | ||
414 | unsigned long unused, | ||
415 | struct task_struct *p, struct pt_regs *regs) | ||
416 | { | ||
417 | struct pt_regs *childregs, *kregs; | ||
418 | extern void ret_from_fork(void); | ||
419 | unsigned long sp = (unsigned long)p->thread_info + THREAD_SIZE; | ||
420 | unsigned long childframe; | ||
421 | |||
422 | CHECK_FULL_REGS(regs); | ||
423 | /* Copy registers */ | ||
424 | sp -= sizeof(struct pt_regs); | ||
425 | childregs = (struct pt_regs *) sp; | ||
426 | *childregs = *regs; | ||
427 | if ((childregs->msr & MSR_PR) == 0) { | ||
428 | /* for kernel thread, set `current' and stackptr in new task */ | ||
429 | childregs->gpr[1] = sp + sizeof(struct pt_regs); | ||
430 | childregs->gpr[2] = (unsigned long) p; | ||
431 | p->thread.regs = NULL; /* no user register state */ | ||
432 | } else { | ||
433 | childregs->gpr[1] = usp; | ||
434 | p->thread.regs = childregs; | ||
435 | if (clone_flags & CLONE_SETTLS) | ||
436 | childregs->gpr[2] = childregs->gpr[6]; | ||
437 | } | ||
438 | childregs->gpr[3] = 0; /* Result from fork() */ | ||
439 | sp -= STACK_FRAME_OVERHEAD; | ||
440 | childframe = sp; | ||
441 | |||
442 | /* | ||
443 | * The way this works is that at some point in the future | ||
444 | * some task will call _switch to switch to the new task. | ||
445 | * That will pop off the stack frame created below and start | ||
446 | * the new task running at ret_from_fork. The new task will | ||
447 | * do some house keeping and then return from the fork or clone | ||
448 | * system call, using the stack frame created above. | ||
449 | */ | ||
450 | sp -= sizeof(struct pt_regs); | ||
451 | kregs = (struct pt_regs *) sp; | ||
452 | sp -= STACK_FRAME_OVERHEAD; | ||
453 | p->thread.ksp = sp; | ||
454 | kregs->nip = (unsigned long)ret_from_fork; | ||
455 | |||
456 | p->thread.last_syscall = -1; | ||
457 | |||
458 | return 0; | ||
459 | } | ||
460 | |||
461 | /* | ||
462 | * Set up a thread for executing a new program | ||
463 | */ | ||
464 | void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp) | ||
465 | { | ||
466 | set_fs(USER_DS); | ||
467 | memset(regs->gpr, 0, sizeof(regs->gpr)); | ||
468 | regs->ctr = 0; | ||
469 | regs->link = 0; | ||
470 | regs->xer = 0; | ||
471 | regs->ccr = 0; | ||
472 | regs->mq = 0; | ||
473 | regs->nip = nip; | ||
474 | regs->gpr[1] = sp; | ||
475 | regs->msr = MSR_USER; | ||
476 | if (last_task_used_math == current) | ||
477 | last_task_used_math = NULL; | ||
478 | if (last_task_used_altivec == current) | ||
479 | last_task_used_altivec = NULL; | ||
480 | #ifdef CONFIG_SPE | ||
481 | if (last_task_used_spe == current) | ||
482 | last_task_used_spe = NULL; | ||
483 | #endif | ||
484 | memset(current->thread.fpr, 0, sizeof(current->thread.fpr)); | ||
485 | current->thread.fpscr = 0; | ||
486 | #ifdef CONFIG_ALTIVEC | ||
487 | memset(current->thread.vr, 0, sizeof(current->thread.vr)); | ||
488 | memset(¤t->thread.vscr, 0, sizeof(current->thread.vscr)); | ||
489 | current->thread.vrsave = 0; | ||
490 | current->thread.used_vr = 0; | ||
491 | #endif /* CONFIG_ALTIVEC */ | ||
492 | #ifdef CONFIG_SPE | ||
493 | memset(current->thread.evr, 0, sizeof(current->thread.evr)); | ||
494 | current->thread.acc = 0; | ||
495 | current->thread.spefscr = 0; | ||
496 | current->thread.used_spe = 0; | ||
497 | #endif /* CONFIG_SPE */ | ||
498 | } | ||
499 | |||
/* All individual FP exception enable bits settable via prctl(). */
#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
		| PR_FP_EXC_RES | PR_FP_EXC_INV)

/*
 * Set @tsk's floating-point exception mode (prctl PR_SET_FPEXC backend).
 * Returns 0 on success, -EINVAL for an unsupported mode.
 */
int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/* This is a bit hairy.  If we are an SPE enabled processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
	 * mode (asyn, precise, disabled) for 'Classic' FP. */
	if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
		tsk->thread.fpexc_mode = val &
			(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
#else
		/* Software exception enables only make sense with SPE. */
		return -EINVAL;
#endif
	} else {
		/* on a CONFIG_SPE this does not hurt us.  The bits that
		 * __pack_fe01 use do not overlap with bits used for
		 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
		 * on CONFIG_SPE implementations are reserved so writing to
		 * them does not change anything */
		if (val > PR_FP_EXC_PRECISE)
			return -EINVAL;
		tsk->thread.fpexc_mode = __pack_fe01(val);
		/* Apply immediately if the task's FP is live. */
		if (regs != NULL && (regs->msr & MSR_FP) != 0)
			regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
				| tsk->thread.fpexc_mode;
	}
	return 0;
}
533 | |||
534 | int get_fpexc_mode(struct task_struct *tsk, unsigned long adr) | ||
535 | { | ||
536 | unsigned int val; | ||
537 | |||
538 | if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) | ||
539 | #ifdef CONFIG_SPE | ||
540 | val = tsk->thread.fpexc_mode; | ||
541 | #else | ||
542 | return -EINVAL; | ||
543 | #endif | ||
544 | else | ||
545 | val = __unpack_fe01(tsk->thread.fpexc_mode); | ||
546 | return put_user(val, (unsigned int __user *) adr); | ||
547 | } | ||
548 | |||
549 | int sys_clone(unsigned long clone_flags, unsigned long usp, | ||
550 | int __user *parent_tidp, void __user *child_threadptr, | ||
551 | int __user *child_tidp, int p6, | ||
552 | struct pt_regs *regs) | ||
553 | { | ||
554 | CHECK_FULL_REGS(regs); | ||
555 | if (usp == 0) | ||
556 | usp = regs->gpr[1]; /* stack pointer for child */ | ||
557 | return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp); | ||
558 | } | ||
559 | |||
560 | int sys_fork(int p1, int p2, int p3, int p4, int p5, int p6, | ||
561 | struct pt_regs *regs) | ||
562 | { | ||
563 | CHECK_FULL_REGS(regs); | ||
564 | return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL); | ||
565 | } | ||
566 | |||
567 | int sys_vfork(int p1, int p2, int p3, int p4, int p5, int p6, | ||
568 | struct pt_regs *regs) | ||
569 | { | ||
570 | CHECK_FULL_REGS(regs); | ||
571 | return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1], | ||
572 | regs, 0, NULL, NULL); | ||
573 | } | ||
574 | |||
575 | int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2, | ||
576 | unsigned long a3, unsigned long a4, unsigned long a5, | ||
577 | struct pt_regs *regs) | ||
578 | { | ||
579 | int error; | ||
580 | char * filename; | ||
581 | |||
582 | filename = getname((char __user *) a0); | ||
583 | error = PTR_ERR(filename); | ||
584 | if (IS_ERR(filename)) | ||
585 | goto out; | ||
586 | preempt_disable(); | ||
587 | if (regs->msr & MSR_FP) | ||
588 | giveup_fpu(current); | ||
589 | #ifdef CONFIG_ALTIVEC | ||
590 | if (regs->msr & MSR_VEC) | ||
591 | giveup_altivec(current); | ||
592 | #endif /* CONFIG_ALTIVEC */ | ||
593 | #ifdef CONFIG_SPE | ||
594 | if (regs->msr & MSR_SPE) | ||
595 | giveup_spe(current); | ||
596 | #endif /* CONFIG_SPE */ | ||
597 | preempt_enable(); | ||
598 | error = do_execve(filename, (char __user *__user *) a1, | ||
599 | (char __user *__user *) a2, regs); | ||
600 | if (error == 0) { | ||
601 | task_lock(current); | ||
602 | current->ptrace &= ~PT_DTRACE; | ||
603 | task_unlock(current); | ||
604 | } | ||
605 | putname(filename); | ||
606 | out: | ||
607 | return error; | ||
608 | } | ||
609 | |||
/*
 * Arch hook for the generic dump_stack(): print the current context's
 * call trace via show_stack().
 */
void dump_stack(void)
{
	show_stack(current, NULL);
}

EXPORT_SYMBOL(dump_stack);
616 | |||
/*
 * Walk @tsk's kernel stack starting at @stack (or the task's saved/live
 * stack pointer when @stack is NULL) and print the saved return
 * addresses.  Follows the ppc32 ABI back-chain: *sp is the previous
 * frame, sp+4 holds the saved LR.  When a frame's return address is one
 * of the exception-return stubs, the NIP from the exception frame at
 * sp+16 is reported for the next entry instead.  Stops after 16 frames
 * or when sp leaves the task's stack bounds.
 */
void show_stack(struct task_struct *tsk, unsigned long *stack)
{
	unsigned long sp, stack_top, prev_sp, ret;
	int count = 0;
	unsigned long next_exc = 0;
	struct pt_regs *regs;
	extern char ret_from_except, ret_from_except_full, ret_from_syscall;

	sp = (unsigned long) stack;
	if (tsk == NULL)
		tsk = current;
	if (sp == 0) {
		if (tsk == current)
			asm("mr %0,1" : "=r" (sp));	/* live r1 */
		else
			sp = tsk->thread.ksp;
	}

	/* Valid frame pointers lie between the end of thread_info and the
	 * top of the stack, and are word-aligned. */
	prev_sp = (unsigned long) (tsk->thread_info + 1);
	stack_top = (unsigned long) tsk->thread_info + THREAD_SIZE;
	while (count < 16 && sp > prev_sp && sp < stack_top && (sp & 3) == 0) {
		if (count == 0) {
			printk("Call trace:");
#ifdef CONFIG_KALLSYMS
			printk("\n");
#endif
		} else {
			if (next_exc) {
				ret = next_exc;
				next_exc = 0;
			} else
				ret = *(unsigned long *)(sp + 4);
			printk(" [%08lx] ", ret);
#ifdef CONFIG_KALLSYMS
			print_symbol("%s", ret);
			printk("\n");
#endif
			if (ret == (unsigned long) &ret_from_except
			    || ret == (unsigned long) &ret_from_except_full
			    || ret == (unsigned long) &ret_from_syscall) {
				/* sp + 16 points to an exception frame */
				regs = (struct pt_regs *) (sp + 16);
				if (sp + 16 + sizeof(*regs) <= stack_top)
					next_exc = regs->nip;
			}
		}
		++count;
		sp = *(unsigned long *)sp;	/* follow the back-chain */
	}
#ifndef CONFIG_KALLSYMS
	if (count > 0)
		printk("\n");
#endif
}
671 | |||
#if 0
/* NOTE(review): dead early-boot debug helpers, compiled out via #if 0.
 * They write directly to prom/VGA memory before printk is usable. */
/*
 * Low level print for debugging - Cort
 */
int __init ll_printk(const char *fmt, ...)
{
	va_list args;
	char buf[256];
	int i;

	va_start(args, fmt);
	i=vsprintf(buf,fmt,args);
	ll_puts(buf);
	va_end(args);
	return i;
}

int lines = 24, cols = 80;
int orig_x = 0, orig_y = 0;

/* Print @val as 8 uppercase hex digits via prom_print(). */
void puthex(unsigned long val)
{
	unsigned char buf[10];
	int i;
	for (i = 7; i >= 0; i--)
	{
		buf[i] = "0123456789ABCDEF"[val & 0x0F];
		val >>= 4;
	}
	buf[8] = '\0';
	prom_print(buf);
}

void __init ll_puts(const char *s)
{
	int x,y;
	char *vidmem = (char *)/*(_ISA_MEM_BASE + 0xB8000) */0xD00B8000;
	char c;
	extern int mem_init_done;

	if ( mem_init_done ) /* assume this means we can printk */
	{
		printk(s);
		return;
	}

#if 0
	if ( have_of )
	{
		prom_print(s);
		return;
	}
#endif

	/*
	 * can't ll_puts on chrp without openfirmware yet.
	 * vidmem just needs to be setup for it.
	 * -- Cort
	 */
	if ( _machine != _MACH_prep )
		return;
	x = orig_x;
	y = orig_y;

	while ( ( c = *s++ ) != '\0' ) {
		if ( c == '\n' ) {
			x = 0;
			if ( ++y >= lines ) {
				/*scroll();*/
				/*y--;*/
				y = 0;
			}
		} else {
			vidmem [ ( x + cols * y ) * 2 ] = c;
			if ( ++x >= cols ) {
				x = 0;
				if ( ++y >= lines ) {
					/*scroll();*/
					/*y--;*/
					y = 0;
				}
			}
		}
	}

	orig_x = x;
	orig_y = y;
}
#endif
761 | |||
762 | unsigned long get_wchan(struct task_struct *p) | ||
763 | { | ||
764 | unsigned long ip, sp; | ||
765 | unsigned long stack_page = (unsigned long) p->thread_info; | ||
766 | int count = 0; | ||
767 | if (!p || p == current || p->state == TASK_RUNNING) | ||
768 | return 0; | ||
769 | sp = p->thread.ksp; | ||
770 | do { | ||
771 | sp = *(unsigned long *)sp; | ||
772 | if (sp < stack_page || sp >= stack_page + 8188) | ||
773 | return 0; | ||
774 | if (count > 0) { | ||
775 | ip = *(unsigned long *)(sp + 4); | ||
776 | if (!in_sched_functions(ip)) | ||
777 | return ip; | ||
778 | } | ||
779 | } while (count++ < 16); | ||
780 | return 0; | ||
781 | } | ||
diff --git a/arch/ppc/kernel/ptrace.c b/arch/ppc/kernel/ptrace.c new file mode 100644 index 000000000000..426b6f7d9de3 --- /dev/null +++ b/arch/ppc/kernel/ptrace.c | |||
@@ -0,0 +1,474 @@ | |||
1 | /* | ||
2 | * arch/ppc/kernel/ptrace.c | ||
3 | * | ||
4 | * PowerPC version | ||
5 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
6 | * | ||
7 | * Derived from "arch/m68k/kernel/ptrace.c" | ||
8 | * Copyright (C) 1994 by Hamish Macdonald | ||
9 | * Taken from linux/kernel/ptrace.c and modified for M680x0. | ||
10 | * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds | ||
11 | * | ||
12 | * Modified by Cort Dougan (cort@hq.fsmlabs.com) | ||
13 | * and Paul Mackerras (paulus@linuxcare.com.au). | ||
14 | * | ||
15 | * This file is subject to the terms and conditions of the GNU General | ||
16 | * Public License. See the file README.legal in the main directory of | ||
17 | * this archive for more details. | ||
18 | */ | ||
19 | |||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/sched.h> | ||
22 | #include <linux/mm.h> | ||
23 | #include <linux/smp.h> | ||
24 | #include <linux/smp_lock.h> | ||
25 | #include <linux/errno.h> | ||
26 | #include <linux/ptrace.h> | ||
27 | #include <linux/user.h> | ||
28 | #include <linux/security.h> | ||
29 | |||
30 | #include <asm/uaccess.h> | ||
31 | #include <asm/page.h> | ||
32 | #include <asm/pgtable.h> | ||
33 | #include <asm/system.h> | ||
34 | |||
35 | /* | ||
36 | * Set of msr bits that gdb can change on behalf of a process. | ||
37 | */ | ||
38 | #if defined(CONFIG_40x) || defined(CONFIG_BOOKE) | ||
39 | #define MSR_DEBUGCHANGE 0 | ||
40 | #else | ||
41 | #define MSR_DEBUGCHANGE (MSR_SE | MSR_BE) | ||
42 | #endif | ||
43 | |||
44 | /* | ||
45 | * does not yet catch signals sent when the child dies. | ||
46 | * in exit.c or in signal.c. | ||
47 | */ | ||
48 | |||
49 | /* | ||
50 | * Get contents of register REGNO in task TASK. | ||
51 | */ | ||
52 | static inline unsigned long get_reg(struct task_struct *task, int regno) | ||
53 | { | ||
54 | if (regno < sizeof(struct pt_regs) / sizeof(unsigned long) | ||
55 | && task->thread.regs != NULL) | ||
56 | return ((unsigned long *)task->thread.regs)[regno]; | ||
57 | return (0); | ||
58 | } | ||
59 | |||
60 | /* | ||
61 | * Write contents of register REGNO in task TASK. | ||
62 | */ | ||
63 | static inline int put_reg(struct task_struct *task, int regno, | ||
64 | unsigned long data) | ||
65 | { | ||
66 | if (regno <= PT_MQ && task->thread.regs != NULL) { | ||
67 | if (regno == PT_MSR) | ||
68 | data = (data & MSR_DEBUGCHANGE) | ||
69 | | (task->thread.regs->msr & ~MSR_DEBUGCHANGE); | ||
70 | ((unsigned long *)task->thread.regs)[regno] = data; | ||
71 | return 0; | ||
72 | } | ||
73 | return -EIO; | ||
74 | } | ||
75 | |||
76 | #ifdef CONFIG_ALTIVEC | ||
77 | /* | ||
78 | * Get contents of AltiVec register state in task TASK | ||
79 | */ | ||
80 | static inline int get_vrregs(unsigned long __user *data, struct task_struct *task) | ||
81 | { | ||
82 | int i, j; | ||
83 | |||
84 | if (!access_ok(VERIFY_WRITE, data, 133 * sizeof(unsigned long))) | ||
85 | return -EFAULT; | ||
86 | |||
87 | /* copy AltiVec registers VR[0] .. VR[31] */ | ||
88 | for (i = 0; i < 32; i++) | ||
89 | for (j = 0; j < 4; j++, data++) | ||
90 | if (__put_user(task->thread.vr[i].u[j], data)) | ||
91 | return -EFAULT; | ||
92 | |||
93 | /* copy VSCR */ | ||
94 | for (i = 0; i < 4; i++, data++) | ||
95 | if (__put_user(task->thread.vscr.u[i], data)) | ||
96 | return -EFAULT; | ||
97 | |||
98 | /* copy VRSAVE */ | ||
99 | if (__put_user(task->thread.vrsave, data)) | ||
100 | return -EFAULT; | ||
101 | |||
102 | return 0; | ||
103 | } | ||
104 | |||
105 | /* | ||
106 | * Write contents of AltiVec register state into task TASK. | ||
107 | */ | ||
108 | static inline int set_vrregs(struct task_struct *task, unsigned long __user *data) | ||
109 | { | ||
110 | int i, j; | ||
111 | |||
112 | if (!access_ok(VERIFY_READ, data, 133 * sizeof(unsigned long))) | ||
113 | return -EFAULT; | ||
114 | |||
115 | /* copy AltiVec registers VR[0] .. VR[31] */ | ||
116 | for (i = 0; i < 32; i++) | ||
117 | for (j = 0; j < 4; j++, data++) | ||
118 | if (__get_user(task->thread.vr[i].u[j], data)) | ||
119 | return -EFAULT; | ||
120 | |||
121 | /* copy VSCR */ | ||
122 | for (i = 0; i < 4; i++, data++) | ||
123 | if (__get_user(task->thread.vscr.u[i], data)) | ||
124 | return -EFAULT; | ||
125 | |||
126 | /* copy VRSAVE */ | ||
127 | if (__get_user(task->thread.vrsave, data)) | ||
128 | return -EFAULT; | ||
129 | |||
130 | return 0; | ||
131 | } | ||
132 | #endif | ||
133 | |||
134 | #ifdef CONFIG_SPE | ||
135 | |||
136 | /* | ||
137 | * For get_evrregs/set_evrregs functions 'data' has the following layout: | ||
138 | * | ||
139 | * struct { | ||
140 | * u32 evr[32]; | ||
141 | * u64 acc; | ||
142 | * u32 spefscr; | ||
143 | * } | ||
144 | */ | ||
145 | |||
146 | /* | ||
147 | * Get contents of SPE register state in task TASK. | ||
148 | */ | ||
149 | static inline int get_evrregs(unsigned long *data, struct task_struct *task) | ||
150 | { | ||
151 | int i; | ||
152 | |||
153 | if (!access_ok(VERIFY_WRITE, data, 35 * sizeof(unsigned long))) | ||
154 | return -EFAULT; | ||
155 | |||
156 | /* copy SPEFSCR */ | ||
157 | if (__put_user(task->thread.spefscr, &data[34])) | ||
158 | return -EFAULT; | ||
159 | |||
160 | /* copy SPE registers EVR[0] .. EVR[31] */ | ||
161 | for (i = 0; i < 32; i++, data++) | ||
162 | if (__put_user(task->thread.evr[i], data)) | ||
163 | return -EFAULT; | ||
164 | |||
165 | /* copy ACC */ | ||
166 | if (__put_user64(task->thread.acc, (unsigned long long *)data)) | ||
167 | return -EFAULT; | ||
168 | |||
169 | return 0; | ||
170 | } | ||
171 | |||
172 | /* | ||
173 | * Write contents of SPE register state into task TASK. | ||
174 | */ | ||
175 | static inline int set_evrregs(struct task_struct *task, unsigned long *data) | ||
176 | { | ||
177 | int i; | ||
178 | |||
179 | if (!access_ok(VERIFY_READ, data, 35 * sizeof(unsigned long))) | ||
180 | return -EFAULT; | ||
181 | |||
182 | /* copy SPEFSCR */ | ||
183 | if (__get_user(task->thread.spefscr, &data[34])) | ||
184 | return -EFAULT; | ||
185 | |||
186 | /* copy SPE registers EVR[0] .. EVR[31] */ | ||
187 | for (i = 0; i < 32; i++, data++) | ||
188 | if (__get_user(task->thread.evr[i], data)) | ||
189 | return -EFAULT; | ||
190 | /* copy ACC */ | ||
191 | if (__get_user64(task->thread.acc, (unsigned long long*)data)) | ||
192 | return -EFAULT; | ||
193 | |||
194 | return 0; | ||
195 | } | ||
196 | #endif /* CONFIG_SPE */ | ||
197 | |||
198 | static inline void | ||
199 | set_single_step(struct task_struct *task) | ||
200 | { | ||
201 | struct pt_regs *regs = task->thread.regs; | ||
202 | |||
203 | if (regs != NULL) { | ||
204 | #if defined(CONFIG_40x) || defined(CONFIG_BOOKE) | ||
205 | task->thread.dbcr0 = DBCR0_IDM | DBCR0_IC; | ||
206 | regs->msr |= MSR_DE; | ||
207 | #else | ||
208 | regs->msr |= MSR_SE; | ||
209 | #endif | ||
210 | } | ||
211 | } | ||
212 | |||
213 | static inline void | ||
214 | clear_single_step(struct task_struct *task) | ||
215 | { | ||
216 | struct pt_regs *regs = task->thread.regs; | ||
217 | |||
218 | if (regs != NULL) { | ||
219 | #if defined(CONFIG_40x) || defined(CONFIG_BOOKE) | ||
220 | task->thread.dbcr0 = 0; | ||
221 | regs->msr &= ~MSR_DE; | ||
222 | #else | ||
223 | regs->msr &= ~MSR_SE; | ||
224 | #endif | ||
225 | } | ||
226 | } | ||
227 | |||
/*
 * Called by the core ptrace code (kernel/ptrace.c) when a tracer
 * detaches.  Ensure the child is not left single-stepping.
 */
void ptrace_disable(struct task_struct *child)
{
	clear_single_step(child);
}
238 | |||
/*
 * PPC ptrace system call.
 *
 * request/addr/data follow the usual ptrace(2) conventions; pid names
 * the tracee.  The whole call runs under the big kernel lock, and a
 * task reference is taken (get_task_struct) before the child is used
 * and dropped at out_tsk.  Returns 0 or a positive value on success,
 * a negative errno on failure.
 */
int sys_ptrace(long request, long pid, long addr, long data)
{
	struct task_struct *child;
	int ret = -EPERM;

	lock_kernel();
	if (request == PTRACE_TRACEME) {
		/* are we already being traced? */
		if (current->ptrace & PT_PTRACED)
			goto out;
		ret = security_ptrace(current->parent, current);
		if (ret)
			goto out;
		/* set the ptrace bit in the process flags. */
		current->ptrace |= PT_PTRACED;
		ret = 0;
		goto out;
	}
	ret = -ESRCH;
	read_lock(&tasklist_lock);
	child = find_task_by_pid(pid);
	if (child)
		/* Hold a reference so the child can't vanish under us. */
		get_task_struct(child);
	read_unlock(&tasklist_lock);
	if (!child)
		goto out;

	ret = -EPERM;
	if (pid == 1)		/* you may not mess with init */
		goto out_tsk;

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		goto out_tsk;
	}

	/* All remaining requests need the child stopped (except KILL). */
	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0)
		goto out_tsk;

	switch (request) {
	/* when I and D space are separate, these will need to be fixed. */
	case PTRACE_PEEKTEXT: /* read word at location addr. */
	case PTRACE_PEEKDATA: {
		unsigned long tmp;
		int copied;

		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
		ret = -EIO;
		if (copied != sizeof(tmp))
			break;
		ret = put_user(tmp,(unsigned long __user *) data);
		break;
	}

	/* read the word at location addr in the USER area. */
	/* XXX this will need fixing for 64-bit */
	case PTRACE_PEEKUSR: {
		unsigned long index, tmp;

		ret = -EIO;
		/* convert to index and check */
		index = (unsigned long) addr >> 2;
		if ((addr & 3) || index > PT_FPSCR
		    || child->thread.regs == NULL)
			break;

		CHECK_FULL_REGS(child->thread.regs);
		if (index < PT_FPR0) {
			/* GPRs and special registers come from pt_regs. */
			tmp = get_reg(child, (int) index);
		} else {
			/* FP registers: flush live FP state to the
			 * thread_struct first so we read current values. */
			preempt_disable();
			if (child->thread.regs->msr & MSR_FP)
				giveup_fpu(child);
			preempt_enable();
			tmp = ((unsigned long *)child->thread.fpr)[index - PT_FPR0];
		}
		ret = put_user(tmp,(unsigned long __user *) data);
		break;
	}

	/* If I and D space are separate, this will have to be fixed. */
	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA:
		ret = 0;
		if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
			break;
		ret = -EIO;
		break;

	/* write the word at location addr in the USER area */
	case PTRACE_POKEUSR: {
		unsigned long index;

		ret = -EIO;
		/* convert to index and check */
		index = (unsigned long) addr >> 2;
		if ((addr & 3) || index > PT_FPSCR
		    || child->thread.regs == NULL)
			break;

		CHECK_FULL_REGS(child->thread.regs);
		/* The original r3 (syscall argument) may not be changed. */
		if (index == PT_ORIG_R3)
			break;
		if (index < PT_FPR0) {
			ret = put_reg(child, index, data);
		} else {
			/* Flush live FP state before overwriting it. */
			preempt_disable();
			if (child->thread.regs->msr & MSR_FP)
				giveup_fpu(child);
			preempt_enable();
			((unsigned long *)child->thread.fpr)[index - PT_FPR0] = data;
			ret = 0;
		}
		break;
	}

	case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
	case PTRACE_CONT: { /* restart after signal. */
		ret = -EIO;
		if ((unsigned long) data > _NSIG)
			break;
		if (request == PTRACE_SYSCALL) {
			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		} else {
			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		}
		/* data is the signal the child resumes with (0 = none). */
		child->exit_code = data;
		/* make sure the single step bit is not set. */
		clear_single_step(child);
		wake_up_process(child);
		ret = 0;
		break;
	}

	/*
	 * make the child exit. Best I can do is send it a sigkill.
	 * perhaps it should be put in the status that it wants to
	 * exit.
	 */
	case PTRACE_KILL: {
		ret = 0;
		if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
			break;
		child->exit_code = SIGKILL;
		/* make sure the single step bit is not set. */
		clear_single_step(child);
		wake_up_process(child);
		break;
	}

	case PTRACE_SINGLESTEP: {  /* set the trap flag. */
		ret = -EIO;
		if ((unsigned long) data > _NSIG)
			break;
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		set_single_step(child);
		child->exit_code = data;
		/* give it a chance to run. */
		wake_up_process(child);
		ret = 0;
		break;
	}

	case PTRACE_DETACH:
		ret = ptrace_detach(child, data);
		break;

#ifdef CONFIG_ALTIVEC
	case PTRACE_GETVRREGS:
		/* Get the child altivec register state. */
		preempt_disable();
		if (child->thread.regs->msr & MSR_VEC)
			giveup_altivec(child);
		preempt_enable();
		ret = get_vrregs((unsigned long __user *)data, child);
		break;

	case PTRACE_SETVRREGS:
		/* Set the child altivec register state. */
		/* this is to clear the MSR_VEC bit to force a reload
		 * of register state from memory */
		preempt_disable();
		if (child->thread.regs->msr & MSR_VEC)
			giveup_altivec(child);
		preempt_enable();
		ret = set_vrregs(child, (unsigned long __user *)data);
		break;
#endif
#ifdef CONFIG_SPE
	case PTRACE_GETEVRREGS:
		/* Get the child spe register state. */
		/* NOTE(review): unlike the AltiVec cases above there is no
		 * preempt_disable() around giveup_spe() -- confirm whether
		 * that is intentional. */
		if (child->thread.regs->msr & MSR_SPE)
			giveup_spe(child);
		ret = get_evrregs((unsigned long __user *)data, child);
		break;

	case PTRACE_SETEVRREGS:
		/* Set the child spe register state. */
		/* this is to clear the MSR_SPE bit to force a reload
		 * of register state from memory */
		if (child->thread.regs->msr & MSR_SPE)
			giveup_spe(child);
		ret = set_evrregs(child, (unsigned long __user *)data);
		break;
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
out_tsk:
	put_task_struct(child);
out:
	unlock_kernel();
	return ret;
}
456 | |||
457 | void do_syscall_trace(void) | ||
458 | { | ||
459 | if (!test_thread_flag(TIF_SYSCALL_TRACE) | ||
460 | || !(current->ptrace & PT_PTRACED)) | ||
461 | return; | ||
462 | ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) | ||
463 | ? 0x80 : 0)); | ||
464 | |||
465 | /* | ||
466 | * this isn't the same as continuing with a signal, but it will do | ||
467 | * for normal use. strace only continues with a signal if the | ||
468 | * stopping signal is not SIGTRAP. -brl | ||
469 | */ | ||
470 | if (current->exit_code) { | ||
471 | send_sig(current->exit_code, current, 1); | ||
472 | current->exit_code = 0; | ||
473 | } | ||
474 | } | ||
diff --git a/arch/ppc/kernel/semaphore.c b/arch/ppc/kernel/semaphore.c new file mode 100644 index 000000000000..2fe429b27c14 --- /dev/null +++ b/arch/ppc/kernel/semaphore.c | |||
@@ -0,0 +1,131 @@ | |||
1 | /* | ||
2 | * PowerPC-specific semaphore code. | ||
3 | * | ||
4 | * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * April 2001 - Reworked by Paul Mackerras <paulus@samba.org> | ||
12 | * to eliminate the SMP races in the old version between the updates | ||
13 | * of `count' and `waking'. Now we use negative `count' values to | ||
14 | * indicate that some process(es) are waiting for the semaphore. | ||
15 | */ | ||
16 | |||
17 | #include <linux/sched.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <asm/atomic.h> | ||
20 | #include <asm/semaphore.h> | ||
21 | #include <asm/errno.h> | ||
22 | |||
/*
 * Atomically update sem->count.
 * This does the equivalent of the following:
 *
 *	old_count = sem->count;
 *	tmp = MAX(old_count, 0) + incr;
 *	sem->count = tmp;
 *	return old_count;
 *
 * The update is done with a lwarx/stwcx. reservation loop so it is
 * atomic with respect to other CPUs.  MAX(old_count, 0) is computed
 * without a branch: srawi produces 0 for non-negative values and ~0
 * for negative ones, and andc masks the count with its complement,
 * clamping negatives to 0.
 */
static inline int __sem_update_count(struct semaphore *sem, int incr)
{
	int old_count, tmp;

	__asm__ __volatile__("\n"
"1:	lwarx	%0,0,%3\n"
"	srawi	%1,%0,31\n"
"	andc	%1,%0,%1\n"
"	add	%1,%1,%4\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%3\n"
"	bne	1b"
	: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
	: "r" (&sem->count), "r" (incr), "m" (sem->count)
	: "cc");

	return old_count;
}
50 | |||
51 | void __up(struct semaphore *sem) | ||
52 | { | ||
53 | /* | ||
54 | * Note that we incremented count in up() before we came here, | ||
55 | * but that was ineffective since the result was <= 0, and | ||
56 | * any negative value of count is equivalent to 0. | ||
57 | * This ends up setting count to 1, unless count is now > 0 | ||
58 | * (i.e. because some other cpu has called up() in the meantime), | ||
59 | * in which case we just increment count. | ||
60 | */ | ||
61 | __sem_update_count(sem, 1); | ||
62 | wake_up(&sem->wait); | ||
63 | } | ||
64 | |||
65 | /* | ||
66 | * Note that when we come in to __down or __down_interruptible, | ||
67 | * we have already decremented count, but that decrement was | ||
68 | * ineffective since the result was < 0, and any negative value | ||
69 | * of count is equivalent to 0. | ||
70 | * Thus it is only when we decrement count from some value > 0 | ||
71 | * that we have actually got the semaphore. | ||
72 | */ | ||
73 | void __sched __down(struct semaphore *sem) | ||
74 | { | ||
75 | struct task_struct *tsk = current; | ||
76 | DECLARE_WAITQUEUE(wait, tsk); | ||
77 | |||
78 | tsk->state = TASK_UNINTERRUPTIBLE; | ||
79 | add_wait_queue_exclusive(&sem->wait, &wait); | ||
80 | smp_wmb(); | ||
81 | |||
82 | /* | ||
83 | * Try to get the semaphore. If the count is > 0, then we've | ||
84 | * got the semaphore; we decrement count and exit the loop. | ||
85 | * If the count is 0 or negative, we set it to -1, indicating | ||
86 | * that we are asleep, and then sleep. | ||
87 | */ | ||
88 | while (__sem_update_count(sem, -1) <= 0) { | ||
89 | schedule(); | ||
90 | tsk->state = TASK_UNINTERRUPTIBLE; | ||
91 | } | ||
92 | remove_wait_queue(&sem->wait, &wait); | ||
93 | tsk->state = TASK_RUNNING; | ||
94 | |||
95 | /* | ||
96 | * If there are any more sleepers, wake one of them up so | ||
97 | * that it can either get the semaphore, or set count to -1 | ||
98 | * indicating that there are still processes sleeping. | ||
99 | */ | ||
100 | wake_up(&sem->wait); | ||
101 | } | ||
102 | |||
103 | int __sched __down_interruptible(struct semaphore * sem) | ||
104 | { | ||
105 | int retval = 0; | ||
106 | struct task_struct *tsk = current; | ||
107 | DECLARE_WAITQUEUE(wait, tsk); | ||
108 | |||
109 | tsk->state = TASK_INTERRUPTIBLE; | ||
110 | add_wait_queue_exclusive(&sem->wait, &wait); | ||
111 | smp_wmb(); | ||
112 | |||
113 | while (__sem_update_count(sem, -1) <= 0) { | ||
114 | if (signal_pending(current)) { | ||
115 | /* | ||
116 | * A signal is pending - give up trying. | ||
117 | * Set sem->count to 0 if it is negative, | ||
118 | * since we are no longer sleeping. | ||
119 | */ | ||
120 | __sem_update_count(sem, 0); | ||
121 | retval = -EINTR; | ||
122 | break; | ||
123 | } | ||
124 | schedule(); | ||
125 | tsk->state = TASK_INTERRUPTIBLE; | ||
126 | } | ||
127 | tsk->state = TASK_RUNNING; | ||
128 | remove_wait_queue(&sem->wait, &wait); | ||
129 | wake_up(&sem->wait); | ||
130 | return retval; | ||
131 | } | ||
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c new file mode 100644 index 000000000000..e97ce635b99e --- /dev/null +++ b/arch/ppc/kernel/setup.c | |||
@@ -0,0 +1,778 @@ | |||
1 | /* | ||
2 | * Common prep/pmac/chrp boot and setup code. | ||
3 | */ | ||
4 | |||
5 | #include <linux/config.h> | ||
6 | #include <linux/module.h> | ||
7 | #include <linux/string.h> | ||
8 | #include <linux/sched.h> | ||
9 | #include <linux/init.h> | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/reboot.h> | ||
12 | #include <linux/delay.h> | ||
13 | #include <linux/initrd.h> | ||
14 | #include <linux/ide.h> | ||
15 | #include <linux/tty.h> | ||
16 | #include <linux/bootmem.h> | ||
17 | #include <linux/seq_file.h> | ||
18 | #include <linux/root_dev.h> | ||
19 | #include <linux/cpu.h> | ||
20 | #include <linux/console.h> | ||
21 | |||
22 | #include <asm/residual.h> | ||
23 | #include <asm/io.h> | ||
24 | #include <asm/prom.h> | ||
25 | #include <asm/processor.h> | ||
26 | #include <asm/pgtable.h> | ||
27 | #include <asm/bootinfo.h> | ||
28 | #include <asm/setup.h> | ||
29 | #include <asm/amigappc.h> | ||
30 | #include <asm/smp.h> | ||
31 | #include <asm/elf.h> | ||
32 | #include <asm/cputable.h> | ||
33 | #include <asm/bootx.h> | ||
34 | #include <asm/btext.h> | ||
35 | #include <asm/machdep.h> | ||
36 | #include <asm/uaccess.h> | ||
37 | #include <asm/system.h> | ||
38 | #include <asm/pmac_feature.h> | ||
39 | #include <asm/sections.h> | ||
40 | #include <asm/nvram.h> | ||
41 | #include <asm/xmon.h> | ||
42 | #include <asm/ocp.h> | ||
43 | |||
44 | #if defined(CONFIG_85xx) || defined(CONFIG_83xx) | ||
45 | #include <asm/ppc_sys.h> | ||
46 | #endif | ||
47 | |||
48 | #if defined CONFIG_KGDB | ||
49 | #include <asm/kgdb.h> | ||
50 | #endif | ||
51 | |||
52 | extern void platform_init(unsigned long r3, unsigned long r4, | ||
53 | unsigned long r5, unsigned long r6, unsigned long r7); | ||
54 | extern void bootx_init(unsigned long r4, unsigned long phys); | ||
55 | extern void identify_cpu(unsigned long offset, unsigned long cpu); | ||
56 | extern void do_cpu_ftr_fixups(unsigned long offset); | ||
57 | extern void reloc_got2(unsigned long offset); | ||
58 | |||
59 | extern void ppc6xx_idle(void); | ||
60 | extern void power4_idle(void); | ||
61 | |||
62 | extern boot_infos_t *boot_infos; | ||
63 | struct ide_machdep_calls ppc_ide_md; | ||
64 | char *sysmap; | ||
65 | unsigned long sysmap_size; | ||
66 | |||
67 | /* Used with the BI_MEMSIZE bootinfo parameter to store the memory | ||
68 | size value reported by the boot loader. */ | ||
69 | unsigned long boot_mem_size; | ||
70 | |||
71 | unsigned long ISA_DMA_THRESHOLD; | ||
72 | unsigned long DMA_MODE_READ, DMA_MODE_WRITE; | ||
73 | |||
74 | #ifdef CONFIG_PPC_MULTIPLATFORM | ||
75 | int _machine = 0; | ||
76 | |||
77 | extern void prep_init(unsigned long r3, unsigned long r4, | ||
78 | unsigned long r5, unsigned long r6, unsigned long r7); | ||
79 | extern void pmac_init(unsigned long r3, unsigned long r4, | ||
80 | unsigned long r5, unsigned long r6, unsigned long r7); | ||
81 | extern void chrp_init(unsigned long r3, unsigned long r4, | ||
82 | unsigned long r5, unsigned long r6, unsigned long r7); | ||
83 | #endif /* CONFIG_PPC_MULTIPLATFORM */ | ||
84 | |||
85 | #ifdef CONFIG_MAGIC_SYSRQ | ||
86 | unsigned long SYSRQ_KEY = 0x54; | ||
87 | #endif /* CONFIG_MAGIC_SYSRQ */ | ||
88 | |||
89 | #ifdef CONFIG_VGA_CONSOLE | ||
90 | unsigned long vgacon_remap_base; | ||
91 | #endif | ||
92 | |||
93 | struct machdep_calls ppc_md; | ||
94 | |||
95 | /* | ||
96 | * These are used in binfmt_elf.c to put aux entries on the stack | ||
97 | * for each elf executable being started. | ||
98 | */ | ||
99 | int dcache_bsize; | ||
100 | int icache_bsize; | ||
101 | int ucache_bsize; | ||
102 | |||
103 | #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_FB_VGA16) || \ | ||
104 | defined(CONFIG_FB_VGA16_MODULE) || defined(CONFIG_FB_VESA) | ||
105 | struct screen_info screen_info = { | ||
106 | 0, 25, /* orig-x, orig-y */ | ||
107 | 0, /* unused */ | ||
108 | 0, /* orig-video-page */ | ||
109 | 0, /* orig-video-mode */ | ||
110 | 80, /* orig-video-cols */ | ||
111 | 0,0,0, /* ega_ax, ega_bx, ega_cx */ | ||
112 | 25, /* orig-video-lines */ | ||
113 | 1, /* orig-video-isVGA */ | ||
114 | 16 /* orig-video-points */ | ||
115 | }; | ||
116 | #endif /* CONFIG_VGA_CONSOLE || CONFIG_FB_VGA16 || CONFIG_FB_VESA */ | ||
117 | |||
/*
 * Restart the machine via the platform-specific hook.
 * NVRAM is synced first when configured -- presumably to flush
 * pending NVRAM writes before the reset; confirm against nvram_sync().
 */
void machine_restart(char *cmd)
{
#ifdef CONFIG_NVRAM
	nvram_sync();
#endif
	ppc_md.restart(cmd);
}
125 | |||
126 | EXPORT_SYMBOL(machine_restart); | ||
127 | |||
/*
 * Power the machine off via the platform-specific hook, syncing
 * NVRAM first when configured (mirrors machine_restart).
 */
void machine_power_off(void)
{
#ifdef CONFIG_NVRAM
	nvram_sync();
#endif
	ppc_md.power_off();
}
135 | |||
136 | EXPORT_SYMBOL(machine_power_off); | ||
137 | |||
/*
 * Halt the machine via the platform-specific hook, syncing NVRAM
 * first when configured (mirrors machine_restart).
 */
void machine_halt(void)
{
#ifdef CONFIG_NVRAM
	nvram_sync();
#endif
	ppc_md.halt();
}
145 | |||
146 | EXPORT_SYMBOL(machine_halt); | ||
147 | |||
148 | void (*pm_power_off)(void) = machine_power_off; | ||
149 | |||
150 | #ifdef CONFIG_TAU | ||
151 | extern u32 cpu_temp(unsigned long cpu); | ||
152 | extern u32 cpu_temp_both(unsigned long cpu); | ||
153 | #endif /* CONFIG_TAU */ | ||
154 | |||
/*
 * /proc/cpuinfo show callback.
 *
 * v is the cookie handed out by c_start() below, which is the cpu
 * index plus one (so index 0 is distinguishable from the NULL
 * end-of-sequence marker); position NR_CPUS selects the summary
 * block.  Output format must stay stable -- userspace parses it.
 */
int show_cpuinfo(struct seq_file *m, void *v)
{
	/* Undo the +1 bias applied by c_start(). */
	int i = (int) v - 1;
	int err = 0;
	unsigned int pvr;
	unsigned short maj, min;
	unsigned long lpj;

	if (i >= NR_CPUS) {
		/* Show summary information */
#ifdef CONFIG_SMP
		unsigned long bogosum = 0;
		for (i = 0; i < NR_CPUS; ++i)
			if (cpu_online(i))
				bogosum += cpu_data[i].loops_per_jiffy;
		seq_printf(m, "total bogomips\t: %lu.%02lu\n",
			   bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
#endif /* CONFIG_SMP */

		/* Let the platform append its own summary lines. */
		if (ppc_md.show_cpuinfo != NULL)
			err = ppc_md.show_cpuinfo(m);
		return err;
	}

#ifdef CONFIG_SMP
	if (!cpu_online(i))
		return 0;
	pvr = cpu_data[i].pvr;
	lpj = cpu_data[i].loops_per_jiffy;
#else
	pvr = mfspr(SPRN_PVR);
	lpj = loops_per_jiffy;
#endif

	seq_printf(m, "processor\t: %d\n", i);
	seq_printf(m, "cpu\t\t: ");

	if (cur_cpu_spec[i]->pvr_mask)
		seq_printf(m, "%s", cur_cpu_spec[i]->cpu_name);
	else
		seq_printf(m, "unknown (%08x)", pvr);
#ifdef CONFIG_ALTIVEC
	if (cur_cpu_spec[i]->cpu_features & CPU_FTR_ALTIVEC)
		seq_printf(m, ", altivec supported");
#endif
	seq_printf(m, "\n");

#ifdef CONFIG_TAU
	if (cur_cpu_spec[i]->cpu_features & CPU_FTR_TAU) {
#ifdef CONFIG_TAU_AVERAGE
		/* more straightforward, but potentially misleading */
		seq_printf(m, "temperature \t: %u C (uncalibrated)\n",
			   cpu_temp(i));
#else
		/* show the actual temp sensor range */
		u32 temp;
		temp = cpu_temp_both(i);
		seq_printf(m, "temperature \t: %u-%u C (uncalibrated)\n",
			   temp & 0xff, temp >> 16);
#endif
	}
#endif /* CONFIG_TAU */

	/* Platform-specific per-cpu lines (e.g. clock frequency). */
	if (ppc_md.show_percpuinfo != NULL) {
		err = ppc_md.show_percpuinfo(m, i);
		if (err)
			return err;
	}

	/* Decode major/minor revision from the PVR; the encoding
	 * varies by processor family. */
	switch (PVR_VER(pvr)) {
	case 0x0020:	/* 403 family */
		maj = PVR_MAJ(pvr) + 1;
		min = PVR_MIN(pvr);
		break;
	case 0x1008:	/* 740P/750P ?? */
		maj = ((pvr >> 8) & 0xFF) - 1;
		min = pvr & 0xFF;
		break;
	case 0x8083:	/* e300 */
		maj = PVR_MAJ(pvr);
		min = PVR_MIN(pvr);
		break;
	case 0x8020:	/* e500 */
		maj = PVR_MAJ(pvr);
		min = PVR_MIN(pvr);
		break;
	default:
		maj = (pvr >> 8) & 0xFF;
		min = pvr & 0xFF;
		break;
	}

	seq_printf(m, "revision\t: %hd.%hd (pvr %04x %04x)\n",
		   maj, min, PVR_VER(pvr), PVR_REV(pvr));

	seq_printf(m, "bogomips\t: %lu.%02lu\n",
		   lpj / (500000/HZ), (lpj / (5000/HZ)) % 100);

#if defined(CONFIG_85xx) || defined(CONFIG_83xx)
	if (cur_ppc_sys_spec->ppc_sys_name)
		seq_printf(m, "chipset\t\t: %s\n",
			   cur_ppc_sys_spec->ppc_sys_name);
#endif

#ifdef CONFIG_SMP
	seq_printf(m, "\n");
#endif

	return 0;
}
265 | |||
266 | static void *c_start(struct seq_file *m, loff_t *pos) | ||
267 | { | ||
268 | int i = *pos; | ||
269 | |||
270 | return i <= NR_CPUS? (void *) (i + 1): NULL; | ||
271 | } | ||
272 | |||
273 | static void *c_next(struct seq_file *m, void *v, loff_t *pos) | ||
274 | { | ||
275 | ++*pos; | ||
276 | return c_start(m, pos); | ||
277 | } | ||
278 | |||
/* seq_file iterator stop: c_start() allocates nothing, so no cleanup. */
static void c_stop(struct seq_file *m, void *v)
{
}
282 | |||
283 | struct seq_operations cpuinfo_op = { | ||
284 | .start =c_start, | ||
285 | .next = c_next, | ||
286 | .stop = c_stop, | ||
287 | .show = show_cpuinfo, | ||
288 | }; | ||
289 | |||
290 | /* | ||
291 | * We're called here very early in the boot. We determine the machine | ||
292 | * type and call the appropriate low-level setup functions. | ||
293 | * -- Cort <cort@fsmlabs.com> | ||
294 | * | ||
295 | * Note that the kernel may be running at an address which is different | ||
296 | * from the address that it was linked at, so we must use RELOC/PTRRELOC | ||
297 | * to access static data (including strings). -- paulus | ||
298 | */ | ||
/*
 * First C code run after boot.  The kernel may still be executing at
 * a physical address different from its link address, so all static
 * data must be reached via PTRRELOC/offset arithmetic.  Returns the
 * physical address the kernel should run at.
 */
__init
unsigned long
early_init(int r3, int r4, int r5)
{
	unsigned long phys;
	unsigned long offset = reloc_offset();

	/* Default */
	phys = offset + KERNELBASE;

	/* First zero the BSS -- use memset, some arches don't have
	 * caches on yet */
	memset_io(PTRRELOC(&__bss_start), 0, _end - __bss_start);

	/*
	 * Identify the CPU type and fix up code sections
	 * that depend on which cpu we have.
	 */
	identify_cpu(offset, 0);
	do_cpu_ftr_fixups(offset);

#if defined(CONFIG_PPC_MULTIPLATFORM)
	/* Temporarily relocate the GOT so C code can run here. */
	reloc_got2(offset);

	/* If we came here from BootX, clear the screen,
	 * set up some pointers and return. */
	/* 0x426f6f58 is ASCII "BooX", the BootX magic in r3. */
	if ((r3 == 0x426f6f58) && (r5 == 0))
		bootx_init(r4, phys);

	/*
	 * don't do anything on prep
	 * for now, don't use bootinfo because it breaks yaboot 0.5
	 * and assume that if we didn't find a magic number, we have OF
	 */
	else if (*(unsigned long *)(0) != 0xdeadc0de)
		phys = prom_init(r3, r4, (prom_entry)r5);

	reloc_got2(-offset);
#endif

	return phys;
}
341 | |||
342 | #ifdef CONFIG_PPC_OF | ||
343 | /* | ||
344 | * Assume here that all clock rates are the same in a | ||
345 | * smp system. -- Cort | ||
346 | */ | ||
347 | int __openfirmware | ||
348 | of_show_percpuinfo(struct seq_file *m, int i) | ||
349 | { | ||
350 | struct device_node *cpu_node; | ||
351 | u32 *fp; | ||
352 | int s; | ||
353 | |||
354 | cpu_node = find_type_devices("cpu"); | ||
355 | if (!cpu_node) | ||
356 | return 0; | ||
357 | for (s = 0; s < i && cpu_node->next; s++) | ||
358 | cpu_node = cpu_node->next; | ||
359 | fp = (u32 *)get_property(cpu_node, "clock-frequency", NULL); | ||
360 | if (fp) | ||
361 | seq_printf(m, "clock\t\t: %dMHz\n", *fp / 1000000); | ||
362 | return 0; | ||
363 | } | ||
364 | |||
365 | void __init | ||
366 | intuit_machine_type(void) | ||
367 | { | ||
368 | char *model; | ||
369 | struct device_node *root; | ||
370 | |||
371 | /* ask the OF info if we're a chrp or pmac */ | ||
372 | root = find_path_device("/"); | ||
373 | if (root != 0) { | ||
374 | /* assume pmac unless proven to be chrp -- Cort */ | ||
375 | _machine = _MACH_Pmac; | ||
376 | model = get_property(root, "device_type", NULL); | ||
377 | if (model && !strncmp("chrp", model, 4)) | ||
378 | _machine = _MACH_chrp; | ||
379 | else { | ||
380 | model = get_property(root, "model", NULL); | ||
381 | if (model && !strncmp(model, "IBM", 3)) | ||
382 | _machine = _MACH_chrp; | ||
383 | } | ||
384 | } | ||
385 | } | ||
386 | #endif | ||
387 | |||
388 | #ifdef CONFIG_PPC_MULTIPLATFORM | ||
389 | /* | ||
390 | * The PPC_MULTIPLATFORM version of platform_init... | ||
391 | */ | ||
/*
 * Multiplatform boot: work out which machine family we're on (prep,
 * pmac or chrp) from bootinfo records / magic values / the device tree,
 * collect the command line and any initrd location, then hand off to
 * the family-specific init routine.  r3-r7 are the raw register values
 * passed in by the boot loader; their meaning depends on which loader
 * ran us (see the big comment below).
 */
void __init
platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
	      unsigned long r6, unsigned long r7)
{
#ifdef CONFIG_BOOTX_TEXT
	/* btext console was mapped by early boot code (BootX path) */
	if (boot_text_mapped) {
		btext_clearscreen();
		btext_welcome();
	}
#endif

	/* May set _machine, cmd_line, initrd_*, sysmap, boot_mem_size. */
	parse_bootinfo(find_bootinfo());

	/* if we didn't get any bootinfo telling us what we are... */
	if (_machine == 0) {
		/* prep boot loader tells us if we're prep or not */
		if ( *(unsigned long *)(KERNELBASE) == (0xdeadc0de) )
			_machine = _MACH_prep;
	}

	/* not much more to do here, if prep */
	if (_machine == _MACH_prep) {
		prep_init(r3, r4, r5, r6, r7);
		return;
	}

	/* prom_init has already been called from __start */
	if (boot_infos)
		relocate_nodes();

	/* If we aren't PReP, we can find out if we're Pmac
	 * or CHRP with this. */
	if (_machine == 0)
		intuit_machine_type();

	/* finish_device_tree may need _machine defined. */
	finish_device_tree();

	/*
	 * If we were booted via quik, r3 points to the physical
	 * address of the command-line parameters.
	 * If we were booted from an xcoff image (i.e. netbooted or
	 * booted from floppy), we get the command line from the
	 * bootargs property of the /chosen node.
	 * If an initial ramdisk is present, r3 and r4
	 * are used for initrd_start and initrd_size,
	 * otherwise they contain 0xdeadbeef.
	 */
	if (r3 >= 0x4000 && r3 < 0x800000 && r4 == 0) {
		/* quik: r3 is the physical address of the parameters */
		strlcpy(cmd_line, (char *)r3 + KERNELBASE,
			sizeof(cmd_line));
	} else if (boot_infos != 0) {
		/* booted by BootX - check for ramdisk */
		if (boot_infos->kernelParamsOffset != 0)
			strlcpy(cmd_line, (char *) boot_infos
				+ boot_infos->kernelParamsOffset,
				sizeof(cmd_line));
#ifdef CONFIG_BLK_DEV_INITRD
		if (boot_infos->ramDisk) {
			initrd_start = (unsigned long) boot_infos
				+ boot_infos->ramDisk;
			initrd_end = initrd_start + boot_infos->ramDiskSize;
			initrd_below_start_ok = 1;
		}
#endif
	} else {
		struct device_node *chosen;
		char *p;

#ifdef CONFIG_BLK_DEV_INITRD
		/* xcoff boot: r3/r4 carry the initrd unless 0xdeadbeef */
		if (r3 && r4 && r4 != 0xdeadbeef) {
			if (r3 < KERNELBASE)
				r3 += KERNELBASE;
			initrd_start = r3;
			initrd_end = r3 + r4;
			ROOT_DEV = Root_RAM0;
			initrd_below_start_ok = 1;
		}
#endif
		/* command line comes from the OF /chosen node */
		chosen = find_devices("chosen");
		if (chosen != NULL) {
			p = get_property(chosen, "bootargs", NULL);
			if (p && *p) {
				strlcpy(cmd_line, p, sizeof(cmd_line));
			}
		}
	}
#ifdef CONFIG_ADB
	if (strstr(cmd_line, "adb_sync")) {
		extern int __adb_probe_sync;
		__adb_probe_sync = 1;
	}
#endif /* CONFIG_ADB */

	/* dispatch to the machine-family early init */
	switch (_machine) {
	case _MACH_Pmac:
		pmac_init(r3, r4, r5, r6, r7);
		break;
	case _MACH_chrp:
		chrp_init(r3, r4, r5, r6, r7);
		break;
	}
}
495 | |||
496 | #ifdef CONFIG_SERIAL_CORE_CONSOLE | ||
497 | extern char *of_stdout_device; | ||
498 | |||
499 | static int __init set_preferred_console(void) | ||
500 | { | ||
501 | struct device_node *prom_stdout; | ||
502 | char *name; | ||
503 | int offset; | ||
504 | |||
505 | if (of_stdout_device == NULL) | ||
506 | return -ENODEV; | ||
507 | |||
508 | /* The user has requested a console so this is already set up. */ | ||
509 | if (strstr(saved_command_line, "console=")) | ||
510 | return -EBUSY; | ||
511 | |||
512 | prom_stdout = find_path_device(of_stdout_device); | ||
513 | if (!prom_stdout) | ||
514 | return -ENODEV; | ||
515 | |||
516 | name = (char *)get_property(prom_stdout, "name", NULL); | ||
517 | if (!name) | ||
518 | return -ENODEV; | ||
519 | |||
520 | if (strcmp(name, "serial") == 0) { | ||
521 | int i; | ||
522 | u32 *reg = (u32 *)get_property(prom_stdout, "reg", &i); | ||
523 | if (i > 8) { | ||
524 | switch (reg[1]) { | ||
525 | case 0x3f8: | ||
526 | offset = 0; | ||
527 | break; | ||
528 | case 0x2f8: | ||
529 | offset = 1; | ||
530 | break; | ||
531 | case 0x898: | ||
532 | offset = 2; | ||
533 | break; | ||
534 | case 0x890: | ||
535 | offset = 3; | ||
536 | break; | ||
537 | default: | ||
538 | /* We dont recognise the serial port */ | ||
539 | return -ENODEV; | ||
540 | } | ||
541 | } | ||
542 | } else if (strcmp(name, "ch-a") == 0) | ||
543 | offset = 0; | ||
544 | else if (strcmp(name, "ch-b") == 0) | ||
545 | offset = 1; | ||
546 | else | ||
547 | return -ENODEV; | ||
548 | return add_preferred_console("ttyS", offset, NULL); | ||
549 | } | ||
550 | console_initcall(set_preferred_console); | ||
551 | #endif /* CONFIG_SERIAL_CORE_CONSOLE */ | ||
552 | #endif /* CONFIG_PPC_MULTIPLATFORM */ | ||
553 | |||
554 | struct bi_record *find_bootinfo(void) | ||
555 | { | ||
556 | struct bi_record *rec; | ||
557 | |||
558 | rec = (struct bi_record *)_ALIGN((ulong)__bss_start+(1<<20)-1,(1<<20)); | ||
559 | if ( rec->tag != BI_FIRST ) { | ||
560 | /* | ||
561 | * This 0x10000 offset is a terrible hack but it will go away when | ||
562 | * we have the bootloader handle all the relocation and | ||
563 | * prom calls -- Cort | ||
564 | */ | ||
565 | rec = (struct bi_record *)_ALIGN((ulong)__bss_start+0x10000+(1<<20)-1,(1<<20)); | ||
566 | if ( rec->tag != BI_FIRST ) | ||
567 | return NULL; | ||
568 | } | ||
569 | return rec; | ||
570 | } | ||
571 | |||
572 | void parse_bootinfo(struct bi_record *rec) | ||
573 | { | ||
574 | if (rec == NULL || rec->tag != BI_FIRST) | ||
575 | return; | ||
576 | while (rec->tag != BI_LAST) { | ||
577 | ulong *data = rec->data; | ||
578 | switch (rec->tag) { | ||
579 | case BI_CMD_LINE: | ||
580 | strlcpy(cmd_line, (void *)data, sizeof(cmd_line)); | ||
581 | break; | ||
582 | case BI_SYSMAP: | ||
583 | sysmap = (char *)((data[0] >= (KERNELBASE)) ? data[0] : | ||
584 | (data[0]+KERNELBASE)); | ||
585 | sysmap_size = data[1]; | ||
586 | break; | ||
587 | #ifdef CONFIG_BLK_DEV_INITRD | ||
588 | case BI_INITRD: | ||
589 | initrd_start = data[0] + KERNELBASE; | ||
590 | initrd_end = data[0] + data[1] + KERNELBASE; | ||
591 | break; | ||
592 | #endif /* CONFIG_BLK_DEV_INITRD */ | ||
593 | #ifdef CONFIG_PPC_MULTIPLATFORM | ||
594 | case BI_MACHTYPE: | ||
595 | _machine = data[0]; | ||
596 | break; | ||
597 | #endif | ||
598 | case BI_MEMSIZE: | ||
599 | boot_mem_size = data[0]; | ||
600 | break; | ||
601 | } | ||
602 | rec = (struct bi_record *)((ulong)rec + rec->size); | ||
603 | } | ||
604 | } | ||
605 | |||
606 | /* | ||
607 | * Find out what kind of machine we're on and save any data we need | ||
608 | * from the early boot process (devtree is copied on pmac by prom_init()). | ||
609 | * This is called very early on the boot process, after a minimal | ||
610 | * MMU environment has been set up but before MMU_init is called. | ||
611 | */ | ||
void __init
machine_init(unsigned long r3, unsigned long r4, unsigned long r5,
	     unsigned long r6, unsigned long r7)
{
#ifdef CONFIG_CMDLINE
	/* Seed with the build-time command line; platform_init() may
	 * overwrite it from bootinfo / OF below. */
	strlcpy(cmd_line, CONFIG_CMDLINE, sizeof(cmd_line));
#endif /* CONFIG_CMDLINE */

#ifdef CONFIG_6xx
	/* default idle loop for this CPU family */
	ppc_md.power_save = ppc6xx_idle;
#endif
#ifdef CONFIG_POWER4
	ppc_md.power_save = power4_idle;
#endif

	/* identify the machine and run its family-specific setup */
	platform_init(r3, r4, r5, r6, r7);

	if (ppc_md.progress)
		ppc_md.progress("id mach(): done", 0x200);
}
632 | |||
633 | /* Checks "l2cr=xxxx" command-line option */ | ||
634 | int __init ppc_setup_l2cr(char *str) | ||
635 | { | ||
636 | if (cpu_has_feature(CPU_FTR_L2CR)) { | ||
637 | unsigned long val = simple_strtoul(str, NULL, 0); | ||
638 | printk(KERN_INFO "l2cr set to %lx\n", val); | ||
639 | _set_L2CR(0); /* force invalidate by disable cache */ | ||
640 | _set_L2CR(val); /* and enable it */ | ||
641 | } | ||
642 | return 1; | ||
643 | } | ||
644 | __setup("l2cr=", ppc_setup_l2cr); | ||
645 | |||
646 | #ifdef CONFIG_GENERIC_NVRAM | ||
647 | |||
648 | /* Generic nvram hooks used by drivers/char/gen_nvram.c */ | ||
649 | unsigned char nvram_read_byte(int addr) | ||
650 | { | ||
651 | if (ppc_md.nvram_read_val) | ||
652 | return ppc_md.nvram_read_val(addr); | ||
653 | return 0xff; | ||
654 | } | ||
655 | EXPORT_SYMBOL(nvram_read_byte); | ||
656 | |||
657 | void nvram_write_byte(unsigned char val, int addr) | ||
658 | { | ||
659 | if (ppc_md.nvram_write_val) | ||
660 | ppc_md.nvram_write_val(addr, val); | ||
661 | } | ||
662 | EXPORT_SYMBOL(nvram_write_byte); | ||
663 | |||
664 | void nvram_sync(void) | ||
665 | { | ||
666 | if (ppc_md.nvram_sync) | ||
667 | ppc_md.nvram_sync(); | ||
668 | } | ||
669 | EXPORT_SYMBOL(nvram_sync); | ||
670 | |||
#endif /* CONFIG_GENERIC_NVRAM */
672 | |||
673 | static struct cpu cpu_devices[NR_CPUS]; | ||
674 | |||
675 | int __init ppc_init(void) | ||
676 | { | ||
677 | int i; | ||
678 | |||
679 | /* clear the progress line */ | ||
680 | if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff); | ||
681 | |||
682 | /* register CPU devices */ | ||
683 | for (i = 0; i < NR_CPUS; i++) | ||
684 | if (cpu_possible(i)) | ||
685 | register_cpu(&cpu_devices[i], i, NULL); | ||
686 | |||
687 | /* call platform init */ | ||
688 | if (ppc_md.init != NULL) { | ||
689 | ppc_md.init(); | ||
690 | } | ||
691 | return 0; | ||
692 | } | ||
693 | |||
694 | arch_initcall(ppc_init); | ||
695 | |||
696 | /* Warning, IO base is not yet inited */ | ||
/*
 * Main architecture setup, called from init/main.c with the MMU up.
 * Orders matter here: xmon/kgdb hooks come before anything that can
 * trap, bootmem before the platform setup_arch hook, and paging_init
 * last.  Warning, IO base is not yet inited.
 */
void __init setup_arch(char **cmdline_p)
{
	extern char *klimit;
	extern void do_init_bootmem(void);

	/* so udelay does something sensible, assume <= 1000 bogomips */
	loops_per_jiffy = 500000000 / HZ;

#ifdef CONFIG_PPC_MULTIPLATFORM
	/* This could be called "early setup arch", it must be done
	 * now because xmon need it
	 */
	if (_machine == _MACH_Pmac)
		pmac_feature_init();	/* New cool way */
#endif

#ifdef CONFIG_XMON
	xmon_map_scc();
	/* drop into the debugger immediately if "xmon" was requested */
	if (strstr(cmd_line, "xmon"))
		xmon(NULL);
#endif /* CONFIG_XMON */
	if ( ppc_md.progress ) ppc_md.progress("setup_arch: enter", 0x3eab);

#if defined(CONFIG_KGDB)
	if (ppc_md.kgdb_map_scc)
		ppc_md.kgdb_map_scc();
	set_debug_traps();
	if (strstr(cmd_line, "gdb")) {
		if (ppc_md.progress)
			ppc_md.progress("setup_arch: kgdb breakpoint", 0x4000);
		printk("kgdb breakpoint activated\n");
		breakpoint();
	}
#endif

	/*
	 * Set cache line size based on type of cpu as a default.
	 * Systems with OF can look in the properties on the cpu node(s)
	 * for a possibly more accurate value.
	 */
	if (cpu_has_feature(CPU_FTR_SPLIT_ID_CACHE)) {
		dcache_bsize = cur_cpu_spec[0]->dcache_bsize;
		icache_bsize = cur_cpu_spec[0]->icache_bsize;
		ucache_bsize = 0;
	} else
		/* unified cache: report one size for all three */
		ucache_bsize = dcache_bsize = icache_bsize
			= cur_cpu_spec[0]->dcache_bsize;

	/* reboot on panic */
	panic_timeout = 180;

	init_mm.start_code = PAGE_OFFSET;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) klimit;

	/* Save unparsed command line copy for /proc/cmdline */
	strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	/* set up the bootmem stuff with available memory */
	do_init_bootmem();
	if ( ppc_md.progress ) ppc_md.progress("setup_arch: bootmem", 0x3eab);

#ifdef CONFIG_PPC_OCP
	/* Initialize OCP device list */
	ocp_early_init();
	if ( ppc_md.progress ) ppc_md.progress("ocp: exit", 0x3eab);
#endif

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

	/* platform-specific setup hook (pmac/chrp/prep/embedded board) */
	ppc_md.setup_arch();
	if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab);

	paging_init();

	/* this is for modules since _machine can be a define -- Cort */
	ppc_md.ppc_machine = _machine;
}
diff --git a/arch/ppc/kernel/signal.c b/arch/ppc/kernel/signal.c new file mode 100644 index 000000000000..9567d3041ea7 --- /dev/null +++ b/arch/ppc/kernel/signal.c | |||
@@ -0,0 +1,775 @@ | |||
1 | /* | ||
2 | * arch/ppc/kernel/signal.c | ||
3 | * | ||
4 | * PowerPC version | ||
5 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
6 | * | ||
7 | * Derived from "arch/i386/kernel/signal.c" | ||
8 | * Copyright (C) 1991, 1992 Linus Torvalds | ||
9 | * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or | ||
12 | * modify it under the terms of the GNU General Public License | ||
13 | * as published by the Free Software Foundation; either version | ||
14 | * 2 of the License, or (at your option) any later version. | ||
15 | */ | ||
16 | |||
17 | #include <linux/sched.h> | ||
18 | #include <linux/mm.h> | ||
19 | #include <linux/smp.h> | ||
20 | #include <linux/smp_lock.h> | ||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/signal.h> | ||
23 | #include <linux/errno.h> | ||
24 | #include <linux/wait.h> | ||
25 | #include <linux/ptrace.h> | ||
26 | #include <linux/unistd.h> | ||
27 | #include <linux/stddef.h> | ||
28 | #include <linux/elf.h> | ||
29 | #include <linux/tty.h> | ||
30 | #include <linux/binfmts.h> | ||
31 | #include <linux/suspend.h> | ||
32 | #include <asm/ucontext.h> | ||
33 | #include <asm/uaccess.h> | ||
34 | #include <asm/pgtable.h> | ||
35 | #include <asm/cacheflush.h> | ||
36 | |||
37 | #undef DEBUG_SIG | ||
38 | |||
39 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | ||
40 | |||
41 | extern void sigreturn_exit(struct pt_regs *); | ||
42 | |||
43 | #define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs)) | ||
44 | |||
45 | int do_signal(sigset_t *oldset, struct pt_regs *regs); | ||
46 | |||
47 | /* | ||
48 | * Atomically swap in the new signal mask, and wait for a signal. | ||
49 | */ | ||
int
sys_sigsuspend(old_sigset_t mask, int p2, int p3, int p4, int p6, int p7,
	       struct pt_regs *regs)
{
	sigset_t saveset;

	/* SIGKILL and SIGSTOP can never be blocked */
	mask &= _BLOCKABLE;
	spin_lock_irq(&current->sighand->siglock);
	saveset = current->blocked;	/* remembered so do_signal() can restore it */
	siginitset(&current->blocked, mask);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	/* Pre-load the userspace error return (-EINTR): result/gpr3 carry
	 * the value, the 0x10000000 bit in CR0 flags the syscall error
	 * per the PPC syscall convention. */
	regs->result = -EINTR;
	regs->gpr[3] = EINTR;
	regs->ccr |= 0x10000000;
	/* Sleep until a signal arrives and is actually delivered;
	 * sigreturn_exit() leaves the kernel and does not return here. */
	while (1) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
		if (do_signal(&saveset, regs))
			sigreturn_exit(regs);
	}
}
73 | |||
/*
 * rt variant of sigsuspend: the new mask comes from userspace as a full
 * sigset_t instead of the legacy word.  Same sleep-until-delivery loop
 * as sys_sigsuspend above.
 */
int
sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, int p3, int p4,
		  int p6, int p7, struct pt_regs *regs)
{
	sigset_t saveset, newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	/* SIGKILL and SIGSTOP can never be blocked */
	sigdelsetmask(&newset, ~_BLOCKABLE);

	spin_lock_irq(&current->sighand->siglock);
	saveset = current->blocked;	/* restored by do_signal() at delivery */
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	/* Pre-load the -EINTR error return; CR0 bit 0x10000000 flags the
	 * syscall error per the PPC convention. */
	regs->result = -EINTR;
	regs->gpr[3] = EINTR;
	regs->ccr |= 0x10000000;
	/* Sleep until a signal is delivered; sigreturn_exit() does not return. */
	while (1) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
		if (do_signal(&saveset, regs))
			sigreturn_exit(regs);
	}
}
104 | |||
105 | |||
/*
 * Install/query the alternate signal stack; gpr[1] is the user stack
 * pointer, used by do_sigaltstack to tell whether we're currently
 * running on the altstack.
 */
int
sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, int r5,
		int r6, int r7, int r8, struct pt_regs *regs)
{
	return do_sigaltstack(uss, uoss, regs->gpr[1]);
}
112 | |||
/*
 * Legacy sigaction syscall: translate the old_sigaction userspace
 * layout to/from the kernel's k_sigaction and call do_sigaction.
 */
int
sys_sigaction(int sig, const struct old_sigaction __user *act,
	      struct old_sigaction __user *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
			return -EFAULT;
		/* Return values deliberately unchecked: access_ok above
		 * already validated the whole *act range. */
		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
		__get_user(mask, &act->sa_mask);
		/* old ABI carries only the first word of the mask */
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, (act? &new_ka: NULL), (oact? &old_ka: NULL));

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
			return -EFAULT;
		/* unchecked for the same reason as the reads above */
		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
	}

	return ret;
}
144 | |||
145 | /* | ||
146 | * When we have signals to deliver, we set up on the | ||
147 | * user stack, going down from the original stack pointer: | ||
148 | * a sigregs struct | ||
149 | * a sigcontext struct | ||
150 | * a gap of __SIGNAL_FRAMESIZE bytes | ||
151 | * | ||
152 | * Each of these things must be a multiple of 16 bytes in size. | ||
153 | * | ||
154 | */ | ||
/* Register-save area placed on the user stack for non-RT signals. */
struct sigregs {
	struct mcontext mctx;	/* all the register values */
	/* Programs using the rs6000/xcoff abi can save up to 19 gp regs
	   and 18 fp regs below sp before decrementing it. */
	int abigap[56];
};

/* We use the mc_pad field for the signal return trampoline. */
#define tramp	mc_pad
164 | |||
165 | /* | ||
166 | * When we have rt signals to deliver, we set up on the | ||
167 | * user stack, going down from the original stack pointer: | ||
168 | * one rt_sigframe struct (siginfo + ucontext + ABI gap) | ||
169 | * a gap of __SIGNAL_FRAMESIZE+16 bytes | ||
170 | * (the +16 is to get the siginfo and ucontext in the same | ||
171 | * positions as in older kernels). | ||
172 | * | ||
173 | * Each of these things must be a multiple of 16 bytes in size. | ||
174 | * | ||
175 | */ | ||
/* Frame placed on the user stack for RT signals: siginfo + ucontext
 * (which embeds the mcontext register save area) + ABI scratch gap. */
struct rt_sigframe
{
	struct siginfo info;
	struct ucontext uc;
	/* Programs using the rs6000/xcoff abi can save up to 19 gp regs
	   and 18 fp regs below sp before decrementing it. */
	int abigap[56];
};
184 | |||
185 | /* | ||
186 | * Save the current user registers on the user stack. | ||
187 | * We only save the altivec/spe registers if the process has used | ||
188 | * altivec/spe instructions at some point. | ||
189 | */ | ||
/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 * Returns nonzero if a user-space copy faulted (caller sends SIGSEGV).
 * If sigret != 0, also writes a "li r0,sigret; sc" trampoline into the
 * frame for the handler to return through.
 */
static int
save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, int sigret)
{
	/* save general and floating-point registers */
	CHECK_FULL_REGS(regs);
	/* Flush any live FP/vector state out of the CPU registers into
	 * current->thread before we copy it to the user frame. */
	preempt_disable();
	if (regs->msr & MSR_FP)
		giveup_fpu(current);
#ifdef CONFIG_ALTIVEC
	if (current->thread.used_vr && (regs->msr & MSR_VEC))
		giveup_altivec(current);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	if (current->thread.used_spe && (regs->msr & MSR_SPE))
		giveup_spe(current);
#endif /* CONFIG_SPE */
	preempt_enable();

	if (__copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE)
	    || __copy_to_user(&frame->mc_fregs, current->thread.fpr,
			      ELF_NFPREG * sizeof(double)))
		return 1;

	current->thread.fpscr = 0;	/* turn off all fp exceptions */

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
				   ELF_NVRREG * sizeof(vector128)))
			return 1;
		/* set MSR_VEC in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		if (__put_user(regs->msr | MSR_VEC, &frame->mc_gregs[PT_MSR]))
			return 1;
	}
	/* else assert((regs->msr & MSR_VEC) == 0) */

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 */
	if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
		return 1;
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_SPE
	/* save spe registers */
	if (current->thread.used_spe) {
		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
				   ELF_NEVRREG * sizeof(u32)))
			return 1;
		/* set MSR_SPE in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		if (__put_user(regs->msr | MSR_SPE, &frame->mc_gregs[PT_MSR]))
			return 1;
	}
	/* else assert((regs->msr & MSR_SPE) == 0) */

	/* We always copy to/from spefscr */
	if (__put_user(current->thread.spefscr, (u32 *)&frame->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	if (sigret) {
		/* Set up the sigreturn trampoline: li r0,sigret; sc */
		if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
		    || __put_user(0x44000002UL, &frame->tramp[1]))
			return 1;
		/* make the newly written instructions visible to ifetch */
		flush_icache_range((unsigned long) &frame->tramp[0],
				   (unsigned long) &frame->tramp[2]);
	}

	return 0;
}
266 | |||
267 | /* | ||
268 | * Restore the current user register values from the user stack, | ||
269 | * (except for MSR). | ||
270 | */ | ||
/*
 * Restore the current user register values from the user stack,
 * (except for MSR).
 * sig != 0 means "restoring for signal return": gpr[2] (TLS) is then
 * taken from the frame; sig == 0 (setcontext path) preserves the
 * current gpr[2].  Returns nonzero on user-space copy fault.
 */
static int
restore_user_regs(struct pt_regs *regs, struct mcontext __user *sr, int sig)
{
	unsigned long save_r2 = 0;
#if defined(CONFIG_ALTIVEC) || defined(CONFIG_SPE)
	unsigned long msr;
#endif

	/* backup/restore the TLS as we don't want it to be modified */
	if (!sig)
		save_r2 = regs->gpr[2];
	/* copy up to but not including MSR */
	if (__copy_from_user(regs, &sr->mc_gregs, PT_MSR * sizeof(elf_greg_t)))
		return 1;
	/* copy from orig_r3 (the word after the MSR) up to the end */
	if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
			     GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
		return 1;
	if (!sig)
		regs->gpr[2] = save_r2;

	/* force the process to reload the FP registers from
	   current->thread when it next does FP instructions */
	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
	if (__copy_from_user(current->thread.fpr, &sr->mc_fregs,
			     sizeof(sr->mc_fregs)))
		return 1;

#ifdef CONFIG_ALTIVEC
	/* force the process to reload the altivec registers from
	   current->thread when it next does altivec instructions */
	regs->msr &= ~MSR_VEC;
	if (!__get_user(msr, &sr->mc_gregs[PT_MSR]) && (msr & MSR_VEC) != 0) {
		/* restore altivec registers from the stack */
		if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
				     sizeof(sr->mc_vregs)))
			return 1;
	} else if (current->thread.used_vr)
		/* frame has no valid vector state: clear ours */
		memset(&current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));

	/* Always get VRSAVE back */
	if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
		return 1;
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_SPE
	/* force the process to reload the spe registers from
	   current->thread when it next does spe instructions */
	regs->msr &= ~MSR_SPE;
	if (!__get_user(msr, &sr->mc_gregs[PT_MSR]) && (msr & MSR_SPE) != 0) {
		/* restore spe registers from the stack */
		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
				     ELF_NEVRREG * sizeof(u32)))
			return 1;
	} else if (current->thread.used_spe)
		memset(&current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

	/* Always get SPEFSCR back */
	if (__get_user(current->thread.spefscr, (u32 *)&sr->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

#ifndef CONFIG_SMP
	/* On UP, drop any lazy-save ownership this task had so the new
	 * register values really are reloaded from current->thread. */
	preempt_disable();
	if (last_task_used_math == current)
		last_task_used_math = NULL;
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
	preempt_enable();
#endif
	return 0;
}
345 | |||
346 | /* | ||
347 | * Restore the user process's signal mask | ||
348 | */ | ||
static void
restore_sigmask(sigset_t *set)
{
	/* never allow SIGKILL or SIGSTOP to be blocked */
	sigdelsetmask(set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = *set;
	recalc_sigpending();	/* pending state may change with the new mask */
	spin_unlock_irq(&current->sighand->siglock);
}
358 | |||
359 | /* | ||
360 | * Set up a signal frame for a "real-time" signal handler | ||
361 | * (one which gets siginfo). | ||
362 | */ | ||
static void
handle_rt_signal(unsigned long sig, struct k_sigaction *ka,
		 siginfo_t *info, sigset_t *oldset, struct pt_regs * regs,
		 unsigned long newsp)
{
	struct rt_sigframe __user *rt_sf;
	struct mcontext __user *frame;
	unsigned long origsp = newsp;

	/* Set up Signal Frame */
	/* Put a Real Time Context onto stack */
	newsp -= sizeof(*rt_sf);
	rt_sf = (struct rt_sigframe __user *) newsp;

	/* create a stack frame for the caller of the handler */
	newsp -= __SIGNAL_FRAMESIZE + 16;

	/* check the whole span we are about to write in one go */
	if (!access_ok(VERIFY_WRITE, (void __user *) newsp, origsp - newsp))
		goto badframe;

	/* Put the siginfo & fill in most of the ucontext */
	if (copy_siginfo_to_user(&rt_sf->info, info)
	    || __put_user(0, &rt_sf->uc.uc_flags)
	    || __put_user(0, &rt_sf->uc.uc_link)
	    || __put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp)
	    || __put_user(sas_ss_flags(regs->gpr[1]),
			  &rt_sf->uc.uc_stack.ss_flags)
	    || __put_user(current->sas_ss_size, &rt_sf->uc.uc_stack.ss_size)
	    || __put_user(&rt_sf->uc.uc_mcontext, &rt_sf->uc.uc_regs)
	    || __copy_to_user(&rt_sf->uc.uc_sigmask, oldset, sizeof(*oldset)))
		goto badframe;

	/* Save user registers on the stack */
	frame = &rt_sf->uc.uc_mcontext;
	/* sigret = __NR_rt_sigreturn: a return trampoline is written too */
	if (save_user_regs(regs, frame, __NR_rt_sigreturn))
		goto badframe;

	/* write the back-chain pointer for the new stack frame */
	if (put_user(regs->gpr[1], (unsigned long __user *)newsp))
		goto badframe;
	/* Enter the handler: r3-r5 are its (sig, siginfo, ucontext)
	 * arguments, LR points at the sigreturn trampoline. */
	regs->gpr[1] = newsp;
	regs->gpr[3] = sig;
	regs->gpr[4] = (unsigned long) &rt_sf->info;
	regs->gpr[5] = (unsigned long) &rt_sf->uc;
	regs->gpr[6] = (unsigned long) rt_sf;
	regs->nip = (unsigned long) ka->sa.sa_handler;
	regs->link = (unsigned long) frame->tramp;
	regs->trap = 0;

	return;

badframe:
#ifdef DEBUG_SIG
	printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
	       regs, frame, newsp);
#endif
	force_sigsegv(sig, current);
}
420 | |||
421 | static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig) | ||
422 | { | ||
423 | sigset_t set; | ||
424 | struct mcontext __user *mcp; | ||
425 | |||
426 | if (__copy_from_user(&set, &ucp->uc_sigmask, sizeof(set)) | ||
427 | || __get_user(mcp, &ucp->uc_regs)) | ||
428 | return -EFAULT; | ||
429 | restore_sigmask(&set); | ||
430 | if (restore_user_regs(regs, mcp, sig)) | ||
431 | return -EFAULT; | ||
432 | |||
433 | return 0; | ||
434 | } | ||
435 | |||
/*
 * swapcontext syscall: optionally save the current context to old_ctx,
 * then (optionally) switch to new_ctx.  Exits via sigreturn_exit when a
 * new context is installed, so it does not return to the caller then.
 */
int sys_swapcontext(struct ucontext __user *old_ctx,
		    struct ucontext __user *new_ctx,
		    int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
{
	unsigned char tmp;

	/* Context size is for future use. Right now, we only make sure
	 * we are passed something we understand
	 */
	if (ctx_size < sizeof(struct ucontext))
		return -EINVAL;

	if (old_ctx != NULL) {
		/* save_user_regs with sigret == 0: no trampoline written */
		if (!access_ok(VERIFY_WRITE, old_ctx, sizeof(*old_ctx))
		    || save_user_regs(regs, &old_ctx->uc_mcontext, 0)
		    || __copy_to_user(&old_ctx->uc_sigmask,
				      &current->blocked, sizeof(sigset_t))
		    || __put_user(&old_ctx->uc_mcontext, &old_ctx->uc_regs))
			return -EFAULT;
	}
	if (new_ctx == NULL)
		return 0;
	/* probe the first and last byte so later faults are unexpected */
	if (!access_ok(VERIFY_READ, new_ctx, sizeof(*new_ctx))
	    || __get_user(tmp, (u8 __user *) new_ctx)
	    || __get_user(tmp, (u8 __user *) (new_ctx + 1) - 1))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(new_ctx, regs, 0))
		do_exit(SIGSEGV);
	sigreturn_exit(regs);
	/* doesn't actually return back to here */
	return 0;
}
480 | |||
/*
 * rt_sigreturn system call: undo a signal frame set up by
 * handle_rt_signal().  The frame is located relative to the user stack
 * pointer (r1) at the offset used when the frame was built.
 */
int sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
		     struct pt_regs *regs)
{
	struct rt_sigframe __user *rt_sf;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/* The frame sits above the back-chain word and ABI frame. */
	rt_sf = (struct rt_sigframe __user *)
		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
	if (!access_ok(VERIFY_READ, rt_sf, sizeof(struct rt_sigframe)))
		goto bad;
	if (do_setcontext(&rt_sf->uc, regs, 1))
		goto bad;

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
	do_sigaltstack(&rt_sf->uc.uc_stack, NULL, regs->gpr[1]);

	sigreturn_exit(regs);	/* doesn't return here */
	return 0;

 bad:
	force_sig(SIGSEGV, current);
	return 0;
}
512 | |||
513 | int sys_debug_setcontext(struct ucontext __user *ctx, | ||
514 | int ndbg, struct sig_dbg_op *dbg, | ||
515 | int r6, int r7, int r8, | ||
516 | struct pt_regs *regs) | ||
517 | { | ||
518 | struct sig_dbg_op op; | ||
519 | int i; | ||
520 | unsigned long new_msr = regs->msr; | ||
521 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) | ||
522 | unsigned long new_dbcr0 = current->thread.dbcr0; | ||
523 | #endif | ||
524 | |||
525 | for (i=0; i<ndbg; i++) { | ||
526 | if (__copy_from_user(&op, dbg, sizeof(op))) | ||
527 | return -EFAULT; | ||
528 | switch (op.dbg_type) { | ||
529 | case SIG_DBG_SINGLE_STEPPING: | ||
530 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) | ||
531 | if (op.dbg_value) { | ||
532 | new_msr |= MSR_DE; | ||
533 | new_dbcr0 |= (DBCR0_IDM | DBCR0_IC); | ||
534 | } else { | ||
535 | new_msr &= ~MSR_DE; | ||
536 | new_dbcr0 &= ~(DBCR0_IDM | DBCR0_IC); | ||
537 | } | ||
538 | #else | ||
539 | if (op.dbg_value) | ||
540 | new_msr |= MSR_SE; | ||
541 | else | ||
542 | new_msr &= ~MSR_SE; | ||
543 | #endif | ||
544 | break; | ||
545 | case SIG_DBG_BRANCH_TRACING: | ||
546 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) | ||
547 | return -EINVAL; | ||
548 | #else | ||
549 | if (op.dbg_value) | ||
550 | new_msr |= MSR_BE; | ||
551 | else | ||
552 | new_msr &= ~MSR_BE; | ||
553 | #endif | ||
554 | break; | ||
555 | |||
556 | default: | ||
557 | return -EINVAL; | ||
558 | } | ||
559 | } | ||
560 | |||
561 | /* We wait until here to actually install the values in the | ||
562 | registers so if we fail in the above loop, it will not | ||
563 | affect the contents of these registers. After this point, | ||
564 | failure is a problem, anyway, and it's very unlikely unless | ||
565 | the user is really doing something wrong. */ | ||
566 | regs->msr = new_msr; | ||
567 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) | ||
568 | current->thread.dbcr0 = new_dbcr0; | ||
569 | #endif | ||
570 | |||
571 | /* | ||
572 | * If we get a fault copying the context into the kernel's | ||
573 | * image of the user's registers, we can't just return -EFAULT | ||
574 | * because the user's registers will be corrupted. For instance | ||
575 | * the NIP value may have been updated but not some of the | ||
576 | * other registers. Given that we have done the access_ok | ||
577 | * and successfully read the first and last bytes of the region | ||
578 | * above, this should only happen in an out-of-memory situation | ||
579 | * or if another thread unmaps the region containing the context. | ||
580 | * We kill the task with a SIGSEGV in this situation. | ||
581 | */ | ||
582 | if (do_setcontext(ctx, regs, 1)) { | ||
583 | force_sig(SIGSEGV, current); | ||
584 | goto out; | ||
585 | } | ||
586 | |||
587 | /* | ||
588 | * It's not clear whether or why it is desirable to save the | ||
589 | * sigaltstack setting on signal delivery and restore it on | ||
590 | * signal return. But other architectures do this and we have | ||
591 | * always done it up until now so it is probably better not to | ||
592 | * change it. -- paulus | ||
593 | */ | ||
594 | do_sigaltstack(&ctx->uc_stack, NULL, regs->gpr[1]); | ||
595 | |||
596 | sigreturn_exit(regs); | ||
597 | /* doesn't actually return back to here */ | ||
598 | |||
599 | out: | ||
600 | return 0; | ||
601 | } | ||
602 | |||
603 | /* | ||
604 | * OK, we're invoking a handler | ||
605 | */ | ||
606 | static void | ||
607 | handle_signal(unsigned long sig, struct k_sigaction *ka, | ||
608 | siginfo_t *info, sigset_t *oldset, struct pt_regs * regs, | ||
609 | unsigned long newsp) | ||
610 | { | ||
611 | struct sigcontext __user *sc; | ||
612 | struct sigregs __user *frame; | ||
613 | unsigned long origsp = newsp; | ||
614 | |||
615 | /* Set up Signal Frame */ | ||
616 | newsp -= sizeof(struct sigregs); | ||
617 | frame = (struct sigregs __user *) newsp; | ||
618 | |||
619 | /* Put a sigcontext on the stack */ | ||
620 | newsp -= sizeof(*sc); | ||
621 | sc = (struct sigcontext __user *) newsp; | ||
622 | |||
623 | /* create a stack frame for the caller of the handler */ | ||
624 | newsp -= __SIGNAL_FRAMESIZE; | ||
625 | |||
626 | if (!access_ok(VERIFY_WRITE, (void __user *) newsp, origsp - newsp)) | ||
627 | goto badframe; | ||
628 | |||
629 | #if _NSIG != 64 | ||
630 | #error "Please adjust handle_signal()" | ||
631 | #endif | ||
632 | if (__put_user((unsigned long) ka->sa.sa_handler, &sc->handler) | ||
633 | || __put_user(oldset->sig[0], &sc->oldmask) | ||
634 | || __put_user(oldset->sig[1], &sc->_unused[3]) | ||
635 | || __put_user((struct pt_regs *)frame, &sc->regs) | ||
636 | || __put_user(sig, &sc->signal)) | ||
637 | goto badframe; | ||
638 | |||
639 | if (save_user_regs(regs, &frame->mctx, __NR_sigreturn)) | ||
640 | goto badframe; | ||
641 | |||
642 | if (put_user(regs->gpr[1], (unsigned long __user *)newsp)) | ||
643 | goto badframe; | ||
644 | regs->gpr[1] = newsp; | ||
645 | regs->gpr[3] = sig; | ||
646 | regs->gpr[4] = (unsigned long) sc; | ||
647 | regs->nip = (unsigned long) ka->sa.sa_handler; | ||
648 | regs->link = (unsigned long) frame->mctx.tramp; | ||
649 | regs->trap = 0; | ||
650 | |||
651 | return; | ||
652 | |||
653 | badframe: | ||
654 | #ifdef DEBUG_SIG | ||
655 | printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n", | ||
656 | regs, frame, newsp); | ||
657 | #endif | ||
658 | force_sigsegv(sig, current); | ||
659 | } | ||
660 | |||
661 | /* | ||
662 | * Do a signal return; undo the signal stack. | ||
663 | */ | ||
664 | int sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, | ||
665 | struct pt_regs *regs) | ||
666 | { | ||
667 | struct sigcontext __user *sc; | ||
668 | struct sigcontext sigctx; | ||
669 | struct mcontext __user *sr; | ||
670 | sigset_t set; | ||
671 | |||
672 | /* Always make any pending restarted system calls return -EINTR */ | ||
673 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | ||
674 | |||
675 | sc = (struct sigcontext __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE); | ||
676 | if (copy_from_user(&sigctx, sc, sizeof(sigctx))) | ||
677 | goto badframe; | ||
678 | |||
679 | set.sig[0] = sigctx.oldmask; | ||
680 | set.sig[1] = sigctx._unused[3]; | ||
681 | restore_sigmask(&set); | ||
682 | |||
683 | sr = (struct mcontext __user *) sigctx.regs; | ||
684 | if (!access_ok(VERIFY_READ, sr, sizeof(*sr)) | ||
685 | || restore_user_regs(regs, sr, 1)) | ||
686 | goto badframe; | ||
687 | |||
688 | sigreturn_exit(regs); /* doesn't return */ | ||
689 | return 0; | ||
690 | |||
691 | badframe: | ||
692 | force_sig(SIGSEGV, current); | ||
693 | return 0; | ||
694 | } | ||
695 | |||
696 | /* | ||
697 | * Note that 'init' is a special process: it doesn't get signals it doesn't | ||
698 | * want to handle. Thus you cannot kill init even with a SIGKILL even by | ||
699 | * mistake. | ||
700 | */ | ||
701 | int do_signal(sigset_t *oldset, struct pt_regs *regs) | ||
702 | { | ||
703 | siginfo_t info; | ||
704 | struct k_sigaction ka; | ||
705 | unsigned long frame, newsp; | ||
706 | int signr, ret; | ||
707 | |||
708 | if (current->flags & PF_FREEZE) { | ||
709 | refrigerator(PF_FREEZE); | ||
710 | signr = 0; | ||
711 | ret = regs->gpr[3]; | ||
712 | if (!signal_pending(current)) | ||
713 | goto no_signal; | ||
714 | } | ||
715 | |||
716 | if (!oldset) | ||
717 | oldset = ¤t->blocked; | ||
718 | |||
719 | newsp = frame = 0; | ||
720 | |||
721 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | ||
722 | |||
723 | if (TRAP(regs) == 0x0C00 /* System Call! */ | ||
724 | && regs->ccr & 0x10000000 /* error signalled */ | ||
725 | && ((ret = regs->gpr[3]) == ERESTARTSYS | ||
726 | || ret == ERESTARTNOHAND || ret == ERESTARTNOINTR | ||
727 | || ret == ERESTART_RESTARTBLOCK)) { | ||
728 | |||
729 | if (signr > 0 | ||
730 | && (ret == ERESTARTNOHAND || ret == ERESTART_RESTARTBLOCK | ||
731 | || (ret == ERESTARTSYS | ||
732 | && !(ka.sa.sa_flags & SA_RESTART)))) { | ||
733 | /* make the system call return an EINTR error */ | ||
734 | regs->result = -EINTR; | ||
735 | regs->gpr[3] = EINTR; | ||
736 | /* note that the cr0.SO bit is already set */ | ||
737 | } else { | ||
738 | no_signal: | ||
739 | regs->nip -= 4; /* Back up & retry system call */ | ||
740 | regs->result = 0; | ||
741 | regs->trap = 0; | ||
742 | if (ret == ERESTART_RESTARTBLOCK) | ||
743 | regs->gpr[0] = __NR_restart_syscall; | ||
744 | else | ||
745 | regs->gpr[3] = regs->orig_gpr3; | ||
746 | } | ||
747 | } | ||
748 | |||
749 | if (signr == 0) | ||
750 | return 0; /* no signals delivered */ | ||
751 | |||
752 | if ((ka.sa.sa_flags & SA_ONSTACK) && current->sas_ss_size | ||
753 | && !on_sig_stack(regs->gpr[1])) | ||
754 | newsp = current->sas_ss_sp + current->sas_ss_size; | ||
755 | else | ||
756 | newsp = regs->gpr[1]; | ||
757 | newsp &= ~0xfUL; | ||
758 | |||
759 | /* Whee! Actually deliver the signal. */ | ||
760 | if (ka.sa.sa_flags & SA_SIGINFO) | ||
761 | handle_rt_signal(signr, &ka, &info, oldset, regs, newsp); | ||
762 | else | ||
763 | handle_signal(signr, &ka, &info, oldset, regs, newsp); | ||
764 | |||
765 | if (!(ka.sa.sa_flags & SA_NODEFER)) { | ||
766 | spin_lock_irq(¤t->sighand->siglock); | ||
767 | sigorsets(¤t->blocked,¤t->blocked,&ka.sa.sa_mask); | ||
768 | sigaddset(¤t->blocked, signr); | ||
769 | recalc_sigpending(); | ||
770 | spin_unlock_irq(¤t->sighand->siglock); | ||
771 | } | ||
772 | |||
773 | return 1; | ||
774 | } | ||
775 | |||
diff --git a/arch/ppc/kernel/smp-tbsync.c b/arch/ppc/kernel/smp-tbsync.c new file mode 100644 index 000000000000..2c9cd95bcea6 --- /dev/null +++ b/arch/ppc/kernel/smp-tbsync.c | |||
@@ -0,0 +1,181 @@ | |||
1 | /* | ||
2 | * Smp timebase synchronization for ppc. | ||
3 | * | ||
4 | * Copyright (C) 2003 Samuel Rydh (samuel@ibrium.se) | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | #include <linux/config.h> | ||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/sched.h> | ||
11 | #include <linux/smp.h> | ||
12 | #include <linux/unistd.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <asm/atomic.h> | ||
15 | #include <asm/smp.h> | ||
16 | #include <asm/time.h> | ||
17 | |||
/* Number of measurement rounds per contest. */
#define NUM_ITER 300

/* Commands the timebase master posts to the slave. */
enum {
	kExit=0, kSetAndTest, kTest
};

/* Shared master/slave handshake area, allocated by the master.
 * NOTE(review): the filler arrays presumably separate fields written by
 * different CPUs onto different cache lines -- confirm sizes match the
 * target's line size. */
static struct {
	volatile int tbu;	/* timebase upper half to install */
	volatile int tbl;	/* timebase lower half to install */
	volatile int mark;	/* TB value at which the race ends */
	volatile int cmd;	/* one of kExit/kSetAndTest/kTest */
	volatile int handshake;	/* master -> slave "go" flag */
	int filler[3];

	volatile int ack;	/* slave -> master ready/done flag */
	int filler2[7];

	volatile int race_result;	/* last writer of the race wins */
} *tbsync;

/* Set once the master has published the tbsync area. */
static volatile int running;
39 | |||
40 | static void __devinit | ||
41 | enter_contest( int mark, int add ) | ||
42 | { | ||
43 | while( (int)(get_tbl() - mark) < 0 ) | ||
44 | tbsync->race_result = add; | ||
45 | } | ||
46 | |||
/*
 * Slave side of timebase synchronization: spin on the shared tbsync
 * area, executing kSetAndTest / kTest rounds posted by the master until
 * it sends kExit.
 *
 * NOTE(review): the final local_irq_enable() is unreachable -- the only
 * way out of the for(;;) loop is the `return' on kExit, so this CPU
 * leaves with interrupts still disabled.  Confirm the caller
 * (start_secondary) tolerates that.
 */
void __devinit
smp_generic_take_timebase( void )
{
	int cmd, tbl, tbu;

	local_irq_disable();
	/* Wait for the master to publish the tbsync area. */
	while( !running )
		;
	rmb();

	for( ;; ) {
		/* Signal readiness, then wait for the master's "go". */
		tbsync->ack = 1;
		while( !tbsync->handshake )
			;
		rmb();

		/* Snapshot this round's command and timebase values. */
		cmd = tbsync->cmd;
		tbl = tbsync->tbl;
		tbu = tbsync->tbu;
		tbsync->ack = 0;
		if( cmd == kExit )
			return;

		if( cmd == kSetAndTest ) {
			/* Wait for the agreed instant, then load the
			 * master-supplied timebase value. */
			while( tbsync->handshake )
				;
			asm volatile ("mttbl %0" :: "r" (tbl) );
			asm volatile ("mttbu %0" :: "r" (tbu) );
		} else {
			while( tbsync->handshake )
				;
		}
		/* Race the master; slave writes -1 as its result. */
		enter_contest( tbsync->mark, -1 );
	}
	local_irq_enable();
}
83 | |||
/*
 * Master side of one measurement contest: run `num' valid rounds of
 * command `cmd' with the slave's timebase offset by `offset', and
 * return the accumulated race score (positive means the master tended
 * to win, i.e. the slave's timebase is behind).  The first three rounds
 * (i = -3..0) are warm-up and not scored.
 */
static int __devinit
start_contest( int cmd, int offset, int num )
{
	int i, tbu, tbl, mark, score=0;

	tbsync->cmd = cmd;

	local_irq_disable();
	for( i=-3; i<num; ) {
		/* Pick a sync point ~400 ticks ahead and a race mark
		 * 400 ticks beyond that; publish them to the slave. */
		tbl = get_tbl() + 400;
		tbsync->tbu = tbu = get_tbu();
		tbsync->tbl = tbl + offset;
		tbsync->mark = mark = tbl + 400;

		wmb();

		/* Hand off to the slave and wait for it to latch. */
		tbsync->handshake = 1;
		while( tbsync->ack )
			;

		/* Busy-wait until the agreed instant, then release. */
		while( (int)(get_tbl() - tbl) <= 0 )
			;
		tbsync->handshake = 0;
		/* Race the slave; master writes +1 as its result. */
		enter_contest( mark, 1 );

		while( !tbsync->ack )
			;

		/* Discard the round if the timebase upper half rolled
		 * over or the lower half crossed the sign boundary. */
		if( tbsync->tbu != get_tbu() || ((tbsync->tbl ^ get_tbl()) & 0x80000000) )
			continue;
		if( i++ > 0 )
			score += tbsync->race_result;
	}
	local_irq_enable();
	return score;
}
120 | |||
/*
 * Master side of timebase synchronization: binary-search the offset to
 * apply to the slave's timebase until race scores say the two CPUs are
 * as close as measurable, then verify and tell the slave to exit.
 */
void __devinit
smp_generic_give_timebase( void )
{
	int i, score, score2, old, min=0, max=5000, offset=1000;

	printk("Synchronizing timebase\n");

	/* if this fails then this kernel won't work anyway... */
	/* NOTE(review): kmalloc result deliberately unchecked per the
	 * comment above; memset would oops on NULL. */
	tbsync = kmalloc( sizeof(*tbsync), GFP_KERNEL );
	memset( tbsync, 0, sizeof(*tbsync) );
	mb();
	running = 1;

	/* Wait for the slave to reach its handshake loop. */
	while( !tbsync->ack )
		;

	/* binary search */
	for( old=-1 ; old != offset ; offset=(min+max)/2 ) {
		score = start_contest( kSetAndTest, offset, NUM_ITER );

		printk("score %d, offset %d\n", score, offset );

		/* Positive score: slave behind -> shrink the offset. */
		if( score > 0 )
			max = offset;
		else
			min = offset;
		old = offset;
	}
	/* Re-measure both remaining candidates and keep the better one. */
	score = start_contest( kSetAndTest, min, NUM_ITER );
	score2 = start_contest( kSetAndTest, max, NUM_ITER );

	printk( "Min %d (score %d), Max %d (score %d)\n", min, score, max, score2 );
	score = abs( score );
	score2 = abs( score2 );
	offset = (score < score2) ? min : max;

	/* guard against inaccurate mttb */
	for( i=0; i<10; i++ ) {
		start_contest( kSetAndTest, offset, NUM_ITER/10 );

		/* Test-only round: re-check without resetting the TB. */
		if( (score2=start_contest(kTest, offset, NUM_ITER)) < 0 )
			score2 = -score2;
		if( score2 <= score || score2 < 20 )
			break;
	}
	printk("Final offset: %d (%d/%d)\n", offset, score2, NUM_ITER );

	/* exiting */
	tbsync->cmd = kExit;
	wmb();
	tbsync->handshake = 1;
	while( tbsync->ack )
		;
	tbsync->handshake = 0;
	kfree( tbsync );
	tbsync = NULL;
	running = 0;

	/* all done */
	smp_tb_synchronized = 1;
}
diff --git a/arch/ppc/kernel/smp.c b/arch/ppc/kernel/smp.c new file mode 100644 index 000000000000..e70b587b9e51 --- /dev/null +++ b/arch/ppc/kernel/smp.c | |||
@@ -0,0 +1,399 @@ | |||
1 | /* | ||
2 | * Smp support for ppc. | ||
3 | * | ||
4 | * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great | ||
5 | * deal of code from the sparc and intel versions. | ||
6 | * | ||
7 | * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu> | ||
8 | * | ||
9 | */ | ||
10 | |||
11 | #include <linux/config.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/smp.h> | ||
16 | #include <linux/smp_lock.h> | ||
17 | #include <linux/interrupt.h> | ||
18 | #include <linux/kernel_stat.h> | ||
19 | #include <linux/delay.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/spinlock.h> | ||
22 | #include <linux/cache.h> | ||
23 | |||
24 | #include <asm/ptrace.h> | ||
25 | #include <asm/atomic.h> | ||
26 | #include <asm/irq.h> | ||
27 | #include <asm/page.h> | ||
28 | #include <asm/pgtable.h> | ||
29 | #include <asm/io.h> | ||
30 | #include <asm/prom.h> | ||
31 | #include <asm/smp.h> | ||
32 | #include <asm/residual.h> | ||
33 | #include <asm/time.h> | ||
34 | #include <asm/thread_info.h> | ||
35 | #include <asm/tlbflush.h> | ||
36 | #include <asm/xmon.h> | ||
37 | |||
volatile int smp_commenced;
int smp_tb_synchronized;		/* set once timebases are in sync */
struct cpuinfo_PPC cpu_data[NR_CPUS];	/* per-cpu info (bogomips, PVR) */
struct klock_info_struct klock_info = { KLOCK_CLEAR, 0 };
atomic_t ipi_recv;			/* IPI statistics */
atomic_t ipi_sent;
cpumask_t cpu_online_map;
cpumask_t cpu_possible_map;
int smp_hw_index[NR_CPUS];
struct thread_info *secondary_ti;	/* handoff to the booting secondary */

EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);

/* SMP operations for this machine */
static struct smp_ops_t *smp_ops;

/* all cpu mappings are 1-1 -- Cort */
volatile unsigned long cpu_callin_map[NR_CPUS];

int start_secondary(void *);
void smp_call_function_interrupt(void);
static int __smp_call_function(void (*func) (void *info), void *info,
			       int wait, int target);

/* Low level assembly function used to backup CPU 0 state */
extern void __save_cpu_setup(void);

/* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
 *
 * Make sure this matches openpic_request_IPIs in open_pic.c, or what shows up
 * in /proc/interrupts will be wrong!!! --Troy */
#define PPC_MSG_CALL_FUNCTION 0
#define PPC_MSG_RESCHEDULE 1
#define PPC_MSG_INVALIDATE_TLB 2
#define PPC_MSG_XMON_BREAK 3
74 | |||
75 | static inline void | ||
76 | smp_message_pass(int target, int msg, unsigned long data, int wait) | ||
77 | { | ||
78 | if (smp_ops){ | ||
79 | atomic_inc(&ipi_sent); | ||
80 | smp_ops->message_pass(target,msg,data,wait); | ||
81 | } | ||
82 | } | ||
83 | |||
84 | /* | ||
85 | * Common functions | ||
86 | */ | ||
87 | void smp_message_recv(int msg, struct pt_regs *regs) | ||
88 | { | ||
89 | atomic_inc(&ipi_recv); | ||
90 | |||
91 | switch( msg ) { | ||
92 | case PPC_MSG_CALL_FUNCTION: | ||
93 | smp_call_function_interrupt(); | ||
94 | break; | ||
95 | case PPC_MSG_RESCHEDULE: | ||
96 | set_need_resched(); | ||
97 | break; | ||
98 | case PPC_MSG_INVALIDATE_TLB: | ||
99 | _tlbia(); | ||
100 | break; | ||
101 | #ifdef CONFIG_XMON | ||
102 | case PPC_MSG_XMON_BREAK: | ||
103 | xmon(regs); | ||
104 | break; | ||
105 | #endif /* CONFIG_XMON */ | ||
106 | default: | ||
107 | printk("SMP %d: smp_message_recv(): unknown msg %d\n", | ||
108 | smp_processor_id(), msg); | ||
109 | break; | ||
110 | } | ||
111 | } | ||
112 | |||
113 | /* | ||
114 | * 750's don't broadcast tlb invalidates so | ||
115 | * we have to emulate that behavior. | ||
116 | * -- Cort | ||
117 | */ | ||
118 | void smp_send_tlb_invalidate(int cpu) | ||
119 | { | ||
120 | if ( PVR_VER(mfspr(SPRN_PVR)) == 8 ) | ||
121 | smp_message_pass(MSG_ALL_BUT_SELF, PPC_MSG_INVALIDATE_TLB, 0, 0); | ||
122 | } | ||
123 | |||
void smp_send_reschedule(int cpu)
{
	/*
	 * Historically this was only used when `cpu' was running an idle
	 * task that would reschedule itself anyway; that no longer holds,
	 * since the other CPU could be sleeping and won't reschedule
	 * until the next interrupt (such as the timer), so we send an
	 * explicit IPI.
	 *   -- Cort
	 */
	smp_message_pass(cpu, PPC_MSG_RESCHEDULE, 0, 0);
}
139 | |||
#ifdef CONFIG_XMON
/* Ask CPU `cpu' to drop into the xmon debugger via an IPI. */
void smp_send_xmon_break(int cpu)
{
	smp_message_pass(cpu, PPC_MSG_XMON_BREAK, 0, 0);
}
#endif /* CONFIG_XMON */
146 | |||
/* IPI target for smp_send_stop(): park this CPU with interrupts off. */
static void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	for (;;)
		;
}
153 | |||
/* Halt all other CPUs (e.g. on panic/shutdown) via a call-function IPI. */
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);
}
158 | |||
159 | /* | ||
160 | * Structure and data for smp_call_function(). This is designed to minimise | ||
161 | * static memory requirements. It also looks cleaner. | ||
162 | * Stolen from the i386 version. | ||
163 | */ | ||
164 | static DEFINE_SPINLOCK(call_lock); | ||
165 | |||
166 | static struct call_data_struct { | ||
167 | void (*func) (void *info); | ||
168 | void *info; | ||
169 | atomic_t started; | ||
170 | atomic_t finished; | ||
171 | int wait; | ||
172 | } *call_data; | ||
173 | |||
174 | /* | ||
175 | * this function sends a 'generic call function' IPI to all other CPUs | ||
176 | * in the system. | ||
177 | */ | ||
178 | |||
179 | int smp_call_function(void (*func) (void *info), void *info, int nonatomic, | ||
180 | int wait) | ||
181 | /* | ||
182 | * [SUMMARY] Run a function on all other CPUs. | ||
183 | * <func> The function to run. This must be fast and non-blocking. | ||
184 | * <info> An arbitrary pointer to pass to the function. | ||
185 | * <nonatomic> currently unused. | ||
186 | * <wait> If true, wait (atomically) until function has completed on other CPUs. | ||
187 | * [RETURNS] 0 on success, else a negative status code. Does not return until | ||
188 | * remote CPUs are nearly ready to execute <<func>> or are or have executed. | ||
189 | * | ||
190 | * You must not call this function with disabled interrupts or from a | ||
191 | * hardware interrupt handler or from a bottom half handler. | ||
192 | */ | ||
193 | { | ||
194 | /* FIXME: get cpu lock with hotplug cpus, or change this to | ||
195 | bitmask. --RR */ | ||
196 | if (num_online_cpus() <= 1) | ||
197 | return 0; | ||
198 | /* Can deadlock when called with interrupts disabled */ | ||
199 | WARN_ON(irqs_disabled()); | ||
200 | return __smp_call_function(func, info, wait, MSG_ALL_BUT_SELF); | ||
201 | } | ||
202 | |||
/*
 * Worker for smp_call_function(): publish a call_data_struct, IPI the
 * `target' set, and busy-wait (with a ~1s udelay timeout) for the other
 * CPUs to start and, if `wait', to finish.  Returns 0 on success, -1 if
 * the other CPUs failed to respond in time.
 */
static int __smp_call_function(void (*func) (void *info), void *info,
			       int wait, int target)
{
	struct call_data_struct data;
	int ret = -1;
	int timeout;
	int ncpus = 1;

	/* How many CPUs must acknowledge, depending on the target set. */
	if (target == MSG_ALL_BUT_SELF)
		ncpus = num_online_cpus() - 1;
	else if (target == MSG_ALL)
		ncpus = num_online_cpus();

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	/* call_lock serializes cross-calls and protects call_data. */
	spin_lock(&call_lock);
	call_data = &data;
	/* Send a message to all other CPUs and wait for them to respond */
	smp_message_pass(target, PPC_MSG_CALL_FUNCTION, 0, 0);

	/* Wait for response */
	timeout = 1000000;
	while (atomic_read(&data.started) != ncpus) {
		if (--timeout == 0) {
			printk("smp_call_function on cpu %d: other cpus not responding (%d)\n",
			       smp_processor_id(), atomic_read(&data.started));
			goto out;
		}
		barrier();
		udelay(1);
	}

	if (wait) {
		timeout = 1000000;
		while (atomic_read(&data.finished) != ncpus) {
			if (--timeout == 0) {
				printk("smp_call_function on cpu %d: other cpus not finishing (%d/%d)\n",
				       smp_processor_id(), atomic_read(&data.finished), atomic_read(&data.started));
				goto out;
			}
			barrier();
			udelay(1);
		}
	}
	ret = 0;

 out:
	spin_unlock(&call_lock);
	return ret;
}
258 | |||
/*
 * IPI handler side of smp_call_function(): pick up the published
 * call_data, acknowledge via `started', run the function, and if the
 * initiator is waiting, report completion via `finished'.
 */
void smp_call_function_interrupt(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function
	 */
	/* NOTE(review): no explicit memory barrier before this inc --
	 * confirm atomic_inc ordering is sufficient on this arch. */
	atomic_inc(&call_data->started);
	/*
	 * At this point the info structure may be out of scope unless wait==1
	 */
	(*func)(info);
	if (wait)
		atomic_inc(&call_data->finished);
}
277 | |||
/* Record per-CPU info (bogomips, processor version) for CPU `id'. */
static void __devinit smp_store_cpu_info(int id)
{
	struct cpuinfo_PPC *c = &cpu_data[id];

	/* assume bogomips are same for everything */
	c->loops_per_jiffy = loops_per_jiffy;
	c->pvr = mfspr(SPRN_PVR);
}
286 | |||
/*
 * Early SMP bring-up: record boot-CPU info, bind the platform smp_ops,
 * probe for CPUs and mark them possible, and save CPU 0 setup state
 * for replay on secondaries.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int num_cpus, i;

	/* Fixup boot cpu */
	smp_store_cpu_info(smp_processor_id());
	cpu_callin_map[smp_processor_id()] = 1;

	smp_ops = ppc_md.smp_ops;
	if (smp_ops == NULL) {
		printk("SMP not supported on this machine.\n");
		return;
	}

	/* Probe platform for CPUs: always linear. */
	num_cpus = smp_ops->probe();
	for (i = 0; i < num_cpus; ++i)
		cpu_set(i, cpu_possible_map);

	/* Backup CPU 0 state */
	__save_cpu_setup();

	/* Optional platform hook to stagger decrementer interrupts. */
	if (smp_ops->space_timers)
		smp_ops->space_timers(num_cpus);
}
312 | |||
/* Mark the boot CPU online and possible. */
void __devinit smp_prepare_boot_cpu(void)
{
	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), cpu_possible_map);
}
318 | |||
/* Profiling multiplier is not supported here; accept and ignore it. */
int __init setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
323 | |||
/* Processor coming up starts here */
/*
 * C entry point for a secondary CPU: adopt init_mm, record CPU info,
 * arm the decrementer, check in via cpu_callin_map, run the platform
 * setup hook, synchronize the timebase, then enter the idle loop.
 */
int __devinit start_secondary(void *unused)
{
	int cpu;

	/* Run on the kernel's address space; bump its refcount. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	cpu = smp_processor_id();
	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	/* Tell __cpu_up() we made it. */
	cpu_callin_map[cpu] = 1;

	printk("CPU %i done callin...\n", cpu);
	smp_ops->setup_cpu(cpu);
	printk("CPU %i done setup...\n", cpu);
	local_irq_enable();
	/* Rendezvous with the boot CPU's give_timebase(). */
	smp_ops->take_timebase();
	printk("CPU %i done timebase take...\n", cpu);

	cpu_idle();
	return 0;
}
347 | |||
348 | int __cpu_up(unsigned int cpu) | ||
349 | { | ||
350 | struct task_struct *p; | ||
351 | char buf[32]; | ||
352 | int c; | ||
353 | |||
354 | /* create a process for the processor */ | ||
355 | /* only regs.msr is actually used, and 0 is OK for it */ | ||
356 | p = fork_idle(cpu); | ||
357 | if (IS_ERR(p)) | ||
358 | panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p)); | ||
359 | secondary_ti = p->thread_info; | ||
360 | p->thread_info->cpu = cpu; | ||
361 | |||
362 | /* | ||
363 | * There was a cache flush loop here to flush the cache | ||
364 | * to memory for the first 8MB of RAM. The cache flush | ||
365 | * has been pushed into the kick_cpu function for those | ||
366 | * platforms that need it. | ||
367 | */ | ||
368 | |||
369 | /* wake up cpu */ | ||
370 | smp_ops->kick_cpu(cpu); | ||
371 | |||
372 | /* | ||
373 | * wait to see if the cpu made a callin (is actually up). | ||
374 | * use this value that I found through experimentation. | ||
375 | * -- Cort | ||
376 | */ | ||
377 | for (c = 1000; c && !cpu_callin_map[cpu]; c--) | ||
378 | udelay(100); | ||
379 | |||
380 | if (!cpu_callin_map[cpu]) { | ||
381 | sprintf(buf, "didn't find cpu %u", cpu); | ||
382 | if (ppc_md.progress) ppc_md.progress(buf, 0x360+cpu); | ||
383 | printk("Processor %u is stuck.\n", cpu); | ||
384 | return -ENOENT; | ||
385 | } | ||
386 | |||
387 | sprintf(buf, "found cpu %u", cpu); | ||
388 | if (ppc_md.progress) ppc_md.progress(buf, 0x350+cpu); | ||
389 | printk("Processor %d found.\n", cpu); | ||
390 | |||
391 | smp_ops->give_timebase(); | ||
392 | cpu_set(cpu, cpu_online_map); | ||
393 | return 0; | ||
394 | } | ||
395 | |||
/* Final SMP bring-up hook: run the platform per-CPU setup for CPU 0. */
void smp_cpus_done(unsigned int max_cpus)
{
	smp_ops->setup_cpu(0);
}
diff --git a/arch/ppc/kernel/softemu8xx.c b/arch/ppc/kernel/softemu8xx.c new file mode 100644 index 000000000000..9bbb6bf7b645 --- /dev/null +++ b/arch/ppc/kernel/softemu8xx.c | |||
@@ -0,0 +1,147 @@ | |||
1 | /* | ||
2 | * Software emulation of some PPC instructions for the 8xx core. | ||
3 | * | ||
4 | * Copyright (C) 1998 Dan Malek (dmalek@jlc.net) | ||
5 | * | ||
6 | * Software floating emuation for the MPC8xx processor. I did this mostly | ||
7 | * because it was easier than trying to get the libraries compiled for | ||
8 | * software floating point. The goal is still to get the libraries done, | ||
9 | * but I lost patience and needed some hacks to at least get init and | ||
10 | * shells running. The first problem is the setjmp/longjmp that save | ||
11 | * and restore the floating point registers. | ||
12 | * | ||
13 | * For this emulation, our working registers are found on the register | ||
14 | * save area. | ||
15 | */ | ||
16 | |||
17 | #include <linux/errno.h> | ||
18 | #include <linux/sched.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/mm.h> | ||
21 | #include <linux/stddef.h> | ||
22 | #include <linux/unistd.h> | ||
23 | #include <linux/ptrace.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/user.h> | ||
26 | #include <linux/a.out.h> | ||
27 | #include <linux/interrupt.h> | ||
28 | |||
29 | #include <asm/pgtable.h> | ||
30 | #include <asm/uaccess.h> | ||
31 | #include <asm/system.h> | ||
32 | #include <asm/io.h> | ||
33 | |||
/* Debug helpers implemented in the 8xx MM code: dump / fetch the PTE
 * mapping a given address. */
extern void
print_8xx_pte(struct mm_struct *mm, unsigned long addr);
extern int
get_8xx_pte(struct mm_struct *mm, unsigned long addr);

/* Eventually we may need a look-up table, but this works for now.
 */
/* Primary opcodes (instruction bits 0-5) of the FP loads/stores and
 * the FP-arithmetic group we emulate. */
#define LFS	48
#define LFD	50
#define LFDU	51
#define STFD	54
#define STFDU	55
#define FMR	63
47 | |||
/*
 * We return 0 on success, 1 on unimplemented instruction, and -EFAULT
 * if a load/store faulted.
 */
int
Soft_emulate_8xx(struct pt_regs *regs)
{
	uint inst, instword;
	uint flreg, idxreg, disp;
	uint retval;
	signed short sdisp;
	uint *ea, *ip;

	retval = 0;

	/* Fetch the trapping instruction and extract the primary opcode. */
	instword = *((uint *)regs->nip);
	inst = instword >> 26;

	flreg = (instword >> 21) & 0x1f;	/* FP register field (FRT/FRS) */
	idxreg = (instword >> 16) & 0x1f;	/* base GPR field (RA) */
	disp = instword & 0xffff;		/* raw 16-bit displacement */

	/* Default EA uses the zero-extended displacement; the non-update
	 * load/store cases recompute it below with sign extension.
	 * NOTE(review): LFDU/STFDU use this zero-extended EA -- confirm
	 * that is intended, since D-form displacements are signed. */
	ea = (uint *)(regs->gpr[idxreg] + disp);
	ip = (uint *)&current->thread.fpr[flreg];

	switch ( inst )
	{
	case LFD:
		/* this is a 16 bit quantity that is sign extended
		 * so use a signed short here -- Cort
		 */
		sdisp = (instword & 0xffff);
		ea = (uint *)(regs->gpr[idxreg] + sdisp);
		if (copy_from_user(ip, ea, sizeof(double)))
			retval = -EFAULT;
		break;

	case LFDU:
		/* Update form: on success write the EA back into RA. */
		if (copy_from_user(ip, ea, sizeof(double)))
			retval = -EFAULT;
		else
			regs->gpr[idxreg] = (uint)ea;
		break;
	case LFS:
		sdisp = (instword & 0xffff);
		ea = (uint *)(regs->gpr[idxreg] + sdisp);
		if (copy_from_user(ip, ea, sizeof(float)))
			retval = -EFAULT;
		break;
	case STFD:
		/* this is a 16 bit quantity that is sign extended
		 * so use a signed short here -- Cort
		 */
		sdisp = (instword & 0xffff);
		ea = (uint *)(regs->gpr[idxreg] + sdisp);
		if (copy_to_user(ea, ip, sizeof(double)))
			retval = -EFAULT;
		break;

	case STFDU:
		/* Update form: on success write the EA back into RA. */
		if (copy_to_user(ea, ip, sizeof(double)))
			retval = -EFAULT;
		else
			regs->gpr[idxreg] = (uint)ea;
		break;
	case FMR:
		/* assume this is a fp move -- Cort */
		memcpy( ip, &current->thread.fpr[(instword>>11)&0x1f],
			sizeof(double) );
		break;
	default:
		retval = 1;
		printk("Bad emulation %s/%d\n"
		       " NIP: %08lx instruction: %08x opcode: %x "
		       "A: %x B: %x C: %x code: %x rc: %x\n",
		       current->comm,current->pid,
		       regs->nip,
		       instword,inst,
		       (instword>>16)&0x1f,
		       (instword>>11)&0x1f,
		       (instword>>6)&0x1f,
		       (instword>>1)&0x3ff,
		       instword&1);
		/* Dump the PTE mapping the faulting NIP to help debug
		 * bogus instruction fetches. */
		{
			int pa;
			print_8xx_pte(current->mm,regs->nip);
			pa = get_8xx_pte(current->mm,regs->nip) & PAGE_MASK;
			pa |= (regs->nip & ~PAGE_MASK);
			pa = (unsigned long)__va(pa);
			printk("Kernel VA for NIP %x ", pa);
			print_8xx_pte(current->mm,pa);
		}

	}

	/* Advance past the emulated instruction on success. */
	if (retval == 0)
		regs->nip += 4;
	return(retval);
}
147 | |||
diff --git a/arch/ppc/kernel/swsusp.S b/arch/ppc/kernel/swsusp.S new file mode 100644 index 000000000000..55148bb88d39 --- /dev/null +++ b/arch/ppc/kernel/swsusp.S | |||
@@ -0,0 +1,349 @@ | |||
1 | #include <linux/config.h> | ||
2 | #include <linux/threads.h> | ||
3 | #include <asm/processor.h> | ||
4 | #include <asm/page.h> | ||
5 | #include <asm/cputable.h> | ||
6 | #include <asm/thread_info.h> | ||
7 | #include <asm/ppc_asm.h> | ||
8 | #include <asm/offsets.h> | ||
9 | |||
10 | |||
/*
 * Structure for storing CPU registers on the save area.
 * Byte offsets into swsusp_save_area below.
 */
#define SL_SP		0
#define SL_PC		4
#define SL_MSR		8
#define SL_SDR1		0xc
#define SL_SPRG0	0x10	/* 4 sprg's */
#define SL_DBAT0	0x20
#define SL_IBAT0	0x28
#define SL_DBAT1	0x30
#define SL_IBAT1	0x38
#define SL_DBAT2	0x40
#define SL_IBAT2	0x48
#define SL_DBAT3	0x50
#define SL_IBAT3	0x58
#define SL_TB		0x60
#define SL_R2		0x68
#define SL_CR		0x6c
#define SL_LR		0x70
#define SL_R12		0x74	/* r12 to r31 */
#define SL_SIZE		(SL_R12 + 80)	/* 20 GPRs x 4 bytes */

	.section .data
	.align	5

/* Register save area shared by swsusp_arch_suspend/resume. */
_GLOBAL(swsusp_save_area)
	.space	SL_SIZE
39 | |||
40 | |||
41 | .section .text | ||
42 | .align 5 | ||
43 | |||
/*
 * Save everything the resume path needs (callee-saved GPRs, CR, LR,
 * SP, MSR, SDR1, SPRGs, timebase, all BATs) into swsusp_save_area,
 * then call the generic swsusp_save().
 */
_GLOBAL(swsusp_arch_suspend)

	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l

	/* Callee-saved state: LR, CR, stack pointer, r2, r12-r31 */
	mflr	r0
	stw	r0,SL_LR(r11)
	mfcr	r0
	stw	r0,SL_CR(r11)
	stw	r1,SL_SP(r11)
	stw	r2,SL_R2(r11)
	stmw	r12,SL_R12(r11)

	/* Save MSR & SDR1 */
	mfmsr	r4
	stw	r4,SL_MSR(r11)
	mfsdr1	r4
	stw	r4,SL_SDR1(r11)

	/* Get a stable timebase and save it: re-read TBU until it is
	 * unchanged across the TBL read */
1:	mftbu	r4
	stw	r4,SL_TB(r11)
	mftb	r5
	stw	r5,SL_TB+4(r11)
	mftbu	r3
	cmpw	r3,r4
	bne	1b

	/* Save SPRGs */
	mfsprg	r4,0
	stw	r4,SL_SPRG0(r11)
	mfsprg	r4,1
	stw	r4,SL_SPRG0+4(r11)
	mfsprg	r4,2
	stw	r4,SL_SPRG0+8(r11)
	mfsprg	r4,3
	stw	r4,SL_SPRG0+12(r11)

	/* Save BATs (upper/lower word pairs, DBAT0-3 then IBAT0-3) */
	mfdbatu	r4,0
	stw	r4,SL_DBAT0(r11)
	mfdbatl	r4,0
	stw	r4,SL_DBAT0+4(r11)
	mfdbatu	r4,1
	stw	r4,SL_DBAT1(r11)
	mfdbatl	r4,1
	stw	r4,SL_DBAT1+4(r11)
	mfdbatu	r4,2
	stw	r4,SL_DBAT2(r11)
	mfdbatl	r4,2
	stw	r4,SL_DBAT2+4(r11)
	mfdbatu	r4,3
	stw	r4,SL_DBAT3(r11)
	mfdbatl	r4,3
	stw	r4,SL_DBAT3+4(r11)
	mfibatu	r4,0
	stw	r4,SL_IBAT0(r11)
	mfibatl	r4,0
	stw	r4,SL_IBAT0+4(r11)
	mfibatu	r4,1
	stw	r4,SL_IBAT1(r11)
	mfibatl	r4,1
	stw	r4,SL_IBAT1+4(r11)
	mfibatu	r4,2
	stw	r4,SL_IBAT2(r11)
	mfibatl	r4,2
	stw	r4,SL_IBAT2+4(r11)
	mfibatu	r4,3
	stw	r4,SL_IBAT3(r11)
	mfibatl	r4,3
	stw	r4,SL_IBAT3+4(r11)

#if 0
	/* Backup various CPU config stuffs */
	bl	__save_cpu_setup
#endif
	/* Call the low level suspend stuff (we should probably have made
	 * a stackframe...
	 */
	bl	swsusp_save

	/* Restore LR from the save area (clobbered by the call above) */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
	lwz	r0,SL_LR(r11)
	mtlr	r0

	blr
132 | |||
133 | |||
/* Resume code */
/*
 * Copy the saved image pages back in place with data translation off,
 * flush caches and TLBs, restore SDR1/SPRGs and the saved register
 * state, then re-enable the MMU and return 0 to the (restored) caller.
 */
_GLOBAL(swsusp_arch_resume)

	/* Stop pending alitvec streams and memory accesses */
BEGIN_FTR_SECTION
	DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	sync

	/* Disable MSR:DR to make sure we don't take a TLB or
	 * hash miss during the copy, as our hash table will
	 * for a while be unuseable. For .text, we assume we are
	 * covered by a BAT. This works only for non-G5 at this
	 * point. G5 will need a better approach, possibly using
	 * a small temporary hash table filled with large mappings,
	 * disabling the MMU completely isn't a good option for
	 * performance reasons.
	 * (Note that 750's may have the same performance issue as
	 * the G5 in this case, we should investigate using moving
	 * BATs for these CPUs)
	 */
	mfmsr	r0
	sync
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	sync
	isync

	/* Load ptr the list of pages to copy in r3 */
	lis	r11,(pagedir_nosave - KERNELBASE)@h
	ori	r11,r11,pagedir_nosave@l
	lwz	r10,0(r11)

	/* Copy the pages. This is a very basic implementation, to
	 * be replaced by something more cache efficient */
1:
	tophys(r3,r10)
	li	r0,256			/* 256 iterations x 16 bytes = 4K page */
	mtctr	r0
	lwz	r11,pbe_address(r3)	/* source */
	tophys(r5,r11)
	lwz	r10,pbe_orig_address(r3)	/* destination */
	tophys(r6,r10)
2:
	lwz	r8,0(r5)
	lwz	r9,4(r5)
	lwz	r10,8(r5)
	lwz	r11,12(r5)
	addi	r5,r5,16
	stw	r8,0(r6)
	stw	r9,4(r6)
	stw	r10,8(r6)
	stw	r11,12(r6)
	addi	r6,r6,16
	bdnz	2b
	lwz	r10,pbe_next(r3)
	cmpwi	0,r10,0
	bne	1b

	/* Do a very simple cache flush/inval of the L1 to ensure
	 * coherency of the icache
	 */
	lis	r3,0x0002
	mtctr	r3
	li	r3, 0
1:
	lwz	r0,0(r3)
	addi	r3,r3,0x0020
	bdnz	1b
	isync
	sync

	/* Now flush those cache lines */
	lis	r3,0x0002
	mtctr	r3
	li	r3, 0
1:
	dcbf	0,r3
	addi	r3,r3,0x0020
	bdnz	1b
	sync

	/* Ok, we are now running with the kernel data of the old
	 * kernel fully restored. We can get to the save area
	 * easily now. As for the rest of the code, it assumes the
	 * loader kernel and the booted one are exactly identical
	 */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
	tophys(r11,r11)

#if 0
	/* Restore various CPU config stuffs */
	bl	__restore_cpu_setup
#endif
	/* Restore the BATs, and SDR1. Then we can turn on the MMU.
	 * This is a bit hairy as we are running out of those BATs,
	 * but first, our code is probably in the icache, and we are
	 * writing the same value to the BAT, so that should be fine,
	 * though a better solution will have to be found long-term
	 */
	lwz	r4,SL_SDR1(r11)
	mtsdr1	r4
	lwz	r4,SL_SPRG0(r11)
	mtsprg	0,r4
	lwz	r4,SL_SPRG0+4(r11)
	mtsprg	1,r4
	lwz	r4,SL_SPRG0+8(r11)
	mtsprg	2,r4
	lwz	r4,SL_SPRG0+12(r11)
	mtsprg	3,r4

#if 0
	lwz	r4,SL_DBAT0(r11)
	mtdbatu	0,r4
	lwz	r4,SL_DBAT0+4(r11)
	mtdbatl	0,r4
	lwz	r4,SL_DBAT1(r11)
	mtdbatu	1,r4
	lwz	r4,SL_DBAT1+4(r11)
	mtdbatl	1,r4
	lwz	r4,SL_DBAT2(r11)
	mtdbatu	2,r4
	lwz	r4,SL_DBAT2+4(r11)
	mtdbatl	2,r4
	lwz	r4,SL_DBAT3(r11)
	mtdbatu	3,r4
	lwz	r4,SL_DBAT3+4(r11)
	mtdbatl	3,r4
	lwz	r4,SL_IBAT0(r11)
	mtibatu	0,r4
	lwz	r4,SL_IBAT0+4(r11)
	mtibatl	0,r4
	lwz	r4,SL_IBAT1(r11)
	mtibatu	1,r4
	lwz	r4,SL_IBAT1+4(r11)
	mtibatl	1,r4
	lwz	r4,SL_IBAT2(r11)
	mtibatu	2,r4
	lwz	r4,SL_IBAT2+4(r11)
	mtibatl	2,r4
	lwz	r4,SL_IBAT3(r11)
	mtibatu	3,r4
	lwz	r4,SL_IBAT3+4(r11)
	mtibatl	3,r4
#endif

	/* Clear the high BATs (4-7) on CPUs that have them */
BEGIN_FTR_SECTION
	li	r4,0
	mtspr	SPRN_DBAT4U,r4
	mtspr	SPRN_DBAT4L,r4
	mtspr	SPRN_DBAT5U,r4
	mtspr	SPRN_DBAT5L,r4
	mtspr	SPRN_DBAT6U,r4
	mtspr	SPRN_DBAT6L,r4
	mtspr	SPRN_DBAT7U,r4
	mtspr	SPRN_DBAT7L,r4
	mtspr	SPRN_IBAT4U,r4
	mtspr	SPRN_IBAT4L,r4
	mtspr	SPRN_IBAT5U,r4
	mtspr	SPRN_IBAT5L,r4
	mtspr	SPRN_IBAT6U,r4
	mtspr	SPRN_IBAT6L,r4
	mtspr	SPRN_IBAT7U,r4
	mtspr	SPRN_IBAT7L,r4
END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)

	/* Flush all TLBs */
	lis	r4,0x1000
1:	addic.	r4,r4,-0x1000
	tlbie	r4
	blt	1b
	sync

	/* restore the MSR and turn on the MMU */
	lwz	r3,SL_MSR(r11)
	bl	turn_on_mmu
	tovirt(r11,r11)

	/* Restore TB */
	li	r3,0
	mttbl	r3
	lwz	r3,SL_TB(r11)
	lwz	r4,SL_TB+4(r11)
	mttbu	r3
	mttbl	r4

	/* Kick decrementer */
	li	r0,1
	mtdec	r0

	/* Restore the callee-saved registers and return */
	lwz	r0,SL_CR(r11)
	mtcr	r0
	lwz	r2,SL_R2(r11)
	lmw	r12,SL_R12(r11)
	lwz	r1,SL_SP(r11)
	lwz	r0,SL_LR(r11)
	mtlr	r0

	// XXX Note: we don't really need to call swsusp_resume

	li	r3,0
	blr
338 | |||
/* FIXME:This construct is actually not useful since we don't shut
 * down the instruction MMU, we could just flip back MSR-DR on.
 */
/* Enter with r3 = target MSR and LR = return address; the rfi
 * atomically installs the new MSR and jumps back to the caller. */
turn_on_mmu:
	mflr	r4
	mtsrr0	r4
	mtsrr1	r3
	sync
	isync
	rfi
349 | |||
diff --git a/arch/ppc/kernel/syscalls.c b/arch/ppc/kernel/syscalls.c new file mode 100644 index 000000000000..124313ce3c09 --- /dev/null +++ b/arch/ppc/kernel/syscalls.c | |||
@@ -0,0 +1,272 @@ | |||
1 | /* | ||
2 | * arch/ppc/kernel/sys_ppc.c | ||
3 | * | ||
4 | * PowerPC version | ||
5 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
6 | * | ||
7 | * Derived from "arch/i386/kernel/sys_i386.c" | ||
8 | * Adapted from the i386 version by Gary Thomas | ||
9 | * Modified by Cort Dougan (cort@cs.nmt.edu) | ||
10 | * and Paul Mackerras (paulus@cs.anu.edu.au). | ||
11 | * | ||
12 | * This file contains various random system calls that | ||
13 | * have a non-standard calling sequence on the Linux/PPC | ||
14 | * platform. | ||
15 | * | ||
16 | * This program is free software; you can redistribute it and/or | ||
17 | * modify it under the terms of the GNU General Public License | ||
18 | * as published by the Free Software Foundation; either version | ||
19 | * 2 of the License, or (at your option) any later version. | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #include <linux/errno.h> | ||
24 | #include <linux/sched.h> | ||
25 | #include <linux/mm.h> | ||
26 | #include <linux/smp.h> | ||
27 | #include <linux/smp_lock.h> | ||
28 | #include <linux/sem.h> | ||
29 | #include <linux/msg.h> | ||
30 | #include <linux/shm.h> | ||
31 | #include <linux/stat.h> | ||
32 | #include <linux/syscalls.h> | ||
33 | #include <linux/mman.h> | ||
34 | #include <linux/sys.h> | ||
35 | #include <linux/ipc.h> | ||
36 | #include <linux/utsname.h> | ||
37 | #include <linux/file.h> | ||
38 | #include <linux/unistd.h> | ||
39 | |||
40 | #include <asm/uaccess.h> | ||
41 | #include <asm/ipc.h> | ||
42 | #include <asm/semaphore.h> | ||
43 | |||
/* Arch hook called at boot; nothing to work around on ppc32. */
void
check_bugs(void)
{
}
48 | |||
/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 *
 * 'call' carries the IPC version in its upper 16 bits and the
 * operation code in the lower 16; unknown operations return -ENOSYS.
 */
int
sys_ipc (uint call, int first, int second, int third, void __user *ptr, long fifth)
{
	int version, ret;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	ret = -ENOSYS;
	switch (call) {
	case SEMOP:
		ret = sys_semtimedop (first, (struct sembuf __user *)ptr,
				      second, NULL);
		break;
	case SEMTIMEDOP:
		ret = sys_semtimedop (first, (struct sembuf __user *)ptr,
				      second, (const struct timespec __user *) fifth);
		break;
	case SEMGET:
		ret = sys_semget (first, second, third);
		break;
	case SEMCTL: {
		/* The fourth argument is passed indirectly through *ptr. */
		union semun fourth;

		if (!ptr)
			break;
		if ((ret = access_ok(VERIFY_READ, ptr, sizeof(long)) ? 0 : -EFAULT)
		    || (ret = get_user(fourth.__pad, (void __user *__user *)ptr)))
			break;
		ret = sys_semctl (first, second, third, fourth);
		break;
	}
	case MSGSND:
		ret = sys_msgsnd (first, (struct msgbuf __user *) ptr, second, third);
		break;
	case MSGRCV:
		switch (version) {
		case 0: {
			/* Old-style callers pass msgp/msgtyp bundled in
			 * an ipc_kludge structure pointed to by ptr. */
			struct ipc_kludge tmp;

			if (!ptr)
				break;
			if ((ret = access_ok(VERIFY_READ, ptr, sizeof(tmp)) ? 0 : -EFAULT)
			    || (ret = copy_from_user(&tmp,
						     (struct ipc_kludge __user *) ptr,
						     sizeof (tmp)) ? -EFAULT : 0))
				break;
			ret = sys_msgrcv (first, tmp.msgp, second, tmp.msgtyp,
					  third);
			break;
		}
		default:
			ret = sys_msgrcv (first, (struct msgbuf __user *) ptr,
					  second, fifth, third);
			break;
		}
		break;
	case MSGGET:
		ret = sys_msgget ((key_t) first, second);
		break;
	case MSGCTL:
		ret = sys_msgctl (first, second, (struct msqid_ds __user *) ptr);
		break;
	case SHMAT: {
		/* Attach, then report the mapped address back via *third. */
		ulong raddr;

		if ((ret = access_ok(VERIFY_WRITE, (ulong __user *) third,
				     sizeof(ulong)) ? 0 : -EFAULT))
			break;
		ret = do_shmat (first, (char __user *) ptr, second, &raddr);
		if (ret)
			break;
		ret = put_user (raddr, (ulong __user *) third);
		break;
	}
	case SHMDT:
		ret = sys_shmdt ((char __user *)ptr);
		break;
	case SHMGET:
		ret = sys_shmget (first, second, third);
		break;
	case SHMCTL:
		ret = sys_shmctl (first, second, (struct shmid_ds __user *) ptr);
		break;
	}

	return ret;
}
142 | |||
143 | /* | ||
144 | * sys_pipe() is the normal C calling standard for creating | ||
145 | * a pipe. It's not the way unix traditionally does this, though. | ||
146 | */ | ||
147 | int sys_pipe(int __user *fildes) | ||
148 | { | ||
149 | int fd[2]; | ||
150 | int error; | ||
151 | |||
152 | error = do_pipe(fd); | ||
153 | if (!error) { | ||
154 | if (copy_to_user(fildes, fd, 2*sizeof(int))) | ||
155 | error = -EFAULT; | ||
156 | } | ||
157 | return error; | ||
158 | } | ||
159 | |||
/*
 * Common worker for sys_mmap and sys_mmap2: resolve the fd (unless
 * the mapping is anonymous), then perform the mapping under mmap_sem.
 * 'pgoff' is in page units.  Returns the mapped address or -errno
 * (-EBADF if the fd lookup fails).
 */
static inline unsigned long
do_mmap2(unsigned long addr, size_t len,
	 unsigned long prot, unsigned long flags,
	 unsigned long fd, unsigned long pgoff)
{
	struct file * file = NULL;
	int ret = -EBADF;

	/* These two flags are handled internally by the VM. */
	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		if (!(file = fget(fd)))
			goto out;
	}

	down_write(&current->mm->mmap_sem);
	ret = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);
	if (file)
		fput(file);
out:
	return ret;
}
182 | |||
/* mmap2: the offset argument is already in page units, so no
 * alignment check is needed before handing off. */
unsigned long sys_mmap2(unsigned long addr, size_t len,
			unsigned long prot, unsigned long flags,
			unsigned long fd, unsigned long pgoff)
{
	return do_mmap2(addr, len, prot, flags, fd, pgoff);
}
189 | |||
190 | unsigned long sys_mmap(unsigned long addr, size_t len, | ||
191 | unsigned long prot, unsigned long flags, | ||
192 | unsigned long fd, off_t offset) | ||
193 | { | ||
194 | int err = -EINVAL; | ||
195 | |||
196 | if (offset & ~PAGE_MASK) | ||
197 | goto out; | ||
198 | |||
199 | err = do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); | ||
200 | out: | ||
201 | return err; | ||
202 | } | ||
203 | |||
/*
 * Due to some executables calling the wrong select we sometimes
 * get wrong args. This determines how the args are being passed
 * (a single ptr to them all args passed) then calls
 * sys_select() with the appropriate args. -- Cort
 */
int
ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval __user *tvp)
{
	/* An "indirect" call passes one user pointer to all five args in
	 * n; real fd counts are small, so a value >= 4096 is treated as a
	 * pointer.  NOTE(review): assumes no caller selects on >= 4096
	 * descriptors directly. */
	if ( (unsigned long)n >= 4096 )
	{
		unsigned long __user *buffer = (unsigned long __user *)n;
		if (!access_ok(VERIFY_READ, buffer, 5*sizeof(unsigned long))
		    || __get_user(n, buffer)
		    || __get_user(inp, ((fd_set __user * __user *)(buffer+1)))
		    || __get_user(outp, ((fd_set __user * __user *)(buffer+2)))
		    || __get_user(exp, ((fd_set __user * __user *)(buffer+3)))
		    || __get_user(tvp, ((struct timeval __user * __user *)(buffer+4))))
			return -EFAULT;
	}
	return sys_select(n, inp, outp, exp, tvp);
}
226 | |||
227 | int sys_uname(struct old_utsname __user * name) | ||
228 | { | ||
229 | int err = -EFAULT; | ||
230 | |||
231 | down_read(&uts_sem); | ||
232 | if (name && !copy_to_user(name, &system_utsname, sizeof (*name))) | ||
233 | err = 0; | ||
234 | up_read(&uts_sem); | ||
235 | return err; | ||
236 | } | ||
237 | |||
238 | int sys_olduname(struct oldold_utsname __user * name) | ||
239 | { | ||
240 | int error; | ||
241 | |||
242 | if (!name) | ||
243 | return -EFAULT; | ||
244 | if (!access_ok(VERIFY_WRITE,name,sizeof(struct oldold_utsname))) | ||
245 | return -EFAULT; | ||
246 | |||
247 | down_read(&uts_sem); | ||
248 | error = __copy_to_user(&name->sysname,&system_utsname.sysname,__OLD_UTS_LEN); | ||
249 | error -= __put_user(0,name->sysname+__OLD_UTS_LEN); | ||
250 | error -= __copy_to_user(&name->nodename,&system_utsname.nodename,__OLD_UTS_LEN); | ||
251 | error -= __put_user(0,name->nodename+__OLD_UTS_LEN); | ||
252 | error -= __copy_to_user(&name->release,&system_utsname.release,__OLD_UTS_LEN); | ||
253 | error -= __put_user(0,name->release+__OLD_UTS_LEN); | ||
254 | error -= __copy_to_user(&name->version,&system_utsname.version,__OLD_UTS_LEN); | ||
255 | error -= __put_user(0,name->version+__OLD_UTS_LEN); | ||
256 | error -= __copy_to_user(&name->machine,&system_utsname.machine,__OLD_UTS_LEN); | ||
257 | error = __put_user(0,name->machine+__OLD_UTS_LEN); | ||
258 | up_read(&uts_sem); | ||
259 | |||
260 | error = error ? -EFAULT : 0; | ||
261 | return error; | ||
262 | } | ||
263 | |||
/*
 * We put the arguments in a different order so we only use 6
 * registers for arguments, rather than 7 as sys_fadvise64_64 needs
 * (because `offset' goes in r5/r6).
 */
long ppc_fadvise64_64(int fd, int advice, loff_t offset, loff_t len)
{
	/* Reorder back into the generic (fd, offset, len, advice) form. */
	return sys_fadvise64_64(fd, offset, len, advice);
}
diff --git a/arch/ppc/kernel/temp.c b/arch/ppc/kernel/temp.c new file mode 100644 index 000000000000..fe8bb634ead0 --- /dev/null +++ b/arch/ppc/kernel/temp.c | |||
@@ -0,0 +1,272 @@ | |||
1 | /* | ||
2 | * temp.c Thermal management for cpu's with Thermal Assist Units | ||
3 | * | ||
4 | * Written by Troy Benjegerdes <hozer@drgw.net> | ||
5 | * | ||
6 | * TODO: | ||
7 | * dynamic power management to limit peak CPU temp (using ICTC) | ||
8 | * calibration??? | ||
9 | * | ||
10 | * Silly, crazy ideas: use cpu load (from scheduler) and ICTC to extend battery | ||
11 | * life in portables, and add a 'performance/watt' metric somewhere in /proc | ||
12 | */ | ||
13 | |||
14 | #include <linux/config.h> | ||
15 | #include <linux/errno.h> | ||
16 | #include <linux/jiffies.h> | ||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/param.h> | ||
19 | #include <linux/string.h> | ||
20 | #include <linux/mm.h> | ||
21 | #include <linux/interrupt.h> | ||
22 | #include <linux/init.h> | ||
23 | |||
24 | #include <asm/segment.h> | ||
25 | #include <asm/io.h> | ||
26 | #include <asm/reg.h> | ||
27 | #include <asm/nvram.h> | ||
28 | #include <asm/cache.h> | ||
29 | #include <asm/8xx_immap.h> | ||
30 | #include <asm/machdep.h> | ||
31 | |||
/* Per-CPU TAU state: interrupt count and the current low/high
 * temperature window (degrees C); 'grew' flags that the window was
 * widened since the last shrink-timer pass. */
static struct tau_temp
{
	int interrupts;
	unsigned char low;
	unsigned char high;
	unsigned char grew;
} tau[NR_CPUS];

/* Periodic timer that shrinks the temperature window back down. */
struct timer_list tau_timer;

#undef DEBUG

/* TODO: put these in a /proc interface, with some sanity checks, and maybe
 * dynamic adjustment to minimize # of interrupts */
/* configurable values for step size and how much to expand the window when
 * we get an interrupt. These are based on the limit that was out of range */
#define step_size		2	/* step size when temp goes out of range */
#define window_expand		1	/* expand the window by this much */
/* configurable values for shrinking the window */
#define shrink_timer	2*HZ	/* period between shrinking the window */
#define min_window	2	/* minimum window size, degrees C */
53 | |||
/*
 * Program THRM1/THRM2 from tau[cpu]'s current window: THRM1 watches
 * the low bound (TID = trigger when below), THRM2 the high bound.
 * Interrupt enables (TIE) depend on CONFIG_TAU_INT.
 */
void set_thresholds(unsigned long cpu)
{
#ifdef CONFIG_TAU_INT
	/*
	 * setup THRM1,
	 * threshold, valid bit, enable interrupts, interrupt when below threshold
	 */
	mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TIE | THRM1_TID);

	/* setup THRM2,
	 * threshold, valid bit, enable interrupts, interrupt when above threshhold
	 */
	mtspr (SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | THRM1_TIE);
#else
	/* same thing but don't enable interrupts */
	mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TID);
	mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V);
#endif
}
73 | |||
/*
 * Check THRM1/THRM2 for crossed thresholds.  When one was crossed,
 * slide the whole window by step_size in that direction while
 * widening it by window_expand, and mark tau[cpu].grew so the shrink
 * timer leaves the window alone this pass.
 */
void TAUupdate(int cpu)
{
	unsigned thrm;

#ifdef DEBUG
	printk("TAUupdate ");
#endif

	/* if both thresholds are crossed, the step_sizes cancel out
	 * and the window winds up getting expanded twice. */
	if((thrm = mfspr(SPRN_THRM1)) & THRM1_TIV){ /* is valid? */
		if(thrm & THRM1_TIN){ /* crossed low threshold */
			/* guard against the low bound wrapping below 0 */
			if (tau[cpu].low >= step_size){
				tau[cpu].low -= step_size;
				tau[cpu].high -= (step_size - window_expand);
			}
			tau[cpu].grew = 1;
#ifdef DEBUG
			printk("low threshold crossed ");
#endif
		}
	}
	if((thrm = mfspr(SPRN_THRM2)) & THRM1_TIV){ /* is valid? */
		if(thrm & THRM1_TIN){ /* crossed high threshold */
			/* guard against the high bound exceeding 127 */
			if (tau[cpu].high <= 127-step_size){
				tau[cpu].low += (step_size - window_expand);
				tau[cpu].high += step_size;
			}
			tau[cpu].grew = 1;
#ifdef DEBUG
			printk("high threshold crossed ");
#endif
		}
	}

#ifdef DEBUG
	printk("grew = %d\n", tau[cpu].grew);
#endif

#ifndef CONFIG_TAU_INT /* tau_timeout will do this if not using interrupts */
	set_thresholds(cpu);
#endif

}
118 | |||
#ifdef CONFIG_TAU_INT
/*
 * TAU interrupts - called when we have a thermal assist unit interrupt
 * with interrupts disabled
 */

void TAUException(struct pt_regs * regs)
{
	int cpu = smp_processor_id();

	irq_enter();
	/* Count the interrupt, then adjust this CPU's window. */
	tau[cpu].interrupts++;

	TAUupdate(cpu);

	irq_exit();
}
#endif /* CONFIG_TAU_INT */
137 | |||
/*
 * Per-CPU timer work: poll the TAU (when not interrupt driven),
 * exponentially shrink the window back toward min_window if it did
 * not grow since the last pass, then rewrite the thresholds and
 * re-enable the thermal sensor.
 */
static void tau_timeout(void * info)
{
	int cpu;
	unsigned long flags;
	int size;
	int shrink;

	/* disabling interrupts *should* be okay */
	local_irq_save(flags);
	cpu = smp_processor_id();

#ifndef CONFIG_TAU_INT
	TAUupdate(cpu);
#endif

	size = tau[cpu].high - tau[cpu].low;
	if (size > min_window && ! tau[cpu].grew) {
		/* do an exponential shrink of half the amount currently over size */
		shrink = (2 + size - min_window) / 4;
		if (shrink) {
			tau[cpu].low += shrink;
			tau[cpu].high -= shrink;
		} else { /* size must have been min_window + 1 */
			tau[cpu].low += 1;
#if 1 /* debug */
			if ((tau[cpu].high - tau[cpu].low) != min_window){
				printk(KERN_ERR "temp.c: line %d, logic error\n", __LINE__);
			}
#endif
		}
	}

	/* Reset the grow flag for the next interval. */
	tau[cpu].grew = 0;

	set_thresholds(cpu);

	/*
	 * Do the enable every time, since otherwise a bunch of (relatively)
	 * complex sleep code needs to be added. One mtspr every time
	 * tau_timeout is called is probably not a big deal.
	 *
	 * Enable thermal sensor and set up sample interval timer
	 * need 20 us to do the compare.. until a nice 'cpu_speed' function
	 * call is implemented, just assume a 500 mhz clock. It doesn't really
	 * matter if we take too long for a compare since it's all interrupt
	 * driven anyway.
	 *
	 * use a extra long time.. (60 us @ 500 mhz)
	 */
	mtspr(SPRN_THRM3, THRM3_SITV(500*60) | THRM3_E);

	local_irq_restore(flags);
}
191 | |||
/* Timer callback: re-arm ourselves, then run tau_timeout() on every CPU. */
static void tau_timeout_smp(unsigned long unused)
{

	/* schedule ourselves to be run again */
	mod_timer(&tau_timer, jiffies + shrink_timer) ;
	on_each_cpu(tau_timeout, NULL, 1, 0);
}
199 | |||
200 | /* | ||
201 | * setup the TAU | ||
202 | * | ||
203 | * Set things up to use THRM1 as a temperature lower bound, and THRM2 as an upper bound. | ||
204 | * Start off at zero | ||
205 | */ | ||
206 | |||
/* Nonzero once TAU_init() found a TAU and armed the shrink timer. */
int tau_initialized = 0;
208 | |||
/* Per-CPU init (run via on_each_cpu): start with a wide window;
 * the shrink timer narrows it over time. */
void __init TAU_init_smp(void * info)
{
	unsigned long cpu = smp_processor_id();

	/* set these to a reasonable value and let the timer shrink the
	 * window */
	tau[cpu].low = 5;
	tau[cpu].high = 120;

	set_thresholds(cpu);
}
220 | |||
/*
 * Probe for a Thermal Assist Unit, initialize every CPU's window and
 * arm the periodic shrink timer.  Returns 0 on success, 1 when the
 * CPU has no TAU.
 */
int __init TAU_init(void)
{
	/* We assume in SMP that if one CPU has TAU support, they
	 * all have it --BenH
	 */
	if (!cpu_has_feature(CPU_FTR_TAU)) {
		printk("Thermal assist unit not available\n");
		tau_initialized = 0;
		return 1;
	}


	/* first, set up the window shrinking timer */
	init_timer(&tau_timer);
	tau_timer.function = tau_timeout_smp;
	tau_timer.expires = jiffies + shrink_timer;
	add_timer(&tau_timer);

	/* Initialize the window on every CPU. */
	on_each_cpu(TAU_init_smp, NULL, 1, 0);

	printk("Thermal assist unit ");
#ifdef CONFIG_TAU_INT
	printk("using interrupts, ");
#else
	printk("using timers, ");
#endif
	printk("shrink_timer: %d jiffies\n", shrink_timer);
	tau_initialized = 1;

	return 0;
}
252 | |||
253 | __initcall(TAU_init); | ||
254 | |||
255 | /* | ||
256 | * return current temp | ||
257 | */ | ||
258 | |||
259 | u32 cpu_temp_both(unsigned long cpu) | ||
260 | { | ||
261 | return ((tau[cpu].high << 16) | tau[cpu].low); | ||
262 | } | ||
263 | |||
264 | int cpu_temp(unsigned long cpu) | ||
265 | { | ||
266 | return ((tau[cpu].high + tau[cpu].low) / 2); | ||
267 | } | ||
268 | |||
269 | int tau_interrupts(unsigned long cpu) | ||
270 | { | ||
271 | return (tau[cpu].interrupts); | ||
272 | } | ||
diff --git a/arch/ppc/kernel/time.c b/arch/ppc/kernel/time.c new file mode 100644 index 000000000000..50724139402c --- /dev/null +++ b/arch/ppc/kernel/time.c | |||
@@ -0,0 +1,447 @@ | |||
1 | /* | ||
2 | * Common time routines among all ppc machines. | ||
3 | * | ||
4 | * Written by Cort Dougan (cort@cs.nmt.edu) to merge | ||
5 | * Paul Mackerras' version and mine for PReP and Pmac. | ||
6 | * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net). | ||
7 | * | ||
8 | * First round of bugfixes by Gabriel Paubert (paubert@iram.es) | ||
9 | * to make clock more stable (2.4.0-test5). The only thing | ||
10 | * that this code assumes is that the timebases have been synchronized | ||
11 | * by firmware on SMP and are never stopped (never do sleep | ||
12 | * on SMP then, nap and doze are OK). | ||
13 | * | ||
14 | * TODO (not necessarily in this file): | ||
15 | * - improve precision and reproducibility of timebase frequency | ||
16 | * measurement at boot time. | ||
17 | * - get rid of xtime_lock for gettimeofday (generic kernel problem | ||
18 | * to be implemented on all architectures for SMP scalability and | ||
19 | * eventually implementing gettimeofday without entering the kernel). | ||
20 | * - put all time/clock related variables in a single structure | ||
21 | * to minimize number of cache lines touched by gettimeofday() | ||
22 | * - for astronomical applications: add a new function to get | ||
23 | * non ambiguous timestamps even around leap seconds. This needs | ||
24 | * a new timestamp format and a good name. | ||
25 | * | ||
26 | * | ||
27 | * The following comment is partially obsolete (at least the long wait | ||
28 | * is no more a valid reason): | ||
29 | * Since the MPC8xx has a programmable interrupt timer, I decided to | ||
30 | * use that rather than the decrementer. Two reasons: 1.) the clock | ||
31 | * frequency is low, causing 2.) a long wait in the timer interrupt | ||
32 | * while ((d = get_dec()) == dval) | ||
33 | * loop. The MPC8xx can be driven from a variety of input clocks, | ||
34 | * so a number of assumptions have been made here because the kernel | ||
35 | * parameter HZ is a constant. We assume (correctly, today :-) that | ||
36 | * the MPC8xx on the MBX board is driven from a 32.768 kHz crystal. | ||
37 | * This is then divided by 4, providing a 8192 Hz clock into the PIT. | ||
38 | * Since it is not possible to get a nice 100 Hz clock out of this, without | ||
39 | * creating a software PLL, I have set HZ to 128. -- Dan | ||
40 | * | ||
41 | * 1997-09-10 Updated NTP code according to technical memorandum Jan '96 | ||
42 | * "A Kernel Model for Precision Timekeeping" by Dave Mills | ||
43 | */ | ||
44 | |||
45 | #include <linux/config.h> | ||
46 | #include <linux/errno.h> | ||
47 | #include <linux/sched.h> | ||
48 | #include <linux/kernel.h> | ||
49 | #include <linux/param.h> | ||
50 | #include <linux/string.h> | ||
51 | #include <linux/mm.h> | ||
52 | #include <linux/module.h> | ||
53 | #include <linux/interrupt.h> | ||
54 | #include <linux/timex.h> | ||
55 | #include <linux/kernel_stat.h> | ||
56 | #include <linux/mc146818rtc.h> | ||
57 | #include <linux/time.h> | ||
58 | #include <linux/init.h> | ||
59 | #include <linux/profile.h> | ||
60 | |||
61 | #include <asm/segment.h> | ||
62 | #include <asm/io.h> | ||
63 | #include <asm/nvram.h> | ||
64 | #include <asm/cache.h> | ||
65 | #include <asm/8xx_immap.h> | ||
66 | #include <asm/machdep.h> | ||
67 | |||
68 | #include <asm/time.h> | ||
69 | |||
/* XXX false sharing with below? */
u64 jiffies_64 = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/* Per-CPU flag: when set, timer_interrupt() does not rearm the decrementer */
unsigned long disarm_decr[NR_CPUS];

extern struct timezone sys_tz;

/* keep track of when we need to update the rtc */
time_t last_rtc_update;

/* The decrementer counts down by 128 every 128ns on a 601. */
#define DECREMENTER_COUNT_601	(1000000000 / HZ)

unsigned tb_ticks_per_jiffy;	/* timebase ticks per timer interrupt */
unsigned tb_to_us;		/* mulhwu() scale factor: tb ticks -> microseconds */
unsigned tb_last_stamp;		/* timebase value at the last jiffy update */
unsigned long tb_to_ns_scale;	/* tb ticks -> ns scale, pre-shifted left by 10 */

extern unsigned long wall_jiffies;

/* Timezone offset in seconds supplied by ppc_md.time_init(), if any */
static long time_offset;

DEFINE_SPINLOCK(rtc_lock);

EXPORT_SYMBOL(rtc_lock);
97 | |||
/* Timer interrupt helper function */
/*
 * Return the number of timebase (or, on the 601, RTC) ticks elapsed
 * since *jiffy_stamp.  On the 601 (__USE_RTC()) the low RTC register
 * counts nanoseconds and wraps at one billion, so a reading below the
 * stamp means a wrap occurred; rebase the stamp by 1e9 before
 * subtracting.  May modify *jiffy_stamp in that case.
 */
static inline int tb_delta(unsigned *jiffy_stamp) {
	int delta;
	if (__USE_RTC()) {
		delta = get_rtcl();
		if (delta < *jiffy_stamp) *jiffy_stamp -= 1000000000;
		delta -= *jiffy_stamp;
	} else {
		delta = get_tbl() - *jiffy_stamp;
	}
	return delta;
}
110 | |||
#ifdef CONFIG_SMP
/*
 * Return the PC to credit for a profiling tick.  If the interrupted
 * PC is inside a lock primitive, charge the caller (link register)
 * rather than the lock code itself.
 */
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	return in_lock_functions(pc) ? regs->link : pc;
}
EXPORT_SYMBOL(profile_pc);
#endif
123 | |||
124 | /* | ||
125 | * timer_interrupt - gets called when the decrementer overflows, | ||
126 | * with interrupts disabled. | ||
127 | * We set it up to overflow again in 1/HZ seconds. | ||
128 | */ | ||
129 | void timer_interrupt(struct pt_regs * regs) | ||
130 | { | ||
131 | int next_dec; | ||
132 | unsigned long cpu = smp_processor_id(); | ||
133 | unsigned jiffy_stamp = last_jiffy_stamp(cpu); | ||
134 | extern void do_IRQ(struct pt_regs *); | ||
135 | |||
136 | if (atomic_read(&ppc_n_lost_interrupts) != 0) | ||
137 | do_IRQ(regs); | ||
138 | |||
139 | irq_enter(); | ||
140 | |||
141 | while ((next_dec = tb_ticks_per_jiffy - tb_delta(&jiffy_stamp)) <= 0) { | ||
142 | jiffy_stamp += tb_ticks_per_jiffy; | ||
143 | |||
144 | profile_tick(CPU_PROFILING, regs); | ||
145 | update_process_times(user_mode(regs)); | ||
146 | |||
147 | if (smp_processor_id()) | ||
148 | continue; | ||
149 | |||
150 | /* We are in an interrupt, no need to save/restore flags */ | ||
151 | write_seqlock(&xtime_lock); | ||
152 | tb_last_stamp = jiffy_stamp; | ||
153 | do_timer(regs); | ||
154 | |||
155 | /* | ||
156 | * update the rtc when needed, this should be performed on the | ||
157 | * right fraction of a second. Half or full second ? | ||
158 | * Full second works on mk48t59 clocks, others need testing. | ||
159 | * Note that this update is basically only used through | ||
160 | * the adjtimex system calls. Setting the HW clock in | ||
161 | * any other way is a /dev/rtc and userland business. | ||
162 | * This is still wrong by -0.5/+1.5 jiffies because of the | ||
163 | * timer interrupt resolution and possible delay, but here we | ||
164 | * hit a quantization limit which can only be solved by higher | ||
165 | * resolution timers and decoupling time management from timer | ||
166 | * interrupts. This is also wrong on the clocks | ||
167 | * which require being written at the half second boundary. | ||
168 | * We should have an rtc call that only sets the minutes and | ||
169 | * seconds like on Intel to avoid problems with non UTC clocks. | ||
170 | */ | ||
171 | if ( ppc_md.set_rtc_time && (time_status & STA_UNSYNC) == 0 && | ||
172 | xtime.tv_sec - last_rtc_update >= 659 && | ||
173 | abs((xtime.tv_nsec / 1000) - (1000000-1000000/HZ)) < 500000/HZ && | ||
174 | jiffies - wall_jiffies == 1) { | ||
175 | if (ppc_md.set_rtc_time(xtime.tv_sec+1 + time_offset) == 0) | ||
176 | last_rtc_update = xtime.tv_sec+1; | ||
177 | else | ||
178 | /* Try again one minute later */ | ||
179 | last_rtc_update += 60; | ||
180 | } | ||
181 | write_sequnlock(&xtime_lock); | ||
182 | } | ||
183 | if ( !disarm_decr[smp_processor_id()] ) | ||
184 | set_dec(next_dec); | ||
185 | last_jiffy_stamp(cpu) = jiffy_stamp; | ||
186 | |||
187 | if (ppc_md.heartbeat && !ppc_md.heartbeat_count--) | ||
188 | ppc_md.heartbeat(); | ||
189 | |||
190 | irq_exit(); | ||
191 | } | ||
192 | |||
193 | /* | ||
194 | * This version of gettimeofday has microsecond resolution. | ||
195 | */ | ||
196 | void do_gettimeofday(struct timeval *tv) | ||
197 | { | ||
198 | unsigned long flags; | ||
199 | unsigned long seq; | ||
200 | unsigned delta, lost_ticks, usec, sec; | ||
201 | |||
202 | do { | ||
203 | seq = read_seqbegin_irqsave(&xtime_lock, flags); | ||
204 | sec = xtime.tv_sec; | ||
205 | usec = (xtime.tv_nsec / 1000); | ||
206 | delta = tb_ticks_since(tb_last_stamp); | ||
207 | #ifdef CONFIG_SMP | ||
208 | /* As long as timebases are not in sync, gettimeofday can only | ||
209 | * have jiffy resolution on SMP. | ||
210 | */ | ||
211 | if (!smp_tb_synchronized) | ||
212 | delta = 0; | ||
213 | #endif /* CONFIG_SMP */ | ||
214 | lost_ticks = jiffies - wall_jiffies; | ||
215 | } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); | ||
216 | |||
217 | usec += mulhwu(tb_to_us, tb_ticks_per_jiffy * lost_ticks + delta); | ||
218 | while (usec >= 1000000) { | ||
219 | sec++; | ||
220 | usec -= 1000000; | ||
221 | } | ||
222 | tv->tv_sec = sec; | ||
223 | tv->tv_usec = usec; | ||
224 | } | ||
225 | |||
226 | EXPORT_SYMBOL(do_gettimeofday); | ||
227 | |||
/*
 * Set the wall-clock time to *tv, compensating for the timebase ticks
 * that have elapsed since the last jiffy update so the new time takes
 * effect "now" rather than at the last tick.  Returns 0 on success,
 * -EINVAL for an out-of-range nanosecond field.
 */
int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, new_sec = tv->tv_sec;
	long wtm_nsec, new_nsec = tv->tv_nsec;
	unsigned long flags;
	int tb_delta;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);
	/* Updating the RTC is not the job of this code. If the time is
	 * stepped under NTP, the RTC will be update after STA_UNSYNC
	 * is cleared. Tool like clock/hwclock either copy the RTC
	 * to the system time, in which case there is no point in writing
	 * to the RTC again, or write to the RTC but then they don't call
	 * settimeofday to perform this operation. Note also that
	 * we don't touch the decrementer since:
	 * a) it would lose timer interrupt synchronization on SMP
	 * (if it is working one day)
	 * b) it could make one jiffy spuriously shorter or longer
	 * which would introduce another source of uncertainty potentially
	 * harmful to relatively short timers.
	 */

	/* This works perfectly on SMP only if the tb are in sync but
	 * guarantees an error < 1 jiffy even if they are off by eons,
	 * still reasonable when gettimeofday resolution is 1 jiffy.
	 */
	tb_delta = tb_ticks_since(last_jiffy_stamp(smp_processor_id()));
	tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy;

	/* Back-date the requested time by the already-elapsed fraction */
	new_nsec -= 1000 * mulhwu(tb_to_us, tb_delta);

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec);

	set_normalized_timespec(&xtime, new_sec, new_nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	/* In case of a large backwards jump in time with NTP, we want the
	 * clock to be updated as soon as the PLL is again in lock.
	 */
	last_rtc_update = new_sec - 658;

	time_adjust = 0;                /* stop active adjtime() */
	time_status |= STA_UNSYNC;
	time_state = TIME_ERROR;        /* p. 24, (a) */
	time_maxerror = NTP_PHASE_LIMIT;
	time_esterror = NTP_PHASE_LIMIT;
	write_sequnlock_irqrestore(&xtime_lock, flags);
	clock_was_set();
	return 0;
}

EXPORT_SYMBOL(do_settimeofday);
284 | |||
/* This function is only called on the boot processor */
/*
 * Boot-time clock setup: calibrate the decrementer (or use the 601's
 * fixed RTC rate), read the hardware RTC (if any) to seed xtime, apply
 * an optional platform timezone offset, and arm the first tick.
 */
void __init time_init(void)
{
	time_t sec, old_sec;
	unsigned old_stamp, stamp, elapsed;

	/* Optional platform hook; returns a timezone offset in seconds */
	if (ppc_md.time_init != NULL)
		time_offset = ppc_md.time_init();

	if (__USE_RTC()) {
		/* 601 processor: dec counts down by 128 every 128ns */
		tb_ticks_per_jiffy = DECREMENTER_COUNT_601;
		/* mulhwu_scale_factor(1000000000, 1000000) is 0x418937 */
		tb_to_us = 0x418937;
	} else {
		ppc_md.calibrate_decr();
		tb_to_ns_scale = mulhwu(tb_to_us, 1000 << 10);
	}

	/* Now that the decrementer is calibrated, it can be used in case the
	 * clock is stuck, but the fact that we have to handle the 601
	 * makes things more complex. Repeatedly read the RTC until the
	 * next second boundary to try to achieve some precision.  If there
	 * is no RTC, we still need to set tb_last_stamp and
	 * last_jiffy_stamp(cpu 0) to the current stamp.
	 */
	stamp = get_native_tbl();
	if (ppc_md.get_rtc_time) {
		sec = ppc_md.get_rtc_time();
		elapsed = 0;
		do {
			old_stamp = stamp;
			old_sec = sec;
			stamp = get_native_tbl();
			/* 601 RTC low register wraps at 1e9; compensate */
			if (__USE_RTC() && stamp < old_stamp)
				old_stamp -= 1000000000;
			elapsed += stamp - old_stamp;
			sec = ppc_md.get_rtc_time();
		} while ( sec == old_sec && elapsed < 2*HZ*tb_ticks_per_jiffy);
		if (sec==old_sec)
			printk("Warning: real time clock seems stuck!\n");
		xtime.tv_sec = sec;
		xtime.tv_nsec = 0;
		/* No update now, we just read the time from the RTC ! */
		last_rtc_update = xtime.tv_sec;
	}
	last_jiffy_stamp(0) = tb_last_stamp = stamp;

	/* Not exact, but the timer interrupt takes care of this */
	set_dec(tb_ticks_per_jiffy);

	/* If platform provided a timezone (pmac), we correct the time */
	if (time_offset) {
		sys_tz.tz_minuteswest = -time_offset / 60;
		sys_tz.tz_dsttime = 0;
		xtime.tv_sec -= time_offset;
	}
	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);
}
345 | |||
346 | #define FEBRUARY 2 | ||
347 | #define STARTOFTIME 1970 | ||
348 | #define SECDAY 86400L | ||
349 | #define SECYR (SECDAY * 365) | ||
350 | |||
351 | /* | ||
352 | * Note: this is wrong for 2100, but our signed 32-bit time_t will | ||
353 | * have overflowed long before that, so who cares. -- paulus | ||
354 | */ | ||
355 | #define leapyear(year) ((year) % 4 == 0) | ||
356 | #define days_in_year(a) (leapyear(a) ? 366 : 365) | ||
357 | #define days_in_month(a) (month_days[(a) - 1]) | ||
358 | |||
359 | static int month_days[12] = { | ||
360 | 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 | ||
361 | }; | ||
362 | |||
363 | void to_tm(int tim, struct rtc_time * tm) | ||
364 | { | ||
365 | register int i; | ||
366 | register long hms, day, gday; | ||
367 | |||
368 | gday = day = tim / SECDAY; | ||
369 | hms = tim % SECDAY; | ||
370 | |||
371 | /* Hours, minutes, seconds are easy */ | ||
372 | tm->tm_hour = hms / 3600; | ||
373 | tm->tm_min = (hms % 3600) / 60; | ||
374 | tm->tm_sec = (hms % 3600) % 60; | ||
375 | |||
376 | /* Number of years in days */ | ||
377 | for (i = STARTOFTIME; day >= days_in_year(i); i++) | ||
378 | day -= days_in_year(i); | ||
379 | tm->tm_year = i; | ||
380 | |||
381 | /* Number of months in days left */ | ||
382 | if (leapyear(tm->tm_year)) | ||
383 | days_in_month(FEBRUARY) = 29; | ||
384 | for (i = 1; day >= days_in_month(i); i++) | ||
385 | day -= days_in_month(i); | ||
386 | days_in_month(FEBRUARY) = 28; | ||
387 | tm->tm_mon = i; | ||
388 | |||
389 | /* Days are what is left over (+1) from all that. */ | ||
390 | tm->tm_mday = day + 1; | ||
391 | |||
392 | /* | ||
393 | * Determine the day of week. Jan. 1, 1970 was a Thursday. | ||
394 | */ | ||
395 | tm->tm_wday = (gday + 4) % 7; | ||
396 | } | ||
397 | |||
/* Auxiliary function to compute scaling factors */
/* Actually the choice of a timebase running at 1/4 the of the bus
 * frequency giving resolution of a few tens of nanoseconds is quite nice.
 * It makes this computation very precise (27-28 bits typically) which
 * is optimistic considering the stability of most processor clock
 * oscillators and the precision with which the timebase frequency
 * is measured but does not harm.
 */
/*
 * Find, bit by bit from the top, the largest multiplier mlt such that
 * mulhwu(inscale, mlt) stays below outscale — i.e. the fixed-point
 * ratio outscale/inscale scaled for mulhwu's high-word multiply.
 * Returns 0 if outscale is too large to represent.
 */
unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale) {
	unsigned mlt=0, tmp, err;
	/* No concern for performance, it's done once: use a stupid
	 * but safe and compact method to find the multiplier.
	 */
	for (tmp = 1U<<31; tmp != 0; tmp >>= 1) {
		if (mulhwu(inscale, mlt|tmp) < outscale) mlt|=tmp;
	}
	/* We might still be off by 1 for the best approximation.
	 * A side effect of this is that if outscale is too large
	 * the returned value will be zero.
	 * Many corner cases have been checked and seem to work,
	 * some might have been forgotten in the test however.
	 */
	err = inscale*(mlt+1);
	if (err <= inscale/2) mlt++;
	return mlt;
}
424 | |||
425 | unsigned long long sched_clock(void) | ||
426 | { | ||
427 | unsigned long lo, hi, hi2; | ||
428 | unsigned long long tb; | ||
429 | |||
430 | if (!__USE_RTC()) { | ||
431 | do { | ||
432 | hi = get_tbu(); | ||
433 | lo = get_tbl(); | ||
434 | hi2 = get_tbu(); | ||
435 | } while (hi2 != hi); | ||
436 | tb = ((unsigned long long) hi << 32) | lo; | ||
437 | tb = (tb * tb_to_ns_scale) >> 10; | ||
438 | } else { | ||
439 | do { | ||
440 | hi = get_rtcu(); | ||
441 | lo = get_rtcl(); | ||
442 | hi2 = get_rtcu(); | ||
443 | } while (hi2 != hi); | ||
444 | tb = ((unsigned long long) hi) * 1000000000 + lo; | ||
445 | } | ||
446 | return tb; | ||
447 | } | ||
diff --git a/arch/ppc/kernel/traps.c b/arch/ppc/kernel/traps.c new file mode 100644 index 000000000000..ed5c7acdca70 --- /dev/null +++ b/arch/ppc/kernel/traps.c | |||
@@ -0,0 +1,886 @@ | |||
1 | /* | ||
2 | * arch/ppc/kernel/traps.c | ||
3 | * | ||
4 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * Modified by Cort Dougan (cort@cs.nmt.edu) | ||
12 | * and Paul Mackerras (paulus@cs.anu.edu.au) | ||
13 | */ | ||
14 | |||
15 | /* | ||
16 | * This file handles the architecture-dependent parts of hardware exceptions | ||
17 | */ | ||
18 | |||
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/prctl.h>
#include <linux/string.h>
34 | |||
35 | #include <asm/pgtable.h> | ||
36 | #include <asm/uaccess.h> | ||
37 | #include <asm/system.h> | ||
38 | #include <asm/io.h> | ||
39 | #include <asm/reg.h> | ||
40 | #include <asm/xmon.h> | ||
41 | #ifdef CONFIG_PMAC_BACKLIGHT | ||
42 | #include <asm/backlight.h> | ||
43 | #endif | ||
44 | #include <asm/perfmon.h> | ||
45 | |||
/*
 * Debugger hooks: with XMON configured the pointers are wired to the
 * xmon entry points; with KGDB they are left NULL for the stub to
 * install; otherwise they collapse to no-op macros so callers need
 * no #ifdefs.
 */
#ifdef CONFIG_XMON
void (*debugger)(struct pt_regs *regs) = xmon;
int (*debugger_bpt)(struct pt_regs *regs) = xmon_bpt;
int (*debugger_sstep)(struct pt_regs *regs) = xmon_sstep;
int (*debugger_iabr_match)(struct pt_regs *regs) = xmon_iabr_match;
int (*debugger_dabr_match)(struct pt_regs *regs) = xmon_dabr_match;
void (*debugger_fault_handler)(struct pt_regs *regs);
#else
#ifdef CONFIG_KGDB
void (*debugger)(struct pt_regs *regs);
int (*debugger_bpt)(struct pt_regs *regs);
int (*debugger_sstep)(struct pt_regs *regs);
int (*debugger_iabr_match)(struct pt_regs *regs);
int (*debugger_dabr_match)(struct pt_regs *regs);
void (*debugger_fault_handler)(struct pt_regs *regs);
#else
#define debugger(regs)			do { } while (0)
#define debugger_bpt(regs)		0
#define debugger_sstep(regs)		0
#define debugger_iabr_match(regs)	0
#define debugger_dabr_match(regs)	0
#define debugger_fault_handler		((void (*)(struct pt_regs *))0)
#endif
#endif

/*
 * Trap & Exception support
 */

/* Serializes oops output from die() across CPUs */
DEFINE_SPINLOCK(die_lock);
76 | |||
/*
 * Print an oops banner and register dump for a fatal kernel fault,
 * then terminate the current task via do_exit().  die_lock keeps
 * concurrent oopses from interleaving their output.
 */
void die(const char * str, struct pt_regs * fp, long err)
{
	static int die_counter;	/* numbers successive oopses: [#N] */
	int nl = 0;
	console_verbose();
	spin_lock_irq(&die_lock);
#ifdef CONFIG_PMAC_BACKLIGHT
	/* Light up the screen so the oops is actually readable */
	set_backlight_enable(1);
	set_backlight_level(BACKLIGHT_MAX);
#endif
	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
	nl = 1;
#endif
#ifdef CONFIG_SMP
	printk("SMP NR_CPUS=%d ", NR_CPUS);
	nl = 1;
#endif
	if (nl)
		printk("\n");
	show_regs(fp);
	spin_unlock_irq(&die_lock);
	/* do_exit() should take care of panic'ing from an interrupt
	 * context so we don't handle it here
	 */
	do_exit(err);
}
105 | |||
106 | void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) | ||
107 | { | ||
108 | siginfo_t info; | ||
109 | |||
110 | if (!user_mode(regs)) { | ||
111 | debugger(regs); | ||
112 | die("Exception in kernel mode", regs, signr); | ||
113 | } | ||
114 | info.si_signo = signr; | ||
115 | info.si_errno = 0; | ||
116 | info.si_code = code; | ||
117 | info.si_addr = (void __user *) addr; | ||
118 | force_sig_info(signr, &info, current); | ||
119 | } | ||
120 | |||
121 | /* | ||
122 | * I/O accesses can cause machine checks on powermacs. | ||
123 | * Check if the NIP corresponds to the address of a sync | ||
124 | * instruction for which there is an entry in the exception | ||
125 | * table. | ||
126 | * Note that the 601 only takes a machine check on TEA | ||
127 | * (transfer error ack) signal assertion, and does not | ||
128 | * set any of the top 16 bits of SRR1. | ||
129 | * -- paulus. | ||
130 | */ | ||
131 | static inline int check_io_access(struct pt_regs *regs) | ||
132 | { | ||
133 | #ifdef CONFIG_PPC_PMAC | ||
134 | unsigned long msr = regs->msr; | ||
135 | const struct exception_table_entry *entry; | ||
136 | unsigned int *nip = (unsigned int *)regs->nip; | ||
137 | |||
138 | if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000))) | ||
139 | && (entry = search_exception_tables(regs->nip)) != NULL) { | ||
140 | /* | ||
141 | * Check that it's a sync instruction, or somewhere | ||
142 | * in the twi; isync; nop sequence that inb/inw/inl uses. | ||
143 | * As the address is in the exception table | ||
144 | * we should be able to read the instr there. | ||
145 | * For the debug message, we look at the preceding | ||
146 | * load or store. | ||
147 | */ | ||
148 | if (*nip == 0x60000000) /* nop */ | ||
149 | nip -= 2; | ||
150 | else if (*nip == 0x4c00012c) /* isync */ | ||
151 | --nip; | ||
152 | if (*nip == 0x7c0004ac || (*nip >> 26) == 3) { | ||
153 | /* sync or twi */ | ||
154 | unsigned int rb; | ||
155 | |||
156 | --nip; | ||
157 | rb = (*nip >> 11) & 0x1f; | ||
158 | printk(KERN_DEBUG "%s bad port %lx at %p\n", | ||
159 | (*nip & 0x100)? "OUT to": "IN from", | ||
160 | regs->gpr[rb] - _IO_BASE, nip); | ||
161 | regs->msr |= MSR_RI; | ||
162 | regs->nip = entry->fixup; | ||
163 | return 1; | ||
164 | } | ||
165 | } | ||
166 | #endif /* CONFIG_PPC_PMAC */ | ||
167 | return 0; | ||
168 | } | ||
169 | |||
/*
 * Per-family accessors for the exception "reason" word and the
 * single-step state, so the handlers below stay #ifdef-free.
 */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
/* On 4xx, the reason for the machine check or program exception
   is in the ESR. */
#define get_reason(regs)	((regs)->dsisr)
#ifndef CONFIG_E500
#define get_mc_reason(regs)	((regs)->dsisr)
#else
#define get_mc_reason(regs)	(mfspr(SPRN_MCSR))
#endif
#define REASON_FP		0
#define REASON_ILLEGAL		ESR_PIL
#define REASON_PRIVILEGED	ESR_PPR
#define REASON_TRAP		ESR_PTR

/* single-step stuff */
#define single_stepping(regs)	(current->thread.dbcr0 & DBCR0_IC)
#define clear_single_step(regs)	(current->thread.dbcr0 &= ~DBCR0_IC)

#else
/* On non-4xx, the reason for the machine check or program
   exception is in the MSR. */
#define get_reason(regs)	((regs)->msr)
#define get_mc_reason(regs)	((regs)->msr)
#define REASON_FP		0x100000
#define REASON_ILLEGAL		0x80000
#define REASON_PRIVILEGED	0x40000
#define REASON_TRAP		0x20000

#define single_stepping(regs)	((regs)->msr & MSR_SE)
#define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
#endif
201 | |||
202 | /* | ||
203 | * This is "fall-back" implementation for configurations | ||
204 | * which don't provide platform-specific machine check info | ||
205 | */ | ||
206 | void __attribute__ ((weak)) | ||
207 | platform_machine_check(struct pt_regs *regs) | ||
208 | { | ||
209 | } | ||
210 | |||
211 | void MachineCheckException(struct pt_regs *regs) | ||
212 | { | ||
213 | unsigned long reason = get_mc_reason(regs); | ||
214 | |||
215 | if (user_mode(regs)) { | ||
216 | regs->msr |= MSR_RI; | ||
217 | _exception(SIGBUS, regs, BUS_ADRERR, regs->nip); | ||
218 | return; | ||
219 | } | ||
220 | |||
221 | #if defined(CONFIG_8xx) && defined(CONFIG_PCI) | ||
222 | /* the qspan pci read routines can cause machine checks -- Cort */ | ||
223 | bad_page_fault(regs, regs->dar, SIGBUS); | ||
224 | return; | ||
225 | #endif | ||
226 | |||
227 | if (debugger_fault_handler) { | ||
228 | debugger_fault_handler(regs); | ||
229 | regs->msr |= MSR_RI; | ||
230 | return; | ||
231 | } | ||
232 | |||
233 | if (check_io_access(regs)) | ||
234 | return; | ||
235 | |||
236 | #if defined(CONFIG_4xx) && !defined(CONFIG_440A) | ||
237 | if (reason & ESR_IMCP) { | ||
238 | printk("Instruction"); | ||
239 | mtspr(SPRN_ESR, reason & ~ESR_IMCP); | ||
240 | } else | ||
241 | printk("Data"); | ||
242 | printk(" machine check in kernel mode.\n"); | ||
243 | #elif defined(CONFIG_440A) | ||
244 | printk("Machine check in kernel mode.\n"); | ||
245 | if (reason & ESR_IMCP){ | ||
246 | printk("Instruction Synchronous Machine Check exception\n"); | ||
247 | mtspr(SPRN_ESR, reason & ~ESR_IMCP); | ||
248 | } | ||
249 | else { | ||
250 | u32 mcsr = mfspr(SPRN_MCSR); | ||
251 | if (mcsr & MCSR_IB) | ||
252 | printk("Instruction Read PLB Error\n"); | ||
253 | if (mcsr & MCSR_DRB) | ||
254 | printk("Data Read PLB Error\n"); | ||
255 | if (mcsr & MCSR_DWB) | ||
256 | printk("Data Write PLB Error\n"); | ||
257 | if (mcsr & MCSR_TLBP) | ||
258 | printk("TLB Parity Error\n"); | ||
259 | if (mcsr & MCSR_ICP){ | ||
260 | flush_instruction_cache(); | ||
261 | printk("I-Cache Parity Error\n"); | ||
262 | } | ||
263 | if (mcsr & MCSR_DCSP) | ||
264 | printk("D-Cache Search Parity Error\n"); | ||
265 | if (mcsr & MCSR_DCFP) | ||
266 | printk("D-Cache Flush Parity Error\n"); | ||
267 | if (mcsr & MCSR_IMPE) | ||
268 | printk("Machine Check exception is imprecise\n"); | ||
269 | |||
270 | /* Clear MCSR */ | ||
271 | mtspr(SPRN_MCSR, mcsr); | ||
272 | } | ||
273 | #elif defined (CONFIG_E500) | ||
274 | printk("Machine check in kernel mode.\n"); | ||
275 | printk("Caused by (from MCSR=%lx): ", reason); | ||
276 | |||
277 | if (reason & MCSR_MCP) | ||
278 | printk("Machine Check Signal\n"); | ||
279 | if (reason & MCSR_ICPERR) | ||
280 | printk("Instruction Cache Parity Error\n"); | ||
281 | if (reason & MCSR_DCP_PERR) | ||
282 | printk("Data Cache Push Parity Error\n"); | ||
283 | if (reason & MCSR_DCPERR) | ||
284 | printk("Data Cache Parity Error\n"); | ||
285 | if (reason & MCSR_GL_CI) | ||
286 | printk("Guarded Load or Cache-Inhibited stwcx.\n"); | ||
287 | if (reason & MCSR_BUS_IAERR) | ||
288 | printk("Bus - Instruction Address Error\n"); | ||
289 | if (reason & MCSR_BUS_RAERR) | ||
290 | printk("Bus - Read Address Error\n"); | ||
291 | if (reason & MCSR_BUS_WAERR) | ||
292 | printk("Bus - Write Address Error\n"); | ||
293 | if (reason & MCSR_BUS_IBERR) | ||
294 | printk("Bus - Instruction Data Error\n"); | ||
295 | if (reason & MCSR_BUS_RBERR) | ||
296 | printk("Bus - Read Data Bus Error\n"); | ||
297 | if (reason & MCSR_BUS_WBERR) | ||
298 | printk("Bus - Read Data Bus Error\n"); | ||
299 | if (reason & MCSR_BUS_IPERR) | ||
300 | printk("Bus - Instruction Parity Error\n"); | ||
301 | if (reason & MCSR_BUS_RPERR) | ||
302 | printk("Bus - Read Parity Error\n"); | ||
303 | #else /* !CONFIG_4xx && !CONFIG_E500 */ | ||
304 | printk("Machine check in kernel mode.\n"); | ||
305 | printk("Caused by (from SRR1=%lx): ", reason); | ||
306 | switch (reason & 0x601F0000) { | ||
307 | case 0x80000: | ||
308 | printk("Machine check signal\n"); | ||
309 | break; | ||
310 | case 0: /* for 601 */ | ||
311 | case 0x40000: | ||
312 | case 0x140000: /* 7450 MSS error and TEA */ | ||
313 | printk("Transfer error ack signal\n"); | ||
314 | break; | ||
315 | case 0x20000: | ||
316 | printk("Data parity error signal\n"); | ||
317 | break; | ||
318 | case 0x10000: | ||
319 | printk("Address parity error signal\n"); | ||
320 | break; | ||
321 | case 0x20000000: | ||
322 | printk("L1 Data Cache error\n"); | ||
323 | break; | ||
324 | case 0x40000000: | ||
325 | printk("L1 Instruction Cache error\n"); | ||
326 | break; | ||
327 | case 0x00100000: | ||
328 | printk("L2 data cache parity error\n"); | ||
329 | break; | ||
330 | default: | ||
331 | printk("Unknown values in msr\n"); | ||
332 | } | ||
333 | #endif /* CONFIG_4xx */ | ||
334 | |||
335 | /* | ||
336 | * Optional platform-provided routine to print out | ||
337 | * additional info, e.g. bus error registers. | ||
338 | */ | ||
339 | platform_machine_check(regs); | ||
340 | |||
341 | debugger(regs); | ||
342 | die("machine check", regs, SIGBUS); | ||
343 | } | ||
344 | |||
/*
 * System Management Interrupt: hand control to the debugger when one
 * is configured; otherwise dump registers and panic, since we have no
 * other way to handle it.
 */
void SMIException(struct pt_regs *regs)
{
	debugger(regs);
#if !(defined(CONFIG_XMON) || defined(CONFIG_KGDB))
	show_regs(regs);
	panic("System Management Interrupt");
#endif
}
353 | |||
/*
 * Catch-all for exception vectors with no dedicated handler: log the
 * trap location and deliver SIGTRAP to the offending process.
 */
void UnknownException(struct pt_regs *regs)
{
	printk("Bad trap at PC: %lx, MSR: %lx, vector=%lx    %s\n",
	       regs->nip, regs->msr, regs->trap, print_tainted());
	_exception(SIGTRAP, regs, 0, 0);
}
360 | |||
/*
 * IABR (instruction address breakpoint) hit: give the kernel debugger
 * first refusal; otherwise raise SIGTRAP as a breakpoint.
 */
void InstructionBreakpoint(struct pt_regs *regs)
{
	if (debugger_iabr_match(regs))
		return;
	_exception(SIGTRAP, regs, TRAP_BRKPT, 0);
}
367 | |||
/* Run-mode / trace exception: simply deliver SIGTRAP. */
void RunModeException(struct pt_regs *regs)
{
	_exception(SIGTRAP, regs, 0, 0);
}
372 | |||
/* Illegal instruction emulation support.  Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault.  Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits.  In this case, matching lots of
 * bits is faster and easier.
 *
 */
/* mfspr rN, PVR — any target register (rN bits masked out) */
#define INST_MFSPR_PVR		0x7c1f42a6
#define INST_MFSPR_PVR_MASK	0xfc1fffff

#define INST_DCBA		0x7c0005ec
#define INST_DCBA_MASK		0x7c0007fe

#define INST_MCRXR		0x7c000400
#define INST_MCRXR_MASK		0x7c0007fe

/* Load/store string instructions (lswi/lswx/stswi/stswx) */
#define INST_STRING		0x7c00042a
#define INST_STRING_MASK	0x7c0007fe
#define INST_STRING_GEN_MASK	0x7c00067e
#define INST_LSWI		0x7c0004aa
#define INST_LSWX		0x7c00042a
#define INST_STSWI		0x7c0005aa
#define INST_STSWX		0x7c00052a
399 | |||
400 | static int emulate_string_inst(struct pt_regs *regs, u32 instword) | ||
401 | { | ||
402 | u8 rT = (instword >> 21) & 0x1f; | ||
403 | u8 rA = (instword >> 16) & 0x1f; | ||
404 | u8 NB_RB = (instword >> 11) & 0x1f; | ||
405 | u32 num_bytes; | ||
406 | u32 EA; | ||
407 | int pos = 0; | ||
408 | |||
409 | /* Early out if we are an invalid form of lswx */ | ||
410 | if ((instword & INST_STRING_MASK) == INST_LSWX) | ||
411 | if ((rA >= rT) || (NB_RB >= rT) || (rT == rA) || (rT == NB_RB)) | ||
412 | return -EINVAL; | ||
413 | |||
414 | /* Early out if we are an invalid form of lswi */ | ||
415 | if ((instword & INST_STRING_MASK) == INST_LSWI) | ||
416 | if ((rA >= rT) || (rT == rA)) | ||
417 | return -EINVAL; | ||
418 | |||
419 | EA = (rA == 0) ? 0 : regs->gpr[rA]; | ||
420 | |||
421 | switch (instword & INST_STRING_MASK) { | ||
422 | case INST_LSWX: | ||
423 | case INST_STSWX: | ||
424 | EA += NB_RB; | ||
425 | num_bytes = regs->xer & 0x7f; | ||
426 | break; | ||
427 | case INST_LSWI: | ||
428 | case INST_STSWI: | ||
429 | num_bytes = (NB_RB == 0) ? 32 : NB_RB; | ||
430 | break; | ||
431 | default: | ||
432 | return -EINVAL; | ||
433 | } | ||
434 | |||
435 | while (num_bytes != 0) | ||
436 | { | ||
437 | u8 val; | ||
438 | u32 shift = 8 * (3 - (pos & 0x3)); | ||
439 | |||
440 | switch ((instword & INST_STRING_MASK)) { | ||
441 | case INST_LSWX: | ||
442 | case INST_LSWI: | ||
443 | if (get_user(val, (u8 __user *)EA)) | ||
444 | return -EFAULT; | ||
445 | /* first time updating this reg, | ||
446 | * zero it out */ | ||
447 | if (pos == 0) | ||
448 | regs->gpr[rT] = 0; | ||
449 | regs->gpr[rT] |= val << shift; | ||
450 | break; | ||
451 | case INST_STSWI: | ||
452 | case INST_STSWX: | ||
453 | val = regs->gpr[rT] >> shift; | ||
454 | if (put_user(val, (u8 __user *)EA)) | ||
455 | return -EFAULT; | ||
456 | break; | ||
457 | } | ||
458 | /* move EA to next address */ | ||
459 | EA += 1; | ||
460 | num_bytes--; | ||
461 | |||
462 | /* manage our position within the register */ | ||
463 | if (++pos == 4) { | ||
464 | pos = 0; | ||
465 | if (++rT == 32) | ||
466 | rT = 0; | ||
467 | } | ||
468 | } | ||
469 | |||
470 | return 0; | ||
471 | } | ||
472 | |||
/*
 * Try to emulate a user-mode instruction that raised an illegal or
 * privileged instruction exception.  Handles mfspr rD,PVR; dcba
 * (treated as a no-op); mcrxr; and the load/store string forms.
 * Returns 0 on success, -EFAULT if the instruction fetch (or string
 * data access) faulted, -EINVAL if we cannot emulate it.
 */
static int emulate_instruction(struct pt_regs *regs)
{
	u32 instword;
	u32 rd;

	/* only emulate on behalf of user code */
	if (!user_mode(regs))
		return -EINVAL;
	CHECK_FULL_REGS(regs);

	/* fetch the faulting instruction from user space */
	if (get_user(instword, (u32 __user *)(regs->nip)))
		return -EFAULT;

	/* Emulate the mfspr rD, PVR.
	 */
	if ((instword & INST_MFSPR_PVR_MASK) == INST_MFSPR_PVR) {
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_PVR);
		return 0;
	}

	/* Emulating the dcba insn is just a no-op. */
	if ((instword & INST_DCBA_MASK) == INST_DCBA)
		return 0;

	/* Emulate the mcrxr insn: move XER[0:3] into the selected CR
	 * field, then clear those bits in XER. */
	if ((instword & INST_MCRXR_MASK) == INST_MCRXR) {
		int shift = (instword >> 21) & 0x1c;
		unsigned long msk = 0xf0000000UL >> shift;

		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
		regs->xer &= ~0xf0000000UL;
		return 0;
	}

	/* Emulate load/store string insn. */
	if ((instword & INST_STRING_GEN_MASK) == INST_STRING)
		return emulate_string_inst(regs, instword);

	return -EINVAL;
}
513 | |||
514 | /* | ||
515 | * After we have successfully emulated an instruction, we have to | ||
516 | * check if the instruction was being single-stepped, and if so, | ||
517 | * pretend we got a single-step exception. This was pointed out | ||
518 | * by Kumar Gala. -- paulus | ||
519 | */ | ||
520 | static void emulate_single_step(struct pt_regs *regs) | ||
521 | { | ||
522 | if (single_stepping(regs)) { | ||
523 | clear_single_step(regs); | ||
524 | _exception(SIGTRAP, regs, TRAP_TRACE, 0); | ||
525 | } | ||
526 | } | ||
527 | |||
528 | /* | ||
529 | * Look through the list of trap instructions that are used for BUG(), | ||
530 | * BUG_ON() and WARN_ON() and see if we hit one. At this point we know | ||
531 | * that the exception was caused by a trap instruction of some kind. | ||
532 | * Returns 1 if we should continue (i.e. it was a WARN_ON) or 0 | ||
533 | * otherwise. | ||
534 | */ | ||
535 | extern struct bug_entry __start___bug_table[], __stop___bug_table[]; | ||
536 | |||
537 | #ifndef CONFIG_MODULES | ||
538 | #define module_find_bug(x) NULL | ||
539 | #endif | ||
540 | |||
/*
 * Look up the bug_entry for a trap at `bugaddr': search the kernel's
 * built-in __bug_table first, then fall back to the module tables
 * (module_find_bug is a NULL stub when CONFIG_MODULES is off).
 * Returns NULL if the address is not a known BUG/WARN trap site.
 */
static struct bug_entry *find_bug(unsigned long bugaddr)
{
	struct bug_entry *bug;

	for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
		if (bugaddr == bug->bug_addr)
			return bug;
	return module_find_bug(bugaddr);
}
550 | |||
/*
 * Handle a kernel-mode trap instruction that may be a BUG()/WARN_ON()
 * site.  Returns 1 if it was a WARN_ON (caller should step over the
 * trap and continue), 0 for a genuine BUG or for traps that are not
 * ours (user mode, address below PAGE_OFFSET, no table entry).
 */
int check_bug_trap(struct pt_regs *regs)
{
	struct bug_entry *bug;
	unsigned long addr;

	if (regs->msr & MSR_PR)
		return 0;	/* not in kernel */
	addr = regs->nip;	/* address of trap instruction */
	if (addr < PAGE_OFFSET)
		return 0;	/* not a kernel text address */
	bug = find_bug(regs->nip);
	if (bug == NULL)
		return 0;
	if (bug->line & BUG_WARNING_TRAP) {
		/* this is a WARN_ON rather than BUG/BUG_ON */
#ifdef CONFIG_XMON
		xmon_printf(KERN_ERR "Badness in %s at %s:%d\n",
			    bug->function, bug->file,
			    bug->line & ~BUG_WARNING_TRAP);
#endif /* CONFIG_XMON */
		printk(KERN_ERR "Badness in %s at %s:%d\n",
		       bug->function, bug->file,
		       bug->line & ~BUG_WARNING_TRAP);
		dump_stack();
		return 1;
	}
	/* a real BUG: report it and drop into xmon if configured */
#ifdef CONFIG_XMON
	xmon_printf(KERN_CRIT "kernel BUG in %s at %s:%d!\n",
		    bug->function, bug->file, bug->line);
	xmon(regs);
#endif /* CONFIG_XMON */
	printk(KERN_CRIT "kernel BUG in %s at %s:%d!\n",
	       bug->function, bug->file, bug->line);

	return 0;
}
587 | |||
/*
 * Program check exception: dispatch on the reason bits to IEEE FP
 * exception delivery, trap/BUG handling, or illegal/privileged
 * instruction emulation, falling back to SIGILL.
 */
void ProgramCheckException(struct pt_regs *regs)
{
	unsigned int reason = get_reason(regs);
	extern int do_mathemu(struct pt_regs *regs);

#ifdef CONFIG_MATH_EMULATION
	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
	 * but there seems to be a hardware bug on the 405GP (RevD)
	 * that means ESR is sometimes set incorrectly - either to
	 * ESR_DST (!?) or 0.  In the process of chasing this with the
	 * hardware people - not sure if it can happen on any illegal
	 * instruction or only on FP instructions, whether there is a
	 * pattern to occurrences etc. -dgibson 31/Mar/2003 */
	if (!(reason & REASON_TRAP) && do_mathemu(regs) == 0) {
		emulate_single_step(regs);
		return;
	}
#endif /* CONFIG_MATH_EMULATION */

	if (reason & REASON_FP) {
		/* IEEE FP exception */
		int code = 0;
		u32 fpscr;

		/* We must make sure the FP state is consistent with
		 * our MSR_FP in regs
		 */
		preempt_disable();
		if (regs->msr & MSR_FP)
			giveup_fpu(current);
		preempt_enable();

		/* classify by the highest-priority enabled exception */
		fpscr = current->thread.fpscr;
		fpscr &= fpscr << 22;	/* mask summary bits with enables */
		if (fpscr & FPSCR_VX)
			code = FPE_FLTINV;
		else if (fpscr & FPSCR_OX)
			code = FPE_FLTOVF;
		else if (fpscr & FPSCR_UX)
			code = FPE_FLTUND;
		else if (fpscr & FPSCR_ZX)
			code = FPE_FLTDIV;
		else if (fpscr & FPSCR_XX)
			code = FPE_FLTRES;
		_exception(SIGFPE, regs, code, regs->nip);
		return;
	}

	if (reason & REASON_TRAP) {
		/* trap exception */
		if (debugger_bpt(regs))
			return;
		if (check_bug_trap(regs)) {
			/* it was a WARN_ON: skip the trap instruction */
			regs->nip += 4;
			return;
		}
		_exception(SIGTRAP, regs, TRAP_BRKPT, 0);
		return;
	}

	/* Try to emulate it if we should. */
	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
		switch (emulate_instruction(regs)) {
		case 0:
			regs->nip += 4;
			emulate_single_step(regs);
			return;
		case -EFAULT:
			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
			return;
		}
	}

	if (reason & REASON_PRIVILEGED)
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	else
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
666 | |||
/*
 * Trace/single-step exception: clear the trace bits so we don't trap
 * again, let a debugger claim the step, otherwise deliver SIGTRAP.
 */
void SingleStepException(struct pt_regs *regs)
{
	regs->msr &= ~(MSR_SE | MSR_BE);  /* Turn off 'trace' bits */
	if (debugger_sstep(regs))
		return;
	_exception(SIGTRAP, regs, TRAP_TRACE, 0);
}
674 | |||
/*
 * Alignment exception: try to fix up the unaligned access in
 * software (fix_alignment); on success skip the instruction.  A bad
 * operand address becomes SIGSEGV (user) or a kernel page-fault
 * fixup; anything else is reported as SIGBUS.
 */
void AlignmentException(struct pt_regs *regs)
{
	int fixed;

	fixed = fix_alignment(regs);
	if (fixed == 1) {
		regs->nip += 4;	/* skip over emulated instruction */
		return;
	}
	if (fixed == -EFAULT) {
		/* fixed == -EFAULT means the operand address was bad */
		if (user_mode(regs))
			_exception(SIGSEGV, regs, SEGV_ACCERR, regs->dar);
		else
			bad_page_fault(regs, regs->dar, SIGSEGV);
		return;
	}
	_exception(SIGBUS, regs, BUS_ADRALN, regs->dar);
}
694 | |||
/*
 * Kernel stack overflow detected: log the offending task and stack
 * pointer, give a debugger a chance, then panic - the kernel stack
 * cannot be trusted any more.
 */
void StackOverflow(struct pt_regs *regs)
{
	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
	       current, regs->gpr[1]);
	debugger(regs);
	show_regs(regs);
	panic("kernel stack overflow");
}
703 | |||
/*
 * An exception occurred from a state we cannot safely return to
 * (MSR_RI clear).  Log it, call the debugger, and die.
 */
void nonrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
	       regs->nip, regs->msr);
	debugger(regs);
	die("nonrecoverable exception", regs, SIGKILL);
}
711 | |||
/*
 * Debug helper: print one line describing the system call being made
 * by the current task (PC, link register, syscall number in r0,
 * result in r3, with CR0.SO indicating an error return).
 */
void trace_syscall(struct pt_regs *regs)
{
	printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n",
	       current, current->pid, regs->nip, regs->link, regs->gpr[0],
	       regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
}
718 | |||
719 | #ifdef CONFIG_8xx | ||
/*
 * 8xx software FPU emulation trap.  Runs the full math emulator when
 * configured, otherwise the minimal Soft_emulate_8xx helper, then
 * converts the result into the appropriate signal (or a faked
 * single-step trap on success).  Kernel-mode hits are fatal.
 */
void SoftwareEmulation(struct pt_regs *regs)
{
	extern int do_mathemu(struct pt_regs *);
	extern int Soft_emulate_8xx(struct pt_regs *);
	int errcode;

	CHECK_FULL_REGS(regs);

	if (!user_mode(regs)) {
		debugger(regs);
		die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
	}

#ifdef CONFIG_MATH_EMULATION
	errcode = do_mathemu(regs);
#else
	errcode = Soft_emulate_8xx(regs);
#endif
	if (errcode) {
		if (errcode > 0)
			_exception(SIGFPE, regs, 0, 0);		/* FP exception raised */
		else if (errcode == -EFAULT)
			_exception(SIGSEGV, regs, 0, 0);	/* bad operand address */
		else
			_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
	} else
		emulate_single_step(regs);
}
748 | #endif /* CONFIG_8xx */ | ||
749 | |||
750 | #if defined(CONFIG_40x) || defined(CONFIG_BOOKE) | ||
751 | |||
/*
 * 40x/Book-E debug exception.  Currently only the instruction
 * completion (single-step) event is handled: stop further stepping
 * and deliver a SIGTRAP trace trap, giving a kernel debugger first
 * refusal for kernel-mode steps.
 */
void DebugException(struct pt_regs *regs, unsigned long debug_status)
{
	if (debug_status & DBSR_IC) {	/* instruction completion */
		regs->msr &= ~MSR_DE;	/* don't trap on the next instruction */
		if (user_mode(regs)) {
			current->thread.dbcr0 &= ~DBCR0_IC;
		} else {
			/* Disable instruction completion */
			mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
			/* Clear the instruction completion event */
			mtspr(SPRN_DBSR, DBSR_IC);
			if (debugger_sstep(regs))
				return;
		}
		_exception(SIGTRAP, regs, TRAP_TRACE, 0);
	}
}
#endif /* CONFIG_40x || CONFIG_BOOKE */
770 | |||
771 | #if !defined(CONFIG_TAU_INT) | ||
/*
 * Thermal-assist unit trap with no dedicated handler configured:
 * just log where it happened.
 */
void TAUException(struct pt_regs *regs)
{
	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n",
	       regs->nip, regs->msr, regs->trap, print_tainted());
}
#endif /* CONFIG_TAU_INT */
778 | |||
/*
 * AltiVec-unavailable exception.  For a user task on a kernel built
 * without AltiVec support this is an illegal instruction.  If the
 * kernel itself used AltiVec without setting MSR_VEC first, complain
 * (only for the first few occurrences) but set MSR_VEC and continue.
 */
void AltivecUnavailException(struct pt_regs *regs)
{
	static int kernel_altivec_count;

#ifndef CONFIG_ALTIVEC
	if (user_mode(regs)) {
		/* A user program has executed an altivec instruction,
		   but this kernel doesn't support altivec. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}
#endif
	/* The kernel has executed an altivec instruction without
	   first enabling altivec.  Whinge but let it do it. */
	if (++kernel_altivec_count < 10)
		printk(KERN_ERR "AltiVec used in kernel (task=%p, pc=%lx)\n",
		       current, regs->nip);
	regs->msr |= MSR_VEC;
}
798 | |||
799 | #ifdef CONFIG_ALTIVEC | ||
/*
 * AltiVec assist exception: the hardware trapped on a vector
 * operation it could not complete itself.  Flush the task's live
 * vector state to the thread struct, attempt software emulation
 * (emulate_altivec), and on failure either raise SIGSEGV (couldn't
 * read the instruction) or set the VSCR non-Java bit and move on.
 */
void AltivecAssistException(struct pt_regs *regs)
{
	int err;

	/* make sure thread.vr[] holds the task's current vector state */
	preempt_disable();
	if (regs->msr & MSR_VEC)
		giveup_altivec(current);
	preempt_enable();

	err = emulate_altivec(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else {
		/* didn't recognize the instruction */
		/* XXX quick hack for now: set the non-Java bit in the VSCR */
		printk(KERN_ERR "unrecognized altivec instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
		current->thread.vscr.u[3] |= 0x10000;
	}
}
827 | #endif /* CONFIG_ALTIVEC */ | ||
828 | |||
/*
 * Performance monitor interrupt: forward to the handler installed in
 * the perf_irq hook.
 */
void PerformanceMonitorException(struct pt_regs *regs)
{
	perf_irq(regs);
}
833 | |||
834 | #ifdef CONFIG_FSL_BOOKE | ||
/*
 * e500 cache-locking exception handler.  ESR_DLK/ESR_ILK indicate a
 * data/instruction cache-locking attempt.
 */
void CacheLockingException(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	/* We treat cache locking instructions from the user
	 * as priv ops, in the future we could try to do
	 * something smarter
	 */
	if (error_code & (ESR_DLK|ESR_ILK))
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	return;
}
846 | #endif /* CONFIG_FSL_BOOKE */ | ||
847 | |||
848 | #ifdef CONFIG_SPE | ||
/*
 * SPE floating-point exception.  Inspect SPEFSCR together with the
 * task's PR_FP_EXC_* trap-enable mode to classify the event, set the
 * corresponding sticky status bit (the hardware does not always do
 * so), and deliver SIGFPE with the matching si_code.
 */
void SPEFloatingPointException(struct pt_regs *regs)
{
	unsigned long spefscr;
	int fpexc_mode;
	int code = 0;

	spefscr = current->thread.spefscr;
	fpexc_mode = current->thread.fpexc_mode;

	/* Hardware does not necessarily set sticky
	 * underflow/overflow/invalid flags */
	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
		code = FPE_FLTOVF;
		spefscr |= SPEFSCR_FOVFS;
	}
	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
		code = FPE_FLTUND;
		spefscr |= SPEFSCR_FUNFS;
	}
	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
		code = FPE_FLTDIV;
	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
		code = FPE_FLTINV;
		spefscr |= SPEFSCR_FINVS;
	}
	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
		code = FPE_FLTRES;

	/* write back the (possibly updated) sticky bits */
	current->thread.spefscr = spefscr;

	_exception(SIGFPE, regs, code, regs->nip);
	return;
}
882 | #endif | ||
883 | |||
/*
 * Hook called from generic kernel init.  Nothing to do here on ppc32;
 * presumably the exception vectors are installed by the head_* boot
 * code (see this directory's Makefile) - so this is an empty stub.
 */
void __init trap_init(void)
{
}
diff --git a/arch/ppc/kernel/vecemu.c b/arch/ppc/kernel/vecemu.c new file mode 100644 index 000000000000..604d0947cb20 --- /dev/null +++ b/arch/ppc/kernel/vecemu.c | |||
@@ -0,0 +1,345 @@ | |||
1 | /* | ||
2 | * Routines to emulate some Altivec/VMX instructions, specifically | ||
3 | * those that can trap when given denormalized operands in Java mode. | ||
4 | */ | ||
5 | #include <linux/kernel.h> | ||
6 | #include <linux/errno.h> | ||
7 | #include <linux/sched.h> | ||
8 | #include <asm/ptrace.h> | ||
9 | #include <asm/processor.h> | ||
10 | #include <asm/uaccess.h> | ||
11 | |||
12 | /* Functions in vector.S */ | ||
13 | extern void vaddfp(vector128 *dst, vector128 *a, vector128 *b); | ||
14 | extern void vsubfp(vector128 *dst, vector128 *a, vector128 *b); | ||
15 | extern void vmaddfp(vector128 *dst, vector128 *a, vector128 *b, vector128 *c); | ||
16 | extern void vnmsubfp(vector128 *dst, vector128 *a, vector128 *b, vector128 *c); | ||
17 | extern void vrefp(vector128 *dst, vector128 *src); | ||
18 | extern void vrsqrtefp(vector128 *dst, vector128 *src); | ||
19 | extern void vexptep(vector128 *dst, vector128 *src); | ||
20 | |||
/*
 * Table of 2^(i/8) for i = 0..7, scaled by 2^23 (so 0x800000 == 1.0
 * and 0xb504f3 == 2^0.5).  Used by eexp2() for its table-lookup step.
 */
static unsigned int exp2s[8] = {
	0x800000,
	0x8b95c2,
	0x9837f0,
	0xa5fed7,
	0xb504f3,
	0xc5672a,
	0xd744fd,
	0xeac0c7
};
31 | |||
32 | /* | ||
33 | * Computes an estimate of 2^x. The `s' argument is the 32-bit | ||
34 | * single-precision floating-point representation of x. | ||
35 | */ | ||
36 | static unsigned int eexp2(unsigned int s) | ||
37 | { | ||
38 | int exp, pwr; | ||
39 | unsigned int mant, frac; | ||
40 | |||
41 | /* extract exponent field from input */ | ||
42 | exp = ((s >> 23) & 0xff) - 127; | ||
43 | if (exp > 7) { | ||
44 | /* check for NaN input */ | ||
45 | if (exp == 128 && (s & 0x7fffff) != 0) | ||
46 | return s | 0x400000; /* return QNaN */ | ||
47 | /* 2^-big = 0, 2^+big = +Inf */ | ||
48 | return (s & 0x80000000)? 0: 0x7f800000; /* 0 or +Inf */ | ||
49 | } | ||
50 | if (exp < -23) | ||
51 | return 0x3f800000; /* 1.0 */ | ||
52 | |||
53 | /* convert to fixed point integer in 9.23 representation */ | ||
54 | pwr = (s & 0x7fffff) | 0x800000; | ||
55 | if (exp > 0) | ||
56 | pwr <<= exp; | ||
57 | else | ||
58 | pwr >>= -exp; | ||
59 | if (s & 0x80000000) | ||
60 | pwr = -pwr; | ||
61 | |||
62 | /* extract integer part, which becomes exponent part of result */ | ||
63 | exp = (pwr >> 23) + 126; | ||
64 | if (exp >= 254) | ||
65 | return 0x7f800000; | ||
66 | if (exp < -23) | ||
67 | return 0; | ||
68 | |||
69 | /* table lookup on top 3 bits of fraction to get mantissa */ | ||
70 | mant = exp2s[(pwr >> 20) & 7]; | ||
71 | |||
72 | /* linear interpolation using remaining 20 bits of fraction */ | ||
73 | asm("mulhwu %0,%1,%2" : "=r" (frac) | ||
74 | : "r" (pwr << 12), "r" (0x172b83ff)); | ||
75 | asm("mulhwu %0,%1,%2" : "=r" (frac) : "r" (frac), "r" (mant)); | ||
76 | mant += frac; | ||
77 | |||
78 | if (exp >= 0) | ||
79 | return mant + (exp << 23); | ||
80 | |||
81 | /* denormalized result */ | ||
82 | exp = -exp; | ||
83 | mant += 1 << (exp - 1); | ||
84 | return mant >> exp; | ||
85 | } | ||
86 | |||
87 | /* | ||
88 | * Computes an estimate of log_2(x). The `s' argument is the 32-bit | ||
89 | * single-precision floating-point representation of x. | ||
90 | */ | ||
91 | static unsigned int elog2(unsigned int s) | ||
92 | { | ||
93 | int exp, mant, lz, frac; | ||
94 | |||
95 | exp = s & 0x7f800000; | ||
96 | mant = s & 0x7fffff; | ||
97 | if (exp == 0x7f800000) { /* Inf or NaN */ | ||
98 | if (mant != 0) | ||
99 | s |= 0x400000; /* turn NaN into QNaN */ | ||
100 | return s; | ||
101 | } | ||
102 | if ((exp | mant) == 0) /* +0 or -0 */ | ||
103 | return 0xff800000; /* return -Inf */ | ||
104 | |||
105 | if (exp == 0) { | ||
106 | /* denormalized */ | ||
107 | asm("cntlzw %0,%1" : "=r" (lz) : "r" (mant)); | ||
108 | mant <<= lz - 8; | ||
109 | exp = (-118 - lz) << 23; | ||
110 | } else { | ||
111 | mant |= 0x800000; | ||
112 | exp -= 127 << 23; | ||
113 | } | ||
114 | |||
115 | if (mant >= 0xb504f3) { /* 2^0.5 * 2^23 */ | ||
116 | exp |= 0x400000; /* 0.5 * 2^23 */ | ||
117 | asm("mulhwu %0,%1,%2" : "=r" (mant) | ||
118 | : "r" (mant), "r" (0xb504f334)); /* 2^-0.5 * 2^32 */ | ||
119 | } | ||
120 | if (mant >= 0x9837f0) { /* 2^0.25 * 2^23 */ | ||
121 | exp |= 0x200000; /* 0.25 * 2^23 */ | ||
122 | asm("mulhwu %0,%1,%2" : "=r" (mant) | ||
123 | : "r" (mant), "r" (0xd744fccb)); /* 2^-0.25 * 2^32 */ | ||
124 | } | ||
125 | if (mant >= 0x8b95c2) { /* 2^0.125 * 2^23 */ | ||
126 | exp |= 0x100000; /* 0.125 * 2^23 */ | ||
127 | asm("mulhwu %0,%1,%2" : "=r" (mant) | ||
128 | : "r" (mant), "r" (0xeac0c6e8)); /* 2^-0.125 * 2^32 */ | ||
129 | } | ||
130 | if (mant > 0x800000) { /* 1.0 * 2^23 */ | ||
131 | /* calculate (mant - 1) * 1.381097463 */ | ||
132 | /* 1.381097463 == 0.125 / (2^0.125 - 1) */ | ||
133 | asm("mulhwu %0,%1,%2" : "=r" (frac) | ||
134 | : "r" ((mant - 0x800000) << 1), "r" (0xb0c7cd3a)); | ||
135 | exp += frac; | ||
136 | } | ||
137 | s = exp & 0x80000000; | ||
138 | if (exp != 0) { | ||
139 | if (s) | ||
140 | exp = -exp; | ||
141 | asm("cntlzw %0,%1" : "=r" (lz) : "r" (exp)); | ||
142 | lz = 8 - lz; | ||
143 | if (lz > 0) | ||
144 | exp >>= lz; | ||
145 | else if (lz < 0) | ||
146 | exp <<= -lz; | ||
147 | s += ((lz + 126) << 23) + exp; | ||
148 | } | ||
149 | return s; | ||
150 | } | ||
151 | |||
#define VSCR_SAT	1	/* saturation bit in the VSCR */

/*
 * Convert the single-precision float whose bit pattern is `x',
 * scaled by 2^scale, to a signed 32-bit integer, truncating toward
 * zero and saturating out-of-range values (which also sets VSCR_SAT
 * in *vscrp).  NaNs convert to 0.
 */
static int ctsxs(unsigned int x, int scale, unsigned int *vscrp)
{
	int exp = (x >> 23) & 0xff;
	int mant = x & 0x7fffff;

	if (exp == 255 && mant != 0)
		return 0;			/* NaN -> 0 */
	exp += scale - 127;			/* unbiased, scaled exponent */
	if (exp < 0)
		return 0;			/* |value| < 1 chops to 0 */
	if (exp >= 31) {
		/* out of range: saturate, unless it is exactly -2^31 */
		if (x + (scale << 23) != 0xcf000000)
			*vscrp |= VSCR_SAT;
		return (x & 0x80000000)? 0x80000000: 0x7fffffff;
	}
	mant |= 0x800000;			/* implicit leading one */
	mant = (mant << 7) >> (30 - exp);	/* align, drop fraction bits */
	return (x & 0x80000000)? -mant: mant;
}

/*
 * Convert the single-precision float whose bit pattern is `x',
 * scaled by 2^scale, to an unsigned 32-bit integer, truncating
 * toward zero.  Negative and too-large values saturate (to 0 and
 * 0xffffffff respectively) and set VSCR_SAT in *vscrp; NaNs -> 0.
 */
static unsigned int ctuxs(unsigned int x, int scale, unsigned int *vscrp)
{
	int exp = (x >> 23) & 0xff;
	unsigned int mant = x & 0x7fffff;

	if (exp == 255 && mant != 0)
		return 0;			/* NaN -> 0 */
	exp += scale - 127;			/* unbiased, scaled exponent */
	if (exp < 0)
		return 0;			/* |value| < 1 chops to 0 */
	if (x & 0x80000000) {
		/* negative values saturate to 0 */
		*vscrp |= VSCR_SAT;
		return 0;
	}
	if (exp >= 32) {
		/* too large: saturate to all-ones */
		*vscrp |= VSCR_SAT;
		return 0xffffffff;
	}
	mant |= 0x800000;			/* implicit leading one */
	return (mant << 8) >> (31 - exp);	/* align, drop fraction bits */
}
202 | |||
/*
 * Round the single-precision value with bit pattern `x' to an
 * integral value, truncating towards zero; returns the result as
 * raw single-precision bits.
 */
static unsigned int rfiz(unsigned int x)
{
	int e = (int)((x >> 23) & 0xff) - 127;	/* unbiased exponent */

	if (e == 128 && (x & 0x7fffff))
		return x | 0x400000;	/* NaN -> quiet NaN */
	if (e < 0)
		return x & 0x80000000;	/* |x| < 1.0 truncates to +/-0 */
	if (e >= 23)
		return x;		/* already integral (or Inf) */
	/* clear all fraction bits below the binary point */
	return x & ~(0x7fffffu >> e);
}
217 | |||
/*
 * Round the single-precision value with bit pattern `x' to an
 * integral value, rounding away from zero (towards +/- infinity);
 * returns the result as raw single-precision bits.
 */
static unsigned int rfii(unsigned int x)
{
	int e = (int)((x >> 23) & 0xff) - 127;	/* unbiased exponent */
	unsigned int frac_mask;

	if (e == 128 && (x & 0x7fffff))
		return x | 0x400000;	/* NaN -> quiet NaN */
	if (e >= 23)
		return x;		/* already integral (or Inf) */
	if (!(x & 0x7fffffff))
		return x;		/* +/-0 stays +/-0 */
	if (e < 0)
		return (x & 0x80000000) | 0x3f800000;	/* 0<|x|<1 -> +/-1.0 */
	frac_mask = 0x7fffffu >> e;
	/* adding the mask bumps the magnitude up to the next integer;
	   a mantissa carry into the exponent is fine and can never
	   reach the sign bit */
	return (x + frac_mask) & ~frac_mask;
}
238 | |||
/*
 * Round the single-precision value with bit pattern `x' to the
 * nearest integral value (halfway cases away from zero); returns
 * the result as raw single-precision bits.
 */
static unsigned int rfin(unsigned int x)
{
	int e = (int)((x >> 23) & 0xff) - 127;	/* unbiased exponent */
	unsigned int half, frac_mask;

	if (e == 128 && (x & 0x7fffff))
		return x | 0x400000;	/* NaN -> quiet NaN */
	if (e >= 23)
		return x;		/* already integral (or Inf) */
	if (e == -1)
		return (x & 0x80000000) | 0x3f800000;	/* 0.5<=|x|<1 -> +/-1.0 */
	if (e < -1)
		return x & 0x80000000;	/* |x| < 0.5 -> +/-0 */
	half = 0x400000u >> e;		/* 0.5 ulp-of-integer in mantissa units */
	frac_mask = 0x7fffffu >> e;
	/* add 0.5 to the magnitude and chop the fraction bits */
	return (x + half) & ~frac_mask;
}
258 | |||
/*
 * Software-emulate the AltiVec instruction at regs->nip, operating
 * directly on current->thread.vr[] (caller must have flushed the
 * live vector state there first).  Handles the FP arithmetic and
 * estimate/rounding/conversion ops listed below.
 * Returns 0 on success, -EFAULT if the instruction could not be
 * read, -EINVAL if it is not one we emulate.
 */
int emulate_altivec(struct pt_regs *regs)
{
	unsigned int instr, i;
	unsigned int va, vb, vc, vd;
	vector128 *vrs;

	if (get_user(instr, (unsigned int __user *) regs->nip))
		return -EFAULT;
	if ((instr >> 26) != 4)
		return -EINVAL;		/* not an altivec instruction */
	/* decode the VA-form register fields */
	vd = (instr >> 21) & 0x1f;
	va = (instr >> 16) & 0x1f;
	vb = (instr >> 11) & 0x1f;
	vc = (instr >> 6) & 0x1f;

	vrs = current->thread.vr;
	switch (instr & 0x3f) {
	case 10:
		/* VX-form ops, selected by the vc field */
		switch (vc) {
		case 0:		/* vaddfp */
			vaddfp(&vrs[vd], &vrs[va], &vrs[vb]);
			break;
		case 1:		/* vsubfp */
			vsubfp(&vrs[vd], &vrs[va], &vrs[vb]);
			break;
		case 4:		/* vrefp */
			vrefp(&vrs[vd], &vrs[vb]);
			break;
		case 5:		/* vrsqrtefp */
			vrsqrtefp(&vrs[vd], &vrs[vb]);
			break;
		case 6:		/* vexptefp */
			for (i = 0; i < 4; ++i)
				vrs[vd].u[i] = eexp2(vrs[vb].u[i]);
			break;
		case 7:		/* vlogefp */
			for (i = 0; i < 4; ++i)
				vrs[vd].u[i] = elog2(vrs[vb].u[i]);
			break;
		case 8:		/* vrfin */
			for (i = 0; i < 4; ++i)
				vrs[vd].u[i] = rfin(vrs[vb].u[i]);
			break;
		case 9:		/* vrfiz */
			for (i = 0; i < 4; ++i)
				vrs[vd].u[i] = rfiz(vrs[vb].u[i]);
			break;
		case 10:	/* vrfip: round towards +Inf, i.e. down
				   in magnitude for negatives */
			for (i = 0; i < 4; ++i) {
				u32 x = vrs[vb].u[i];
				x = (x & 0x80000000)? rfiz(x): rfii(x);
				vrs[vd].u[i] = x;
			}
			break;
		case 11:	/* vrfim: round towards -Inf */
			for (i = 0; i < 4; ++i) {
				u32 x = vrs[vb].u[i];
				x = (x & 0x80000000)? rfii(x): rfiz(x);
				vrs[vd].u[i] = x;
			}
			break;
		case 14:	/* vctuxs: va holds the UIMM scale */
			for (i = 0; i < 4; ++i)
				vrs[vd].u[i] = ctuxs(vrs[vb].u[i], va,
						     &current->thread.vscr.u[3]);
			break;
		case 15:	/* vctsxs: va holds the UIMM scale */
			for (i = 0; i < 4; ++i)
				vrs[vd].u[i] = ctsxs(vrs[vb].u[i], va,
						     &current->thread.vscr.u[3]);
			break;
		default:
			return -EINVAL;
		}
		break;
	case 46:	/* vmaddfp */
		vmaddfp(&vrs[vd], &vrs[va], &vrs[vb], &vrs[vc]);
		break;
	case 47:	/* vnmsubfp */
		vnmsubfp(&vrs[vd], &vrs[va], &vrs[vb], &vrs[vc]);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
diff --git a/arch/ppc/kernel/vector.S b/arch/ppc/kernel/vector.S new file mode 100644 index 000000000000..82a21346bf80 --- /dev/null +++ b/arch/ppc/kernel/vector.S | |||
@@ -0,0 +1,217 @@ | |||
1 | #include <asm/ppc_asm.h> | ||
2 | #include <asm/processor.h> | ||
3 | |||
4 | /* | ||
5 | * The routines below are in assembler so we can closely control the | ||
6 | * usage of floating-point registers. These routines must be called | ||
7 | * with preempt disabled. | ||
8 | */ | ||
9 | .data | ||
10 | fpzero: | ||
11 | .long 0 | ||
12 | fpone: | ||
13 | .long 0x3f800000 /* 1.0 in single-precision FP */ | ||
14 | fphalf: | ||
15 | .long 0x3f000000 /* 0.5 in single-precision FP */ | ||
16 | |||
17 | .text | ||
18 | /* | ||
19 | * Internal routine to enable floating point and set FPSCR to 0. | ||
20 | * Don't call it from C; it doesn't use the normal calling convention. | ||
21 | */ | ||
22 | fpenable: | ||
23 | mfmsr r10 | ||
24 | ori r11,r10,MSR_FP | ||
25 | mtmsr r11 | ||
26 | isync | ||
27 | stfd fr0,24(r1) | ||
28 | stfd fr1,16(r1) | ||
29 | stfd fr31,8(r1) | ||
30 | lis r11,fpzero@ha | ||
31 | mffs fr31 | ||
32 | lfs fr1,fpzero@l(r11) | ||
33 | mtfsf 0xff,fr1 | ||
34 | blr | ||
35 | |||
36 | fpdisable: | ||
37 | mtfsf 0xff,fr31 | ||
38 | lfd fr31,8(r1) | ||
39 | lfd fr1,16(r1) | ||
40 | lfd fr0,24(r1) | ||
41 | mtmsr r10 | ||
42 | isync | ||
43 | blr | ||
44 | |||
45 | /* | ||
46 | * Vector add, floating point. | ||
47 | */ | ||
48 | .globl vaddfp | ||
49 | vaddfp: | ||
50 | stwu r1,-32(r1) | ||
51 | mflr r0 | ||
52 | stw r0,36(r1) | ||
53 | bl fpenable | ||
54 | li r0,4 | ||
55 | mtctr r0 | ||
56 | li r6,0 | ||
57 | 1: lfsx fr0,r4,r6 | ||
58 | lfsx fr1,r5,r6 | ||
59 | fadds fr0,fr0,fr1 | ||
60 | stfsx fr0,r3,r6 | ||
61 | addi r6,r6,4 | ||
62 | bdnz 1b | ||
63 | bl fpdisable | ||
64 | lwz r0,36(r1) | ||
65 | mtlr r0 | ||
66 | addi r1,r1,32 | ||
67 | blr | ||
68 | |||
69 | /* | ||
70 | * Vector subtract, floating point. | ||
71 | */ | ||
72 | .globl vsubfp | ||
73 | vsubfp: | ||
74 | stwu r1,-32(r1) | ||
75 | mflr r0 | ||
76 | stw r0,36(r1) | ||
77 | bl fpenable | ||
78 | li r0,4 | ||
79 | mtctr r0 | ||
80 | li r6,0 | ||
81 | 1: lfsx fr0,r4,r6 | ||
82 | lfsx fr1,r5,r6 | ||
83 | fsubs fr0,fr0,fr1 | ||
84 | stfsx fr0,r3,r6 | ||
85 | addi r6,r6,4 | ||
86 | bdnz 1b | ||
87 | bl fpdisable | ||
88 | lwz r0,36(r1) | ||
89 | mtlr r0 | ||
90 | addi r1,r1,32 | ||
91 | blr | ||
92 | |||
93 | /* | ||
94 | * Vector multiply and add, floating point. | ||
95 | */ | ||
96 | .globl vmaddfp | ||
97 | vmaddfp: | ||
98 | stwu r1,-48(r1) | ||
99 | mflr r0 | ||
100 | stw r0,52(r1) | ||
101 | bl fpenable | ||
102 | stfd fr2,32(r1) | ||
103 | li r0,4 | ||
104 | mtctr r0 | ||
105 | li r7,0 | ||
106 | 1: lfsx fr0,r4,r7 | ||
107 | lfsx fr1,r5,r7 | ||
108 | lfsx fr2,r6,r7 | ||
109 | fmadds fr0,fr0,fr2,fr1 | ||
110 | stfsx fr0,r3,r7 | ||
111 | addi r7,r7,4 | ||
112 | bdnz 1b | ||
113 | lfd fr2,32(r1) | ||
114 | bl fpdisable | ||
115 | lwz r0,52(r1) | ||
116 | mtlr r0 | ||
117 | addi r1,r1,48 | ||
118 | blr | ||
119 | |||
120 | /* | ||
121 | * Vector negative multiply and subtract, floating point. | ||
122 | */ | ||
123 | .globl vnmsubfp | ||
124 | vnmsubfp: | ||
125 | stwu r1,-48(r1) | ||
126 | mflr r0 | ||
127 | stw r0,52(r1) | ||
128 | bl fpenable | ||
129 | stfd fr2,32(r1) | ||
130 | li r0,4 | ||
131 | mtctr r0 | ||
132 | li r7,0 | ||
133 | 1: lfsx fr0,r4,r7 | ||
134 | lfsx fr1,r5,r7 | ||
135 | lfsx fr2,r6,r7 | ||
136 | fnmsubs fr0,fr0,fr2,fr1 | ||
137 | stfsx fr0,r3,r7 | ||
138 | addi r7,r7,4 | ||
139 | bdnz 1b | ||
140 | lfd fr2,32(r1) | ||
141 | bl fpdisable | ||
142 | lwz r0,52(r1) | ||
143 | mtlr r0 | ||
144 | addi r1,r1,48 | ||
145 | blr | ||
146 | |||
147 | /* | ||
148 | * Vector reciprocal estimate. We just compute 1.0/x. | ||
149 | * r3 -> destination, r4 -> source. | ||
150 | */ | ||
151 | .globl vrefp | ||
152 | vrefp: | ||
153 | stwu r1,-32(r1) | ||
154 | mflr r0 | ||
155 | stw r0,36(r1) | ||
156 | bl fpenable | ||
157 | lis r9,fpone@ha | ||
158 | li r0,4 | ||
159 | lfs fr1,fpone@l(r9) | ||
160 | mtctr r0 | ||
161 | li r6,0 | ||
162 | 1: lfsx fr0,r4,r6 | ||
163 | fdivs fr0,fr1,fr0 | ||
164 | stfsx fr0,r3,r6 | ||
165 | addi r6,r6,4 | ||
166 | bdnz 1b | ||
167 | bl fpdisable | ||
168 | lwz r0,36(r1) | ||
169 | mtlr r0 | ||
170 | addi r1,r1,32 | ||
171 | blr | ||
172 | |||
173 | /* | ||
174 | * Vector reciprocal square-root estimate, floating point. | ||
175 | * We use the frsqrte instruction for the initial estimate followed | ||
176 | * by 2 iterations of Newton-Raphson to get sufficient accuracy. | ||
177 | * r3 -> destination, r4 -> source. | ||
178 | */ | ||
179 | .globl vrsqrtefp | ||
180 | vrsqrtefp: | ||
181 | stwu r1,-48(r1) | ||
182 | mflr r0 | ||
183 | stw r0,52(r1) | ||
184 | bl fpenable | ||
185 | stfd fr2,32(r1) | ||
186 | stfd fr3,40(r1) | ||
187 | stfd fr4,48(r1) | ||
188 | stfd fr5,56(r1) | ||
189 | lis r9,fpone@ha | ||
190 | lis r8,fphalf@ha | ||
191 | li r0,4 | ||
192 | lfs fr4,fpone@l(r9) | ||
193 | lfs fr5,fphalf@l(r8) | ||
194 | mtctr r0 | ||
195 | li r6,0 | ||
196 | 1: lfsx fr0,r4,r6 | ||
197 | frsqrte fr1,fr0 /* r = frsqrte(s) */ | ||
198 | fmuls fr3,fr1,fr0 /* r * s */ | ||
199 | fmuls fr2,fr1,fr5 /* r * 0.5 */ | ||
200 | fnmsubs fr3,fr1,fr3,fr4 /* 1 - s * r * r */ | ||
201 | fmadds fr1,fr2,fr3,fr1 /* r = r + 0.5 * r * (1 - s * r * r) */ | ||
202 | fmuls fr3,fr1,fr0 /* r * s */ | ||
203 | fmuls fr2,fr1,fr5 /* r * 0.5 */ | ||
204 | fnmsubs fr3,fr1,fr3,fr4 /* 1 - s * r * r */ | ||
205 | fmadds fr1,fr2,fr3,fr1 /* r = r + 0.5 * r * (1 - s * r * r) */ | ||
206 | stfsx fr1,r3,r6 | ||
207 | addi r6,r6,4 | ||
208 | bdnz 1b | ||
209 | lfd fr5,56(r1) | ||
210 | lfd fr4,48(r1) | ||
211 | lfd fr3,40(r1) | ||
212 | lfd fr2,32(r1) | ||
213 | bl fpdisable | ||
214 | lwz r0,36(r1) | ||
215 | mtlr r0 | ||
216 | addi r1,r1,32 | ||
217 | blr | ||
diff --git a/arch/ppc/kernel/vmlinux.lds.S b/arch/ppc/kernel/vmlinux.lds.S new file mode 100644 index 000000000000..0c0e714b84de --- /dev/null +++ b/arch/ppc/kernel/vmlinux.lds.S | |||
@@ -0,0 +1,192 @@ | |||
1 | #include <asm-generic/vmlinux.lds.h> | ||
2 | |||
3 | OUTPUT_ARCH(powerpc:common) | ||
4 | jiffies = jiffies_64 + 4; | ||
5 | SECTIONS | ||
6 | { | ||
7 | /* Read-only sections, merged into text segment: */ | ||
8 | . = + SIZEOF_HEADERS; | ||
9 | .interp : { *(.interp) } | ||
10 | .hash : { *(.hash) } | ||
11 | .dynsym : { *(.dynsym) } | ||
12 | .dynstr : { *(.dynstr) } | ||
13 | .rel.text : { *(.rel.text) } | ||
14 | .rela.text : { *(.rela.text) } | ||
15 | .rel.data : { *(.rel.data) } | ||
16 | .rela.data : { *(.rela.data) } | ||
17 | .rel.rodata : { *(.rel.rodata) } | ||
18 | .rela.rodata : { *(.rela.rodata) } | ||
19 | .rel.got : { *(.rel.got) } | ||
20 | .rela.got : { *(.rela.got) } | ||
21 | .rel.ctors : { *(.rel.ctors) } | ||
22 | .rela.ctors : { *(.rela.ctors) } | ||
23 | .rel.dtors : { *(.rel.dtors) } | ||
24 | .rela.dtors : { *(.rela.dtors) } | ||
25 | .rel.bss : { *(.rel.bss) } | ||
26 | .rela.bss : { *(.rela.bss) } | ||
27 | .rel.plt : { *(.rel.plt) } | ||
28 | .rela.plt : { *(.rela.plt) } | ||
29 | /* .init : { *(.init) } =0*/ | ||
30 | .plt : { *(.plt) } | ||
31 | .text : | ||
32 | { | ||
33 | *(.text) | ||
34 | SCHED_TEXT | ||
35 | LOCK_TEXT | ||
36 | *(.fixup) | ||
37 | *(.got1) | ||
38 | __got2_start = .; | ||
39 | *(.got2) | ||
40 | __got2_end = .; | ||
41 | } | ||
42 | _etext = .; | ||
43 | PROVIDE (etext = .); | ||
44 | |||
45 | RODATA | ||
46 | .fini : { *(.fini) } =0 | ||
47 | .ctors : { *(.ctors) } | ||
48 | .dtors : { *(.dtors) } | ||
49 | |||
50 | .fixup : { *(.fixup) } | ||
51 | |||
52 | __ex_table : { | ||
53 | __start___ex_table = .; | ||
54 | *(__ex_table) | ||
55 | __stop___ex_table = .; | ||
56 | } | ||
57 | |||
58 | __bug_table : { | ||
59 | __start___bug_table = .; | ||
60 | *(__bug_table) | ||
61 | __stop___bug_table = .; | ||
62 | } | ||
63 | |||
64 | /* Read-write section, merged into data segment: */ | ||
65 | . = ALIGN(4096); | ||
66 | .data : | ||
67 | { | ||
68 | *(.data) | ||
69 | *(.data1) | ||
70 | *(.sdata) | ||
71 | *(.sdata2) | ||
72 | *(.got.plt) *(.got) | ||
73 | *(.dynamic) | ||
74 | CONSTRUCTORS | ||
75 | } | ||
76 | |||
77 | . = ALIGN(4096); | ||
78 | __nosave_begin = .; | ||
79 | .data_nosave : { *(.data.nosave) } | ||
80 | . = ALIGN(4096); | ||
81 | __nosave_end = .; | ||
82 | |||
83 | . = ALIGN(32); | ||
84 | .data.cacheline_aligned : { *(.data.cacheline_aligned) } | ||
85 | |||
86 | _edata = .; | ||
87 | PROVIDE (edata = .); | ||
88 | |||
89 | . = ALIGN(8192); | ||
90 | .data.init_task : { *(.data.init_task) } | ||
91 | |||
92 | . = ALIGN(4096); | ||
93 | __init_begin = .; | ||
94 | .init.text : { | ||
95 | _sinittext = .; | ||
96 | *(.init.text) | ||
97 | _einittext = .; | ||
98 | } | ||
99 | .init.data : { | ||
100 | *(.init.data); | ||
101 | __vtop_table_begin = .; | ||
102 | *(.vtop_fixup); | ||
103 | __vtop_table_end = .; | ||
104 | __ptov_table_begin = .; | ||
105 | *(.ptov_fixup); | ||
106 | __ptov_table_end = .; | ||
107 | } | ||
108 | . = ALIGN(16); | ||
109 | __setup_start = .; | ||
110 | .init.setup : { *(.init.setup) } | ||
111 | __setup_end = .; | ||
112 | __initcall_start = .; | ||
113 | .initcall.init : { | ||
114 | *(.initcall1.init) | ||
115 | *(.initcall2.init) | ||
116 | *(.initcall3.init) | ||
117 | *(.initcall4.init) | ||
118 | *(.initcall5.init) | ||
119 | *(.initcall6.init) | ||
120 | *(.initcall7.init) | ||
121 | } | ||
122 | __initcall_end = .; | ||
123 | |||
124 | __con_initcall_start = .; | ||
125 | .con_initcall.init : { *(.con_initcall.init) } | ||
126 | __con_initcall_end = .; | ||
127 | |||
128 | SECURITY_INIT | ||
129 | |||
130 | __start___ftr_fixup = .; | ||
131 | __ftr_fixup : { *(__ftr_fixup) } | ||
132 | __stop___ftr_fixup = .; | ||
133 | |||
134 | . = ALIGN(32); | ||
135 | __per_cpu_start = .; | ||
136 | .data.percpu : { *(.data.percpu) } | ||
137 | __per_cpu_end = .; | ||
138 | |||
139 | . = ALIGN(4096); | ||
140 | __initramfs_start = .; | ||
141 | .init.ramfs : { *(.init.ramfs) } | ||
142 | __initramfs_end = .; | ||
143 | |||
144 | . = ALIGN(4096); | ||
145 | __init_end = .; | ||
146 | |||
147 | . = ALIGN(4096); | ||
148 | __pmac_begin = .; | ||
149 | .pmac.text : { *(.pmac.text) } | ||
150 | .pmac.data : { *(.pmac.data) } | ||
151 | . = ALIGN(4096); | ||
152 | __pmac_end = .; | ||
153 | |||
154 | . = ALIGN(4096); | ||
155 | __prep_begin = .; | ||
156 | .prep.text : { *(.prep.text) } | ||
157 | .prep.data : { *(.prep.data) } | ||
158 | . = ALIGN(4096); | ||
159 | __prep_end = .; | ||
160 | |||
161 | . = ALIGN(4096); | ||
162 | __chrp_begin = .; | ||
163 | .chrp.text : { *(.chrp.text) } | ||
164 | .chrp.data : { *(.chrp.data) } | ||
165 | . = ALIGN(4096); | ||
166 | __chrp_end = .; | ||
167 | |||
168 | . = ALIGN(4096); | ||
169 | __openfirmware_begin = .; | ||
170 | .openfirmware.text : { *(.openfirmware.text) } | ||
171 | .openfirmware.data : { *(.openfirmware.data) } | ||
172 | . = ALIGN(4096); | ||
173 | __openfirmware_end = .; | ||
174 | |||
175 | __bss_start = .; | ||
176 | .bss : | ||
177 | { | ||
178 | *(.sbss) *(.scommon) | ||
179 | *(.dynbss) | ||
180 | *(.bss) | ||
181 | *(COMMON) | ||
182 | } | ||
183 | __bss_stop = .; | ||
184 | |||
185 | _end = . ; | ||
186 | PROVIDE (end = .); | ||
187 | |||
188 | /* Sections to be discarded. */ | ||
189 | /DISCARD/ : { | ||
190 | *(.exitcall.exit) | ||
191 | } | ||
192 | } | ||