author     Benjamin Herrenschmidt <benh@kernel.crashing.org>    2005-11-17 22:09:41 -0500
committer  Paul Mackerras <paulus@samba.org>                    2005-11-17 22:39:23 -0500
commit     5daf9071b527089b1bd5d9cb3a5354b83121550e
tree       3abf10c1f67975f3a7d0def22de261f395c325f1
parent     6defa38b3754c84cd3449447477aed81ea979407
[PATCH] powerpc: merge align.c
This patch merges align.c; the result isn't quite what was in ppc64 nor
what was in ppc32 :) It should implement all the functionality of both,
though. Kumar, since you played with that in the past, I suppose you
have some test cases for verifying that it works properly before I dig
out the 601 machine? :)
Since it's unlikely that I'll be able to test every scenario, code
inspection is most welcome.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
-rw-r--r--  arch/powerpc/kernel/Makefile                                          |   2
-rw-r--r--  arch/powerpc/kernel/align.c (renamed from arch/ppc64/kernel/align.c)  | 394
-rw-r--r--  arch/ppc/kernel/Makefile                                              |   4
-rw-r--r--  arch/ppc/kernel/align.c                                               | 410
-rw-r--r--  arch/ppc64/kernel/Makefile                                            |   2
-rw-r--r--  include/asm-powerpc/cputable.h                                        |  22
6 files changed, 280 insertions, 554 deletions
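For review context, the merged align.c documents a simple calling contract for fix_alignment(): return 1 when the access was emulated, 0 when the instruction can't be handled, and -EFAULT when the operand address is bad. The sketch below is not part of this patch; it is a minimal standalone illustration of how a caller (in the real kernel, the alignment exception path in traps.c) might act on those values. The stub name and the signal choices are assumptions, not taken from this commit.

```c
#include <stdio.h>

#define EFAULT 14	/* matches the kernel's errno value */

/* Stub standing in for the merged fix_alignment(); change the return
 * value to exercise each branch of the dispatch below. */
static int fix_alignment_stub(void)
{
	return 1;
}

int main(void)
{
	int fixed = fix_alignment_stub();

	if (fixed == 1)
		printf("emulated the access; skip past the faulting instruction\n");
	else if (fixed == -EFAULT)
		printf("operand address was bad; deliver SIGSEGV to the task\n");
	else	/* fixed == 0 */
		printf("not handled; deliver SIGBUS for the alignment error\n");
	return 0;
}
```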
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 4970e3721a84..0e679afb2e20 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -12,7 +12,7 @@ CFLAGS_btext.o += -fPIC
12 | endif | 12 | endif |
13 | 13 | ||
14 | obj-y := semaphore.o cputable.o ptrace.o syscalls.o \ | 14 | obj-y := semaphore.o cputable.o ptrace.o syscalls.o \ |
15 | irq.o signal_32.o pmc.o vdso.o | 15 | irq.o align.o signal_32.o pmc.o vdso.o |
16 | obj-y += vdso32/ | 16 | obj-y += vdso32/ |
17 | obj-$(CONFIG_PPC64) += setup_64.o binfmt_elf32.o sys_ppc32.o \ | 17 | obj-$(CONFIG_PPC64) += setup_64.o binfmt_elf32.o sys_ppc32.o \ |
18 | signal_64.o ptrace32.o systbl.o \ | 18 | signal_64.o ptrace32.o systbl.o \ |
diff --git a/arch/ppc64/kernel/align.c b/arch/powerpc/kernel/align.c
index 256d5b592aa1..faaec9c6f78f 100644
--- a/arch/ppc64/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -7,6 +7,9 @@
7 | * PowerPC 403GCX/405GP modifications. | 7 | * PowerPC 403GCX/405GP modifications. |
8 | * Copyright (c) 2001-2002 PPC64 team, IBM Corp | 8 | * Copyright (c) 2001-2002 PPC64 team, IBM Corp |
9 | * 64-bit and Power4 support | 9 | * 64-bit and Power4 support |
10 | * Copyright (c) 2005 Benjamin Herrenschmidt, IBM Corp | ||
11 | * <benh@kernel.crashing.org> | ||
12 | * Merge ppc32 and ppc64 implementations | ||
10 | * | 13 | * |
11 | * This program is free software; you can redistribute it and/or | 14 | * This program is free software; you can redistribute it and/or |
12 | * modify it under the terms of the GNU General Public License | 15 | * modify it under the terms of the GNU General Public License |
@@ -38,10 +41,15 @@ struct aligninfo { | |||
38 | #define F 8 /* to/from fp regs */ | 41 | #define F 8 /* to/from fp regs */ |
39 | #define U 0x10 /* update index register */ | 42 | #define U 0x10 /* update index register */ |
40 | #define M 0x20 /* multiple load/store */ | 43 | #define M 0x20 /* multiple load/store */ |
41 | #define SW 0x40 /* byte swap */ | 44 | #define SW 0x40 /* byte swap int or ... */ |
45 | #define S 0x40 /* ... single-precision fp */ | ||
46 | #define SX 0x40 /* byte count in XER */ | ||
47 | #define HARD 0x80 /* string, stwcx. */ | ||
42 | 48 | ||
43 | #define DCBZ 0x5f /* 8xx/82xx dcbz faults when cache not enabled */ | 49 | #define DCBZ 0x5f /* 8xx/82xx dcbz faults when cache not enabled */ |
44 | 50 | ||
51 | #define SWAP(a, b) (t = (a), (a) = (b), (b) = t) | ||
52 | |||
45 | /* | 53 | /* |
46 | * The PowerPC stores certain bits of the instruction that caused the | 54 | * The PowerPC stores certain bits of the instruction that caused the |
47 | * alignment exception in the DSISR register. This array maps those | 55 | * alignment exception in the DSISR register. This array maps those |
@@ -57,14 +65,14 @@ static struct aligninfo aligninfo[128] = { | |||
57 | { 2, LD+SE }, /* 00 0 0101: lha */ | 65 | { 2, LD+SE }, /* 00 0 0101: lha */ |
58 | { 2, ST }, /* 00 0 0110: sth */ | 66 | { 2, ST }, /* 00 0 0110: sth */ |
59 | { 4, LD+M }, /* 00 0 0111: lmw */ | 67 | { 4, LD+M }, /* 00 0 0111: lmw */ |
60 | { 4, LD+F }, /* 00 0 1000: lfs */ | 68 | { 4, LD+F+S }, /* 00 0 1000: lfs */ |
61 | { 8, LD+F }, /* 00 0 1001: lfd */ | 69 | { 8, LD+F }, /* 00 0 1001: lfd */ |
62 | { 4, ST+F }, /* 00 0 1010: stfs */ | 70 | { 4, ST+F+S }, /* 00 0 1010: stfs */ |
63 | { 8, ST+F }, /* 00 0 1011: stfd */ | 71 | { 8, ST+F }, /* 00 0 1011: stfd */ |
64 | INVALID, /* 00 0 1100 */ | 72 | INVALID, /* 00 0 1100 */ |
65 | { 8, LD }, /* 00 0 1101: ld */ | 73 | { 8, LD }, /* 00 0 1101: ld/ldu/lwa */ |
66 | INVALID, /* 00 0 1110 */ | 74 | INVALID, /* 00 0 1110 */ |
67 | { 8, ST }, /* 00 0 1111: std */ | 75 | { 8, ST }, /* 00 0 1111: std/stdu */ |
68 | { 4, LD+U }, /* 00 1 0000: lwzu */ | 76 | { 4, LD+U }, /* 00 1 0000: lwzu */ |
69 | INVALID, /* 00 1 0001 */ | 77 | INVALID, /* 00 1 0001 */ |
70 | { 4, ST+U }, /* 00 1 0010: stwu */ | 78 | { 4, ST+U }, /* 00 1 0010: stwu */ |
@@ -73,9 +81,9 @@ static struct aligninfo aligninfo[128] = { | |||
73 | { 2, LD+SE+U }, /* 00 1 0101: lhau */ | 81 | { 2, LD+SE+U }, /* 00 1 0101: lhau */ |
74 | { 2, ST+U }, /* 00 1 0110: sthu */ | 82 | { 2, ST+U }, /* 00 1 0110: sthu */ |
75 | { 4, ST+M }, /* 00 1 0111: stmw */ | 83 | { 4, ST+M }, /* 00 1 0111: stmw */ |
76 | { 4, LD+F+U }, /* 00 1 1000: lfsu */ | 84 | { 4, LD+F+S+U }, /* 00 1 1000: lfsu */ |
77 | { 8, LD+F+U }, /* 00 1 1001: lfdu */ | 85 | { 8, LD+F+U }, /* 00 1 1001: lfdu */ |
78 | { 4, ST+F+U }, /* 00 1 1010: stfsu */ | 86 | { 4, ST+F+S+U }, /* 00 1 1010: stfsu */ |
79 | { 8, ST+F+U }, /* 00 1 1011: stfdu */ | 87 | { 8, ST+F+U }, /* 00 1 1011: stfdu */ |
80 | INVALID, /* 00 1 1100 */ | 88 | INVALID, /* 00 1 1100 */ |
81 | INVALID, /* 00 1 1101 */ | 89 | INVALID, /* 00 1 1101 */ |
@@ -89,10 +97,10 @@ static struct aligninfo aligninfo[128] = { | |||
89 | { 4, LD+SE }, /* 01 0 0101: lwax */ | 97 | { 4, LD+SE }, /* 01 0 0101: lwax */ |
90 | INVALID, /* 01 0 0110 */ | 98 | INVALID, /* 01 0 0110 */ |
91 | INVALID, /* 01 0 0111 */ | 99 | INVALID, /* 01 0 0111 */ |
92 | { 0, LD }, /* 01 0 1000: lswx */ | 100 | { 4, LD+M+HARD+SX }, /* 01 0 1000: lswx */ |
93 | { 0, LD }, /* 01 0 1001: lswi */ | 101 | { 4, LD+M+HARD }, /* 01 0 1001: lswi */ |
94 | { 0, ST }, /* 01 0 1010: stswx */ | 102 | { 4, ST+M+HARD+SX }, /* 01 0 1010: stswx */ |
95 | { 0, ST }, /* 01 0 1011: stswi */ | 103 | { 4, ST+M+HARD }, /* 01 0 1011: stswi */ |
96 | INVALID, /* 01 0 1100 */ | 104 | INVALID, /* 01 0 1100 */ |
97 | { 8, LD+U }, /* 01 0 1101: ldu */ | 105 | { 8, LD+U }, /* 01 0 1101: ldu */ |
98 | INVALID, /* 01 0 1110 */ | 106 | INVALID, /* 01 0 1110 */ |
@@ -115,7 +123,7 @@ static struct aligninfo aligninfo[128] = { | |||
115 | INVALID, /* 01 1 1111 */ | 123 | INVALID, /* 01 1 1111 */ |
116 | INVALID, /* 10 0 0000 */ | 124 | INVALID, /* 10 0 0000 */ |
117 | INVALID, /* 10 0 0001 */ | 125 | INVALID, /* 10 0 0001 */ |
118 | { 0, ST }, /* 10 0 0010: stwcx. */ | 126 | INVALID, /* 10 0 0010: stwcx. */ |
119 | INVALID, /* 10 0 0011 */ | 127 | INVALID, /* 10 0 0011 */ |
120 | INVALID, /* 10 0 0100 */ | 128 | INVALID, /* 10 0 0100 */ |
121 | INVALID, /* 10 0 0101 */ | 129 | INVALID, /* 10 0 0101 */ |
@@ -144,7 +152,7 @@ static struct aligninfo aligninfo[128] = { | |||
144 | INVALID, /* 10 1 1100 */ | 152 | INVALID, /* 10 1 1100 */ |
145 | INVALID, /* 10 1 1101 */ | 153 | INVALID, /* 10 1 1101 */ |
146 | INVALID, /* 10 1 1110 */ | 154 | INVALID, /* 10 1 1110 */ |
147 | { L1_CACHE_BYTES, ST }, /* 10 1 1111: dcbz */ | 155 | { 0, ST+HARD }, /* 10 1 1111: dcbz */ |
148 | { 4, LD }, /* 11 0 0000: lwzx */ | 156 | { 4, LD }, /* 11 0 0000: lwzx */ |
149 | INVALID, /* 11 0 0001 */ | 157 | INVALID, /* 11 0 0001 */ |
150 | { 4, ST }, /* 11 0 0010: stwx */ | 158 | { 4, ST }, /* 11 0 0010: stwx */ |
@@ -153,9 +161,9 @@ static struct aligninfo aligninfo[128] = { | |||
153 | { 2, LD+SE }, /* 11 0 0101: lhax */ | 161 | { 2, LD+SE }, /* 11 0 0101: lhax */ |
154 | { 2, ST }, /* 11 0 0110: sthx */ | 162 | { 2, ST }, /* 11 0 0110: sthx */ |
155 | INVALID, /* 11 0 0111 */ | 163 | INVALID, /* 11 0 0111 */ |
156 | { 4, LD+F }, /* 11 0 1000: lfsx */ | 164 | { 4, LD+F+S }, /* 11 0 1000: lfsx */ |
157 | { 8, LD+F }, /* 11 0 1001: lfdx */ | 165 | { 8, LD+F }, /* 11 0 1001: lfdx */ |
158 | { 4, ST+F }, /* 11 0 1010: stfsx */ | 166 | { 4, ST+F+S }, /* 11 0 1010: stfsx */ |
159 | { 8, ST+F }, /* 11 0 1011: stfdx */ | 167 | { 8, ST+F }, /* 11 0 1011: stfdx */ |
160 | INVALID, /* 11 0 1100 */ | 168 | INVALID, /* 11 0 1100 */ |
161 | { 8, LD+M }, /* 11 0 1101: lmd */ | 169 | { 8, LD+M }, /* 11 0 1101: lmd */ |
@@ -169,9 +177,9 @@ static struct aligninfo aligninfo[128] = { | |||
169 | { 2, LD+SE+U }, /* 11 1 0101: lhaux */ | 177 | { 2, LD+SE+U }, /* 11 1 0101: lhaux */ |
170 | { 2, ST+U }, /* 11 1 0110: sthux */ | 178 | { 2, ST+U }, /* 11 1 0110: sthux */ |
171 | INVALID, /* 11 1 0111 */ | 179 | INVALID, /* 11 1 0111 */ |
172 | { 4, LD+F+U }, /* 11 1 1000: lfsux */ | 180 | { 4, LD+F+S+U }, /* 11 1 1000: lfsux */ |
173 | { 8, LD+F+U }, /* 11 1 1001: lfdux */ | 181 | { 8, LD+F+U }, /* 11 1 1001: lfdux */ |
174 | { 4, ST+F+U }, /* 11 1 1010: stfsux */ | 182 | { 4, ST+F+S+U }, /* 11 1 1010: stfsux */ |
175 | { 8, ST+F+U }, /* 11 1 1011: stfdux */ | 183 | { 8, ST+F+U }, /* 11 1 1011: stfdux */ |
176 | INVALID, /* 11 1 1100 */ | 184 | INVALID, /* 11 1 1100 */ |
177 | INVALID, /* 11 1 1101 */ | 185 | INVALID, /* 11 1 1101 */ |
@@ -179,45 +187,175 @@ static struct aligninfo aligninfo[128] = { | |||
179 | INVALID, /* 11 1 1111 */ | 187 | INVALID, /* 11 1 1111 */ |
180 | }; | 188 | }; |
181 | 189 | ||
182 | #define SWAP(a, b) (t = (a), (a) = (b), (b) = t) | 190 | /* |
183 | 191 | * Create a DSISR value from the instruction | |
192 | */ | ||
184 | static inline unsigned make_dsisr(unsigned instr) | 193 | static inline unsigned make_dsisr(unsigned instr) |
185 | { | 194 | { |
186 | unsigned dsisr; | 195 | unsigned dsisr; |
187 | 196 | ||
188 | /* create a DSISR value from the instruction */ | 197 | |
189 | dsisr = (instr & 0x03ff0000) >> 16; /* bits 6:15 --> 22:31 */ | 198 | /* bits 6:15 --> 22:31 */ |
190 | 199 | dsisr = (instr & 0x03ff0000) >> 16; | |
191 | if ( IS_XFORM(instr) ) { | 200 | |
192 | dsisr |= (instr & 0x00000006) << 14; /* bits 29:30 --> 15:16 */ | 201 | if (IS_XFORM(instr)) { |
193 | dsisr |= (instr & 0x00000040) << 8; /* bit 25 --> 17 */ | 202 | /* bits 29:30 --> 15:16 */ |
194 | dsisr |= (instr & 0x00000780) << 3; /* bits 21:24 --> 18:21 */ | 203 | dsisr |= (instr & 0x00000006) << 14; |
204 | /* bit 25 --> 17 */ | ||
205 | dsisr |= (instr & 0x00000040) << 8; | ||
206 | /* bits 21:24 --> 18:21 */ | ||
207 | dsisr |= (instr & 0x00000780) << 3; | ||
208 | } else { | ||
209 | /* bit 5 --> 17 */ | ||
210 | dsisr |= (instr & 0x04000000) >> 12; | ||
211 | /* bits 1: 4 --> 18:21 */ | ||
212 | dsisr |= (instr & 0x78000000) >> 17; | ||
213 | /* bits 30:31 --> 12:13 */ | ||
214 | if (IS_DSFORM(instr)) | ||
215 | dsisr |= (instr & 0x00000003) << 18; | ||
195 | } | 216 | } |
196 | else { | 217 | |
197 | dsisr |= (instr & 0x04000000) >> 12; /* bit 5 --> 17 */ | 218 | return dsisr; |
198 | dsisr |= (instr & 0x78000000) >> 17; /* bits 1: 4 --> 18:21 */ | 219 | } |
199 | if ( IS_DSFORM(instr) ) { | 220 | |
200 | dsisr |= (instr & 0x00000003) << 18; /* bits 30:31 --> 12:13 */ | 221 | /* |
222 | * The dcbz (data cache block zero) instruction | ||
223 | * gives an alignment fault if used on non-cacheable | ||
224 | * memory. We handle the fault mainly for the | ||
225 | * case when we are running with the cache disabled | ||
226 | * for debugging. | ||
227 | */ | ||
228 | static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr) | ||
229 | { | ||
230 | long __user *p; | ||
231 | int i, size; | ||
232 | |||
233 | #ifdef __powerpc64__ | ||
234 | size = ppc64_caches.dline_size; | ||
235 | #else | ||
236 | size = L1_CACHE_BYTES; | ||
237 | #endif | ||
238 | p = (long __user *) (regs->dar & -size); | ||
239 | if (user_mode(regs) && !access_ok(VERIFY_WRITE, p, size)) | ||
240 | return -EFAULT; | ||
241 | for (i = 0; i < size / sizeof(long); ++i) | ||
242 | if (__put_user(0, p+i)) | ||
243 | return -EFAULT; | ||
244 | return 1; | ||
245 | } | ||
246 | |||
247 | /* | ||
248 | * Emulate load & store multiple instructions | ||
249 | * On 64-bit machines, these instructions only affect/use the | ||
250 | * bottom 4 bytes of each register, and the loads clear the | ||
251 | * top 4 bytes of the affected register. | ||
252 | */ | ||
253 | #ifdef CONFIG_PPC64 | ||
254 | #define REG_BYTE(rp, i) *((u8 *)((rp) + ((i) >> 2)) + ((i) & 3) + 4) | ||
255 | #else | ||
256 | #define REG_BYTE(rp, i) *((u8 *)(rp) + (i)) | ||
257 | #endif | ||
258 | |||
259 | static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr, | ||
260 | unsigned int reg, unsigned int nb, | ||
261 | unsigned int flags, unsigned int instr) | ||
262 | { | ||
263 | unsigned long *rptr; | ||
264 | unsigned int nb0, i; | ||
265 | |||
266 | /* | ||
267 | * We do not try to emulate 8 bytes multiple as they aren't really | ||
268 | * available in our operating environments and we don't try to | ||
269 | * emulate multiples operations in kernel land as they should never | ||
270 | * be used/generated there at least not on unaligned boundaries | ||
271 | */ | ||
272 | if (unlikely((nb > 4) || !user_mode(regs))) | ||
273 | return 0; | ||
274 | |||
275 | /* lmw, stmw, lswi/x, stswi/x */ | ||
276 | nb0 = 0; | ||
277 | if (flags & HARD) { | ||
278 | if (flags & SX) { | ||
279 | nb = regs->xer & 127; | ||
280 | if (nb == 0) | ||
281 | return 1; | ||
282 | } else { | ||
283 | if (__get_user(instr, | ||
284 | (unsigned int __user *)regs->nip)) | ||
285 | return -EFAULT; | ||
286 | nb = (instr >> 11) & 0x1f; | ||
287 | if (nb == 0) | ||
288 | nb = 32; | ||
201 | } | 289 | } |
290 | if (nb + reg * 4 > 128) { | ||
291 | nb0 = nb + reg * 4 - 128; | ||
292 | nb = 128 - reg * 4; | ||
293 | } | ||
294 | } else { | ||
295 | /* lwm, stmw */ | ||
296 | nb = (32 - reg) * 4; | ||
202 | } | 297 | } |
203 | 298 | ||
204 | return dsisr; | 299 | if (!access_ok((flags & ST ? VERIFY_WRITE: VERIFY_READ), addr, nb+nb0)) |
300 | return -EFAULT; /* bad address */ | ||
301 | |||
302 | rptr = ®s->gpr[reg]; | ||
303 | if (flags & LD) { | ||
304 | /* | ||
305 | * This zeroes the top 4 bytes of the affected registers | ||
306 | * in 64-bit mode, and also zeroes out any remaining | ||
307 | * bytes of the last register for lsw*. | ||
308 | */ | ||
309 | memset(rptr, 0, ((nb + 3) / 4) * sizeof(unsigned long)); | ||
310 | if (nb0 > 0) | ||
311 | memset(®s->gpr[0], 0, | ||
312 | ((nb0 + 3) / 4) * sizeof(unsigned long)); | ||
313 | |||
314 | for (i = 0; i < nb; ++i) | ||
315 | if (__get_user(REG_BYTE(rptr, i), addr + i)) | ||
316 | return -EFAULT; | ||
317 | if (nb0 > 0) { | ||
318 | rptr = ®s->gpr[0]; | ||
319 | addr += nb; | ||
320 | for (i = 0; i < nb0; ++i) | ||
321 | if (__get_user(REG_BYTE(rptr, i), addr + i)) | ||
322 | return -EFAULT; | ||
323 | } | ||
324 | |||
325 | } else { | ||
326 | for (i = 0; i < nb; ++i) | ||
327 | if (__put_user(REG_BYTE(rptr, i), addr + i)) | ||
328 | return -EFAULT; | ||
329 | if (nb0 > 0) { | ||
330 | rptr = ®s->gpr[0]; | ||
331 | addr += nb; | ||
332 | for (i = 0; i < nb0; ++i) | ||
333 | if (__put_user(REG_BYTE(rptr, i), addr + i)) | ||
334 | return -EFAULT; | ||
335 | } | ||
336 | } | ||
337 | return 1; | ||
205 | } | 338 | } |
206 | 339 | ||
207 | int | 340 | |
208 | fix_alignment(struct pt_regs *regs) | 341 | /* |
342 | * Called on alignment exception. Attempts to fixup | ||
343 | * | ||
344 | * Return 1 on success | ||
345 | * Return 0 if unable to handle the interrupt | ||
346 | * Return -EFAULT if data address is bad | ||
347 | */ | ||
348 | |||
349 | int fix_alignment(struct pt_regs *regs) | ||
209 | { | 350 | { |
210 | unsigned int instr, nb, flags; | 351 | unsigned int instr, nb, flags; |
211 | int t; | 352 | unsigned int reg, areg; |
212 | unsigned long reg, areg; | 353 | unsigned int dsisr; |
213 | unsigned long i; | ||
214 | int ret; | ||
215 | unsigned dsisr; | ||
216 | unsigned char __user *addr; | 354 | unsigned char __user *addr; |
217 | unsigned char __user *p; | 355 | unsigned char __user *p; |
218 | unsigned long __user *lp; | 356 | int ret, t; |
219 | union { | 357 | union { |
220 | long ll; | 358 | u64 ll; |
221 | double dd; | 359 | double dd; |
222 | unsigned char v[8]; | 360 | unsigned char v[8]; |
223 | struct { | 361 | struct { |
@@ -231,18 +369,22 @@ fix_alignment(struct pt_regs *regs) | |||
231 | } data; | 369 | } data; |
232 | 370 | ||
233 | /* | 371 | /* |
234 | * Return 1 on success | 372 | * We require a complete register set, if not, then our assembly |
235 | * Return 0 if unable to handle the interrupt | 373 | * is broken |
236 | * Return -EFAULT if data address is bad | ||
237 | */ | 374 | */ |
375 | CHECK_FULL_REGS(regs); | ||
238 | 376 | ||
239 | dsisr = regs->dsisr; | 377 | dsisr = regs->dsisr; |
240 | 378 | ||
379 | /* Some processors don't provide us with a DSISR we can use here, | ||
380 | * let's make one up from the instruction | ||
381 | */ | ||
241 | if (cpu_has_feature(CPU_FTR_NODSISRALIGN)) { | 382 | if (cpu_has_feature(CPU_FTR_NODSISRALIGN)) { |
242 | unsigned int real_instr; | 383 | unsigned int real_instr; |
243 | if (__get_user(real_instr, (unsigned int __user *)regs->nip)) | 384 | if (unlikely(__get_user(real_instr, |
244 | return 0; | 385 | (unsigned int __user *)regs->nip))) |
245 | dsisr = make_dsisr(real_instr); | 386 | return -EFAULT; |
387 | dsisr = make_dsisr(real_instr); | ||
246 | } | 388 | } |
247 | 389 | ||
248 | /* extract the operation and registers from the dsisr */ | 390 | /* extract the operation and registers from the dsisr */ |
@@ -258,33 +400,37 @@ fix_alignment(struct pt_regs *regs) | |||
258 | /* DAR has the operand effective address */ | 400 | /* DAR has the operand effective address */ |
259 | addr = (unsigned char __user *)regs->dar; | 401 | addr = (unsigned char __user *)regs->dar; |
260 | 402 | ||
261 | /* A size of 0 indicates an instruction we don't support */ | 403 | /* A size of 0 indicates an instruction we don't support, with |
262 | /* we also don't support the multiples (lmw, stmw, lmd, stmd) */ | 404 | * the exception of DCBZ which is handled as a special case here |
263 | if ((nb == 0) || (flags & M)) | ||
264 | return 0; /* too hard or invalid instruction */ | ||
265 | |||
266 | /* | ||
267 | * Special handling for dcbz | ||
268 | * dcbz may give an alignment exception for accesses to caching inhibited | ||
269 | * storage | ||
270 | */ | 405 | */ |
271 | if (instr == DCBZ) | 406 | if (instr == DCBZ) |
272 | addr = (unsigned char __user *) ((unsigned long)addr & -L1_CACHE_BYTES); | 407 | return emulate_dcbz(regs, addr); |
408 | if (unlikely(nb == 0)) | ||
409 | return 0; | ||
410 | |||
411 | /* Load/Store Multiple instructions are handled in their own | ||
412 | * function | ||
413 | */ | ||
414 | if (flags & M) | ||
415 | return emulate_multiple(regs, addr, reg, nb, flags, instr); | ||
273 | 416 | ||
274 | /* Verify the address of the operand */ | 417 | /* Verify the address of the operand */ |
275 | if (user_mode(regs)) { | 418 | if (unlikely(user_mode(regs) && |
276 | if (!access_ok((flags & ST? VERIFY_WRITE: VERIFY_READ), addr, nb)) | 419 | !access_ok((flags & ST ? VERIFY_WRITE : VERIFY_READ), |
277 | return -EFAULT; /* bad address */ | 420 | addr, nb))) |
278 | } | 421 | return -EFAULT; |
279 | 422 | ||
280 | /* Force the fprs into the save area so we can reference them */ | 423 | /* Force the fprs into the save area so we can reference them */ |
281 | if (flags & F) { | 424 | if (flags & F) { |
282 | if (!user_mode(regs)) | 425 | /* userland only */ |
426 | if (unlikely(!user_mode(regs))) | ||
283 | return 0; | 427 | return 0; |
284 | flush_fp_to_thread(current); | 428 | flush_fp_to_thread(current); |
285 | } | 429 | } |
286 | 430 | ||
287 | /* If we are loading, get the data from user space */ | 431 | /* If we are loading, get the data from user space, else |
432 | * get it from register values | ||
433 | */ | ||
288 | if (flags & LD) { | 434 | if (flags & LD) { |
289 | data.ll = 0; | 435 | data.ll = 0; |
290 | ret = 0; | 436 | ret = 0; |
@@ -301,75 +447,62 @@ fix_alignment(struct pt_regs *regs) | |||
301 | case 2: | 447 | case 2: |
302 | ret |= __get_user(data.v[6], p++); | 448 | ret |= __get_user(data.v[6], p++); |
303 | ret |= __get_user(data.v[7], p++); | 449 | ret |= __get_user(data.v[7], p++); |
304 | if (ret) | 450 | if (unlikely(ret)) |
305 | return -EFAULT; | 451 | return -EFAULT; |
306 | } | 452 | } |
307 | } | 453 | } else if (flags & F) |
308 | 454 | data.dd = current->thread.fpr[reg]; | |
309 | /* If we are storing, get the data from the saved gpr or fpr */ | 455 | else |
310 | if (flags & ST) { | 456 | data.ll = regs->gpr[reg]; |
311 | if (flags & F) { | 457 | |
312 | if (nb == 4) { | 458 | /* Perform other misc operations like sign extension, byteswap, |
313 | /* Doing stfs, have to convert to single */ | 459 | * or floating point single precision conversion |
314 | preempt_disable(); | 460 | */ |
315 | enable_kernel_fp(); | 461 | switch (flags & ~U) { |
316 | cvt_df(¤t->thread.fpr[reg], (float *)&data.v[4], ¤t->thread); | 462 | case LD+SE: /* sign extend */ |
317 | disable_kernel_fp(); | ||
318 | preempt_enable(); | ||
319 | } | ||
320 | else | ||
321 | data.dd = current->thread.fpr[reg]; | ||
322 | } | ||
323 | else | ||
324 | data.ll = regs->gpr[reg]; | ||
325 | } | ||
326 | |||
327 | /* Swap bytes as needed */ | ||
328 | if (flags & SW) { | ||
329 | if (nb == 2) | ||
330 | SWAP(data.v[6], data.v[7]); | ||
331 | else { /* nb must be 4 */ | ||
332 | SWAP(data.v[4], data.v[7]); | ||
333 | SWAP(data.v[5], data.v[6]); | ||
334 | } | ||
335 | } | ||
336 | |||
337 | /* Sign extend as needed */ | ||
338 | if (flags & SE) { | ||
339 | if ( nb == 2 ) | 463 | if ( nb == 2 ) |
340 | data.ll = data.x16.low16; | 464 | data.ll = data.x16.low16; |
341 | else /* nb must be 4 */ | 465 | else /* nb must be 4 */ |
342 | data.ll = data.x32.low32; | 466 | data.ll = data.x32.low32; |
343 | } | 467 | break; |
344 | 468 | case LD+S: /* byte-swap */ | |
345 | /* If we are loading, move the data to the gpr or fpr */ | 469 | case ST+S: |
346 | if (flags & LD) { | 470 | if (nb == 2) { |
347 | if (flags & F) { | 471 | SWAP(data.v[6], data.v[7]); |
348 | if (nb == 4) { | 472 | } else { |
349 | /* Doing lfs, have to convert to double */ | 473 | SWAP(data.v[4], data.v[7]); |
350 | preempt_disable(); | 474 | SWAP(data.v[5], data.v[6]); |
351 | enable_kernel_fp(); | ||
352 | cvt_fd((float *)&data.v[4], ¤t->thread.fpr[reg], ¤t->thread); | ||
353 | disable_kernel_fp(); | ||
354 | preempt_enable(); | ||
355 | } | ||
356 | else | ||
357 | current->thread.fpr[reg] = data.dd; | ||
358 | } | 475 | } |
359 | else | 476 | break; |
360 | regs->gpr[reg] = data.ll; | 477 | |
478 | /* Single-precision FP load and store require conversions... */ | ||
479 | case LD+F+S: | ||
480 | #ifdef CONFIG_PPC_FPU | ||
481 | preempt_disable(); | ||
482 | enable_kernel_fp(); | ||
483 | cvt_fd((float *)&data.v[4], &data.dd, ¤t->thread); | ||
484 | preempt_enable(); | ||
485 | #else | ||
486 | return 0; | ||
487 | #endif | ||
488 | break; | ||
489 | case ST+F+S: | ||
490 | #ifdef CONFIG_PPC_FPU | ||
491 | preempt_disable(); | ||
492 | enable_kernel_fp(); | ||
493 | cvt_df(&data.dd, (float *)&data.v[4], ¤t->thread); | ||
494 | preempt_enable(); | ||
495 | #else | ||
496 | return 0; | ||
497 | #endif | ||
498 | break; | ||
361 | } | 499 | } |
362 | 500 | ||
363 | /* If we are storing, copy the data to the user */ | 501 | /* Store result to memory or update registers */ |
364 | if (flags & ST) { | 502 | if (flags & ST) { |
365 | ret = 0; | 503 | ret = 0; |
366 | p = addr; | 504 | p = addr; |
367 | switch (nb) { | 505 | switch (nb) { |
368 | case 128: /* Special case - must be dcbz */ | ||
369 | lp = (unsigned long __user *)p; | ||
370 | for (i = 0; i < L1_CACHE_BYTES / sizeof(long); ++i) | ||
371 | ret |= __put_user(0, lp++); | ||
372 | break; | ||
373 | case 8: | 506 | case 8: |
374 | ret |= __put_user(data.v[0], p++); | 507 | ret |= __put_user(data.v[0], p++); |
375 | ret |= __put_user(data.v[1], p++); | 508 | ret |= __put_user(data.v[1], p++); |
@@ -382,15 +515,16 @@ fix_alignment(struct pt_regs *regs) | |||
382 | ret |= __put_user(data.v[6], p++); | 515 | ret |= __put_user(data.v[6], p++); |
383 | ret |= __put_user(data.v[7], p++); | 516 | ret |= __put_user(data.v[7], p++); |
384 | } | 517 | } |
385 | if (ret) | 518 | if (unlikely(ret)) |
386 | return -EFAULT; | 519 | return -EFAULT; |
387 | } | 520 | } else if (flags & F) |
388 | 521 | current->thread.fpr[reg] = data.dd; | |
522 | else | ||
523 | regs->gpr[reg] = data.ll; | ||
524 | |||
389 | /* Update RA as needed */ | 525 | /* Update RA as needed */ |
390 | if (flags & U) { | 526 | if (flags & U) |
391 | regs->gpr[areg] = regs->dar; | 527 | regs->gpr[areg] = regs->dar; |
392 | } | ||
393 | 528 | ||
394 | return 1; | 529 | return 1; |
395 | } | 530 | } |
396 | |||
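The 64-bit REG_BYTE() macro introduced above packs the byte stream for lmw/stmw/lsw* into bytes 4..7 of each 8-byte GPR slot, which on big-endian ppc64 is the least-significant word of the register. Below is a minimal standalone sketch of that indexing, with a uint64_t array standing in for &regs->gpr[reg] (uint64_t mirrors ppc64's 8-byte unsigned long); it only reports which byte of which slot each stream byte lands in, so the output does not depend on host endianness. The stream values and loop length are made up for illustration.

```c
#include <stdio.h>
#include <stdint.h>

typedef uint8_t u8;

/* CONFIG_PPC64 variant from the merged align.c: registers are 8-byte
 * slots, and stream byte i goes to byte ((i & 3) + 4) of slot (i >> 2). */
#define REG_BYTE(rp, i) (*((u8 *)((rp) + ((i) >> 2)) + ((i) & 3) + 4))

int main(void)
{
	uint64_t gpr[8] = { 0 };	/* stand-in for &regs->gpr[reg] */
	unsigned char stream[12];
	int i;

	for (i = 0; i < 12; i++)
		stream[i] = (unsigned char)(0x10 + i);	/* pretend __get_user() data */

	/* same loop shape as the load side of emulate_multiple() */
	for (i = 0; i < 12; i++) {
		u8 *dst = &REG_BYTE(gpr, i);

		*dst = stream[i];
		printf("stream byte %2d -> gpr slot %d, byte offset %d\n",
		       i, (int)((dst - (u8 *)gpr) / 8),
		       (int)((dst - (u8 *)gpr) % 8));
	}
	return 0;
}
```

Each group of four stream bytes fills offsets 4..7 of the next slot, which is why emulate_multiple() first memsets the whole slot range: the top word of each 64-bit register is cleared rather than written by the loop.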
diff --git a/arch/ppc/kernel/Makefile b/arch/ppc/kernel/Makefile
index 17a4da65e275..0bb23fce4293 100644
--- a/arch/ppc/kernel/Makefile
+++ b/arch/ppc/kernel/Makefile
@@ -13,7 +13,7 @@ extra-$(CONFIG_POWER4) += idle_power4.o
13 | extra-y += vmlinux.lds | 13 | extra-y += vmlinux.lds |
14 | 14 | ||
15 | obj-y := entry.o traps.o idle.o time.o misc.o \ | 15 | obj-y := entry.o traps.o idle.o time.o misc.o \ |
16 | process.o align.o \ | 16 | process.o \ |
17 | setup.o \ | 17 | setup.o \ |
18 | ppc_htab.o | 18 | ppc_htab.o |
19 | obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o | 19 | obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o |
@@ -38,7 +38,7 @@ endif | |||
38 | # These are here while we do the architecture merge | 38 | # These are here while we do the architecture merge |
39 | 39 | ||
40 | else | 40 | else |
41 | obj-y := idle.o align.o | 41 | obj-y := idle.o |
42 | obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o | 42 | obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o |
43 | obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o | 43 | obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o |
44 | obj-$(CONFIG_MODULES) += module.o | 44 | obj-$(CONFIG_MODULES) += module.o |
diff --git a/arch/ppc/kernel/align.c b/arch/ppc/kernel/align.c
deleted file mode 100644
index ab398c4b70b6..000000000000
--- a/arch/ppc/kernel/align.c
+++ /dev/null
@@ -1,410 +0,0 @@
1 | /* | ||
2 | * align.c - handle alignment exceptions for the Power PC. | ||
3 | * | ||
4 | * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au> | ||
5 | * Copyright (c) 1998-1999 TiVo, Inc. | ||
6 | * PowerPC 403GCX modifications. | ||
7 | * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu> | ||
8 | * PowerPC 403GCX/405GP modifications. | ||
9 | */ | ||
10 | #include <linux/config.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/mm.h> | ||
13 | #include <asm/ptrace.h> | ||
14 | #include <asm/processor.h> | ||
15 | #include <asm/uaccess.h> | ||
16 | #include <asm/system.h> | ||
17 | #include <asm/cache.h> | ||
18 | |||
19 | struct aligninfo { | ||
20 | unsigned char len; | ||
21 | unsigned char flags; | ||
22 | }; | ||
23 | |||
24 | #if defined(CONFIG_4xx) || defined(CONFIG_POWER4) || defined(CONFIG_BOOKE) | ||
25 | #define OPCD(inst) (((inst) & 0xFC000000) >> 26) | ||
26 | #define RS(inst) (((inst) & 0x03E00000) >> 21) | ||
27 | #define RA(inst) (((inst) & 0x001F0000) >> 16) | ||
28 | #define IS_XFORM(code) ((code) == 31) | ||
29 | #endif | ||
30 | |||
31 | #define INVALID { 0, 0 } | ||
32 | |||
33 | #define LD 1 /* load */ | ||
34 | #define ST 2 /* store */ | ||
35 | #define SE 4 /* sign-extend value */ | ||
36 | #define F 8 /* to/from fp regs */ | ||
37 | #define U 0x10 /* update index register */ | ||
38 | #define M 0x20 /* multiple load/store */ | ||
39 | #define S 0x40 /* single-precision fp, or byte-swap value */ | ||
40 | #define SX 0x40 /* byte count in XER */ | ||
41 | #define HARD 0x80 /* string, stwcx. */ | ||
42 | |||
43 | #define DCBZ 0x5f /* 8xx/82xx dcbz faults when cache not enabled */ | ||
44 | |||
45 | /* | ||
46 | * The PowerPC stores certain bits of the instruction that caused the | ||
47 | * alignment exception in the DSISR register. This array maps those | ||
48 | * bits to information about the operand length and what the | ||
49 | * instruction would do. | ||
50 | */ | ||
51 | static struct aligninfo aligninfo[128] = { | ||
52 | { 4, LD }, /* 00 0 0000: lwz / lwarx */ | ||
53 | INVALID, /* 00 0 0001 */ | ||
54 | { 4, ST }, /* 00 0 0010: stw */ | ||
55 | INVALID, /* 00 0 0011 */ | ||
56 | { 2, LD }, /* 00 0 0100: lhz */ | ||
57 | { 2, LD+SE }, /* 00 0 0101: lha */ | ||
58 | { 2, ST }, /* 00 0 0110: sth */ | ||
59 | { 4, LD+M }, /* 00 0 0111: lmw */ | ||
60 | { 4, LD+F+S }, /* 00 0 1000: lfs */ | ||
61 | { 8, LD+F }, /* 00 0 1001: lfd */ | ||
62 | { 4, ST+F+S }, /* 00 0 1010: stfs */ | ||
63 | { 8, ST+F }, /* 00 0 1011: stfd */ | ||
64 | INVALID, /* 00 0 1100 */ | ||
65 | INVALID, /* 00 0 1101: ld/ldu/lwa */ | ||
66 | INVALID, /* 00 0 1110 */ | ||
67 | INVALID, /* 00 0 1111: std/stdu */ | ||
68 | { 4, LD+U }, /* 00 1 0000: lwzu */ | ||
69 | INVALID, /* 00 1 0001 */ | ||
70 | { 4, ST+U }, /* 00 1 0010: stwu */ | ||
71 | INVALID, /* 00 1 0011 */ | ||
72 | { 2, LD+U }, /* 00 1 0100: lhzu */ | ||
73 | { 2, LD+SE+U }, /* 00 1 0101: lhau */ | ||
74 | { 2, ST+U }, /* 00 1 0110: sthu */ | ||
75 | { 4, ST+M }, /* 00 1 0111: stmw */ | ||
76 | { 4, LD+F+S+U }, /* 00 1 1000: lfsu */ | ||
77 | { 8, LD+F+U }, /* 00 1 1001: lfdu */ | ||
78 | { 4, ST+F+S+U }, /* 00 1 1010: stfsu */ | ||
79 | { 8, ST+F+U }, /* 00 1 1011: stfdu */ | ||
80 | INVALID, /* 00 1 1100 */ | ||
81 | INVALID, /* 00 1 1101 */ | ||
82 | INVALID, /* 00 1 1110 */ | ||
83 | INVALID, /* 00 1 1111 */ | ||
84 | INVALID, /* 01 0 0000: ldx */ | ||
85 | INVALID, /* 01 0 0001 */ | ||
86 | INVALID, /* 01 0 0010: stdx */ | ||
87 | INVALID, /* 01 0 0011 */ | ||
88 | INVALID, /* 01 0 0100 */ | ||
89 | INVALID, /* 01 0 0101: lwax */ | ||
90 | INVALID, /* 01 0 0110 */ | ||
91 | INVALID, /* 01 0 0111 */ | ||
92 | { 4, LD+M+HARD+SX }, /* 01 0 1000: lswx */ | ||
93 | { 4, LD+M+HARD }, /* 01 0 1001: lswi */ | ||
94 | { 4, ST+M+HARD+SX }, /* 01 0 1010: stswx */ | ||
95 | { 4, ST+M+HARD }, /* 01 0 1011: stswi */ | ||
96 | INVALID, /* 01 0 1100 */ | ||
97 | INVALID, /* 01 0 1101 */ | ||
98 | INVALID, /* 01 0 1110 */ | ||
99 | INVALID, /* 01 0 1111 */ | ||
100 | INVALID, /* 01 1 0000: ldux */ | ||
101 | INVALID, /* 01 1 0001 */ | ||
102 | INVALID, /* 01 1 0010: stdux */ | ||
103 | INVALID, /* 01 1 0011 */ | ||
104 | INVALID, /* 01 1 0100 */ | ||
105 | INVALID, /* 01 1 0101: lwaux */ | ||
106 | INVALID, /* 01 1 0110 */ | ||
107 | INVALID, /* 01 1 0111 */ | ||
108 | INVALID, /* 01 1 1000 */ | ||
109 | INVALID, /* 01 1 1001 */ | ||
110 | INVALID, /* 01 1 1010 */ | ||
111 | INVALID, /* 01 1 1011 */ | ||
112 | INVALID, /* 01 1 1100 */ | ||
113 | INVALID, /* 01 1 1101 */ | ||
114 | INVALID, /* 01 1 1110 */ | ||
115 | INVALID, /* 01 1 1111 */ | ||
116 | INVALID, /* 10 0 0000 */ | ||
117 | INVALID, /* 10 0 0001 */ | ||
118 | { 0, ST+HARD }, /* 10 0 0010: stwcx. */ | ||
119 | INVALID, /* 10 0 0011 */ | ||
120 | INVALID, /* 10 0 0100 */ | ||
121 | INVALID, /* 10 0 0101 */ | ||
122 | INVALID, /* 10 0 0110 */ | ||
123 | INVALID, /* 10 0 0111 */ | ||
124 | { 4, LD+S }, /* 10 0 1000: lwbrx */ | ||
125 | INVALID, /* 10 0 1001 */ | ||
126 | { 4, ST+S }, /* 10 0 1010: stwbrx */ | ||
127 | INVALID, /* 10 0 1011 */ | ||
128 | { 2, LD+S }, /* 10 0 1100: lhbrx */ | ||
129 | INVALID, /* 10 0 1101 */ | ||
130 | { 2, ST+S }, /* 10 0 1110: sthbrx */ | ||
131 | INVALID, /* 10 0 1111 */ | ||
132 | INVALID, /* 10 1 0000 */ | ||
133 | INVALID, /* 10 1 0001 */ | ||
134 | INVALID, /* 10 1 0010 */ | ||
135 | INVALID, /* 10 1 0011 */ | ||
136 | INVALID, /* 10 1 0100 */ | ||
137 | INVALID, /* 10 1 0101 */ | ||
138 | INVALID, /* 10 1 0110 */ | ||
139 | INVALID, /* 10 1 0111 */ | ||
140 | INVALID, /* 10 1 1000 */ | ||
141 | INVALID, /* 10 1 1001 */ | ||
142 | INVALID, /* 10 1 1010 */ | ||
143 | INVALID, /* 10 1 1011 */ | ||
144 | INVALID, /* 10 1 1100 */ | ||
145 | INVALID, /* 10 1 1101 */ | ||
146 | INVALID, /* 10 1 1110 */ | ||
147 | { 0, ST+HARD }, /* 10 1 1111: dcbz */ | ||
148 | { 4, LD }, /* 11 0 0000: lwzx */ | ||
149 | INVALID, /* 11 0 0001 */ | ||
150 | { 4, ST }, /* 11 0 0010: stwx */ | ||
151 | INVALID, /* 11 0 0011 */ | ||
152 | { 2, LD }, /* 11 0 0100: lhzx */ | ||
153 | { 2, LD+SE }, /* 11 0 0101: lhax */ | ||
154 | { 2, ST }, /* 11 0 0110: sthx */ | ||
155 | INVALID, /* 11 0 0111 */ | ||
156 | { 4, LD+F+S }, /* 11 0 1000: lfsx */ | ||
157 | { 8, LD+F }, /* 11 0 1001: lfdx */ | ||
158 | { 4, ST+F+S }, /* 11 0 1010: stfsx */ | ||
159 | { 8, ST+F }, /* 11 0 1011: stfdx */ | ||
160 | INVALID, /* 11 0 1100 */ | ||
161 | INVALID, /* 11 0 1101: lmd */ | ||
162 | INVALID, /* 11 0 1110 */ | ||
163 | INVALID, /* 11 0 1111: stmd */ | ||
164 | { 4, LD+U }, /* 11 1 0000: lwzux */ | ||
165 | INVALID, /* 11 1 0001 */ | ||
166 | { 4, ST+U }, /* 11 1 0010: stwux */ | ||
167 | INVALID, /* 11 1 0011 */ | ||
168 | { 2, LD+U }, /* 11 1 0100: lhzux */ | ||
169 | { 2, LD+SE+U }, /* 11 1 0101: lhaux */ | ||
170 | { 2, ST+U }, /* 11 1 0110: sthux */ | ||
171 | INVALID, /* 11 1 0111 */ | ||
172 | { 4, LD+F+S+U }, /* 11 1 1000: lfsux */ | ||
173 | { 8, LD+F+U }, /* 11 1 1001: lfdux */ | ||
174 | { 4, ST+F+S+U }, /* 11 1 1010: stfsux */ | ||
175 | { 8, ST+F+U }, /* 11 1 1011: stfdux */ | ||
176 | INVALID, /* 11 1 1100 */ | ||
177 | INVALID, /* 11 1 1101 */ | ||
178 | INVALID, /* 11 1 1110 */ | ||
179 | INVALID, /* 11 1 1111 */ | ||
180 | }; | ||
181 | |||
182 | #define SWAP(a, b) (t = (a), (a) = (b), (b) = t) | ||
183 | |||
184 | int | ||
185 | fix_alignment(struct pt_regs *regs) | ||
186 | { | ||
187 | int instr, nb, flags; | ||
188 | #if defined(CONFIG_4xx) || defined(CONFIG_POWER4) || defined(CONFIG_BOOKE) | ||
189 | int opcode, f1, f2, f3; | ||
190 | #endif | ||
191 | int i, t; | ||
192 | int reg, areg; | ||
193 | int offset, nb0; | ||
194 | unsigned char __user *addr; | ||
195 | unsigned char *rptr; | ||
196 | union { | ||
197 | long l; | ||
198 | float f; | ||
199 | double d; | ||
200 | unsigned char v[8]; | ||
201 | } data; | ||
202 | |||
203 | CHECK_FULL_REGS(regs); | ||
204 | |||
205 | #if defined(CONFIG_4xx) || defined(CONFIG_POWER4) || defined(CONFIG_BOOKE) | ||
206 | /* The 4xx-family & Book-E processors have no DSISR register, | ||
207 | * so we emulate it. | ||
208 | * The POWER4 has a DSISR register but doesn't set it on | ||
209 | * an alignment fault. -- paulus | ||
210 | */ | ||
211 | |||
212 | if (__get_user(instr, (unsigned int __user *) regs->nip)) | ||
213 | return 0; | ||
214 | opcode = OPCD(instr); | ||
215 | reg = RS(instr); | ||
216 | areg = RA(instr); | ||
217 | |||
218 | if (!IS_XFORM(opcode)) { | ||
219 | f1 = 0; | ||
220 | f2 = (instr & 0x04000000) >> 26; | ||
221 | f3 = (instr & 0x78000000) >> 27; | ||
222 | } else { | ||
223 | f1 = (instr & 0x00000006) >> 1; | ||
224 | f2 = (instr & 0x00000040) >> 6; | ||
225 | f3 = (instr & 0x00000780) >> 7; | ||
226 | } | ||
227 | |||
228 | instr = ((f1 << 5) | (f2 << 4) | f3); | ||
229 | #else | ||
230 | reg = (regs->dsisr >> 5) & 0x1f; /* source/dest register */ | ||
231 | areg = regs->dsisr & 0x1f; /* register to update */ | ||
232 | instr = (regs->dsisr >> 10) & 0x7f; | ||
233 | #endif | ||
234 | |||
235 | nb = aligninfo[instr].len; | ||
236 | if (nb == 0) { | ||
237 | long __user *p; | ||
238 | int i; | ||
239 | |||
240 | if (instr != DCBZ) | ||
241 | return 0; /* too hard or invalid instruction */ | ||
242 | /* | ||
243 | * The dcbz (data cache block zero) instruction | ||
244 | * gives an alignment fault if used on non-cacheable | ||
245 | * memory. We handle the fault mainly for the | ||
246 | * case when we are running with the cache disabled | ||
247 | * for debugging. | ||
248 | */ | ||
249 | p = (long __user *) (regs->dar & -L1_CACHE_BYTES); | ||
250 | if (user_mode(regs) | ||
251 | && !access_ok(VERIFY_WRITE, p, L1_CACHE_BYTES)) | ||
252 | return -EFAULT; | ||
253 | for (i = 0; i < L1_CACHE_BYTES / sizeof(long); ++i) | ||
254 | if (__put_user(0, p+i)) | ||
255 | return -EFAULT; | ||
256 | return 1; | ||
257 | } | ||
258 | |||
259 | flags = aligninfo[instr].flags; | ||
260 | if ((flags & (LD|ST)) == 0) | ||
261 | return 0; | ||
262 | |||
263 | /* For the 4xx-family & Book-E processors, the 'dar' field of the | ||
264 | * pt_regs structure is overloaded and is really from the DEAR. | ||
265 | */ | ||
266 | |||
267 | addr = (unsigned char __user *)regs->dar; | ||
268 | |||
269 | if (flags & M) { | ||
270 | /* lmw, stmw, lswi/x, stswi/x */ | ||
271 | nb0 = 0; | ||
272 | if (flags & HARD) { | ||
273 | if (flags & SX) { | ||
274 | nb = regs->xer & 127; | ||
275 | if (nb == 0) | ||
276 | return 1; | ||
277 | } else { | ||
278 | if (__get_user(instr, | ||
279 | (unsigned int __user *)regs->nip)) | ||
280 | return 0; | ||
281 | nb = (instr >> 11) & 0x1f; | ||
282 | if (nb == 0) | ||
283 | nb = 32; | ||
284 | } | ||
285 | if (nb + reg * 4 > 128) { | ||
286 | nb0 = nb + reg * 4 - 128; | ||
287 | nb = 128 - reg * 4; | ||
288 | } | ||
289 | } else { | ||
290 | /* lwm, stmw */ | ||
291 | nb = (32 - reg) * 4; | ||
292 | } | ||
293 | |||
294 | if (!access_ok((flags & ST? VERIFY_WRITE: VERIFY_READ), addr, nb+nb0)) | ||
295 | return -EFAULT; /* bad address */ | ||
296 | |||
297 | rptr = (unsigned char *) ®s->gpr[reg]; | ||
298 | if (flags & LD) { | ||
299 | for (i = 0; i < nb; ++i) | ||
300 | if (__get_user(rptr[i], addr+i)) | ||
301 | return -EFAULT; | ||
302 | if (nb0 > 0) { | ||
303 | rptr = (unsigned char *) ®s->gpr[0]; | ||
304 | addr += nb; | ||
305 | for (i = 0; i < nb0; ++i) | ||
306 | if (__get_user(rptr[i], addr+i)) | ||
307 | return -EFAULT; | ||
308 | } | ||
309 | for (; (i & 3) != 0; ++i) | ||
310 | rptr[i] = 0; | ||
311 | } else { | ||
312 | for (i = 0; i < nb; ++i) | ||
313 | if (__put_user(rptr[i], addr+i)) | ||
314 | return -EFAULT; | ||
315 | if (nb0 > 0) { | ||
316 | rptr = (unsigned char *) ®s->gpr[0]; | ||
317 | addr += nb; | ||
318 | for (i = 0; i < nb0; ++i) | ||
319 | if (__put_user(rptr[i], addr+i)) | ||
320 | return -EFAULT; | ||
321 | } | ||
322 | } | ||
323 | return 1; | ||
324 | } | ||
325 | |||
326 | offset = 0; | ||
327 | if (nb < 4) { | ||
328 | /* read/write the least significant bits */ | ||
329 | data.l = 0; | ||
330 | offset = 4 - nb; | ||
331 | } | ||
332 | |||
333 | /* Verify the address of the operand */ | ||
334 | if (user_mode(regs)) { | ||
335 | if (!access_ok((flags & ST? VERIFY_WRITE: VERIFY_READ), addr, nb)) | ||
336 | return -EFAULT; /* bad address */ | ||
337 | } | ||
338 | |||
339 | if (flags & F) { | ||
340 | preempt_disable(); | ||
341 | if (regs->msr & MSR_FP) | ||
342 | giveup_fpu(current); | ||
343 | preempt_enable(); | ||
344 | } | ||
345 | |||
346 | /* If we read the operand, copy it in, else get register values */ | ||
347 | if (flags & LD) { | ||
348 | for (i = 0; i < nb; ++i) | ||
349 | if (__get_user(data.v[offset+i], addr+i)) | ||
350 | return -EFAULT; | ||
351 | } else if (flags & F) { | ||
352 | data.d = current->thread.fpr[reg]; | ||
353 | } else { | ||
354 | data.l = regs->gpr[reg]; | ||
355 | } | ||
356 | |||
357 | switch (flags & ~U) { | ||
358 | case LD+SE: /* sign extend */ | ||
359 | if (data.v[2] >= 0x80) | ||
360 | data.v[0] = data.v[1] = -1; | ||
361 | break; | ||
362 | |||
363 | case LD+S: /* byte-swap */ | ||
364 | case ST+S: | ||
365 | if (nb == 2) { | ||
366 | SWAP(data.v[2], data.v[3]); | ||
367 | } else { | ||
368 | SWAP(data.v[0], data.v[3]); | ||
369 | SWAP(data.v[1], data.v[2]); | ||
370 | } | ||
371 | break; | ||
372 | |||
373 | /* Single-precision FP load and store require conversions... */ | ||
374 | case LD+F+S: | ||
375 | #ifdef CONFIG_PPC_FPU | ||
376 | preempt_disable(); | ||
377 | enable_kernel_fp(); | ||
378 | cvt_fd(&data.f, &data.d, ¤t->thread); | ||
379 | preempt_enable(); | ||
380 | #else | ||
381 | return 0; | ||
382 | #endif | ||
383 | break; | ||
384 | case ST+F+S: | ||
385 | #ifdef CONFIG_PPC_FPU | ||
386 | preempt_disable(); | ||
387 | enable_kernel_fp(); | ||
388 | cvt_df(&data.d, &data.f, ¤t->thread); | ||
389 | preempt_enable(); | ||
390 | #else | ||
391 | return 0; | ||
392 | #endif | ||
393 | break; | ||
394 | } | ||
395 | |||
396 | if (flags & ST) { | ||
397 | for (i = 0; i < nb; ++i) | ||
398 | if (__put_user(data.v[offset+i], addr+i)) | ||
399 | return -EFAULT; | ||
400 | } else if (flags & F) { | ||
401 | current->thread.fpr[reg] = data.d; | ||
402 | } else { | ||
403 | regs->gpr[reg] = data.l; | ||
404 | } | ||
405 | |||
406 | if (flags & U) | ||
407 | regs->gpr[areg] = regs->dar; | ||
408 | |||
409 | return 1; | ||
410 | } | ||
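One detail worth noting when comparing the deleted 32-bit file above with the merged version: the data union grew from a 4-byte long to an 8-byte u64, so the byte-reverse handling now swaps v[6]/v[7] (halfword) or v[4..7] (word) instead of v[2]/v[3] or v[0..3]. On big-endian the operand sits right-justified in the union, which is how the load switch in the merged file fills it. A minimal standalone sketch of that swap, assuming the same right-justified layout; the sample bytes are arbitrary:

```c
#include <stdio.h>
#include <stdint.h>

#define SWAP(a, b) (t = (a), (a) = (b), (b) = t)

int main(void)
{
	union {
		uint64_t ll;
		unsigned char v[8];
	} data = { 0 };
	unsigned char t;
	int nb = 4;	/* use 2 for the halfword (lhbrx/sthbrx) case */

	/* word operand right-justified, as the big-endian load path fills it */
	data.v[4] = 0x12;
	data.v[5] = 0x34;
	data.v[6] = 0x56;
	data.v[7] = 0x78;

	if (nb == 2) {
		SWAP(data.v[6], data.v[7]);
	} else {
		SWAP(data.v[4], data.v[7]);
		SWAP(data.v[5], data.v[6]);
	}

	printf("bytes after swap: %02x %02x %02x %02x\n",
	       data.v[4], data.v[5], data.v[6], data.v[7]);
	return 0;
}
```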
diff --git a/arch/ppc64/kernel/Makefile b/arch/ppc64/kernel/Makefile
index e876c213f5ce..5f0abdb66e0b 100644
--- a/arch/ppc64/kernel/Makefile
+++ b/arch/ppc64/kernel/Makefile
@@ -2,6 +2,6 @@
2 | # Makefile for the linux ppc64 kernel. | 2 | # Makefile for the linux ppc64 kernel. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y += idle.o align.o | 5 | obj-y += idle.o |
6 | 6 | ||
7 | obj-$(CONFIG_PPC_MULTIPLATFORM) += nvram.o | 7 | obj-$(CONFIG_PPC_MULTIPLATFORM) += nvram.o |
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index 04e2726002cf..d1cfa3f515ea 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -90,6 +90,7 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
90 | #define CPU_FTR_NEED_COHERENT ASM_CONST(0x0000000000020000) | 90 | #define CPU_FTR_NEED_COHERENT ASM_CONST(0x0000000000020000) |
91 | #define CPU_FTR_NO_BTIC ASM_CONST(0x0000000000040000) | 91 | #define CPU_FTR_NO_BTIC ASM_CONST(0x0000000000040000) |
92 | #define CPU_FTR_BIG_PHYS ASM_CONST(0x0000000000080000) | 92 | #define CPU_FTR_BIG_PHYS ASM_CONST(0x0000000000080000) |
93 | #define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000000000100000) | ||
93 | 94 | ||
94 | #ifdef __powerpc64__ | 95 | #ifdef __powerpc64__ |
95 | /* Add the 64b processor unique features in the top half of the word */ | 96 | /* Add the 64b processor unique features in the top half of the word */ |
@@ -97,7 +98,6 @@ extern void do_cpu_ftr_fixups(unsigned long offset); | |||
97 | #define CPU_FTR_16M_PAGE ASM_CONST(0x0000000200000000) | 98 | #define CPU_FTR_16M_PAGE ASM_CONST(0x0000000200000000) |
98 | #define CPU_FTR_TLBIEL ASM_CONST(0x0000000400000000) | 99 | #define CPU_FTR_TLBIEL ASM_CONST(0x0000000400000000) |
99 | #define CPU_FTR_NOEXECUTE ASM_CONST(0x0000000800000000) | 100 | #define CPU_FTR_NOEXECUTE ASM_CONST(0x0000000800000000) |
100 | #define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000001000000000) | ||
101 | #define CPU_FTR_IABR ASM_CONST(0x0000002000000000) | 101 | #define CPU_FTR_IABR ASM_CONST(0x0000002000000000) |
102 | #define CPU_FTR_MMCRA ASM_CONST(0x0000004000000000) | 102 | #define CPU_FTR_MMCRA ASM_CONST(0x0000004000000000) |
103 | #define CPU_FTR_CTRL ASM_CONST(0x0000008000000000) | 103 | #define CPU_FTR_CTRL ASM_CONST(0x0000008000000000) |
@@ -113,7 +113,6 @@ extern void do_cpu_ftr_fixups(unsigned long offset); | |||
113 | #define CPU_FTR_16M_PAGE ASM_CONST(0x0) | 113 | #define CPU_FTR_16M_PAGE ASM_CONST(0x0) |
114 | #define CPU_FTR_TLBIEL ASM_CONST(0x0) | 114 | #define CPU_FTR_TLBIEL ASM_CONST(0x0) |
115 | #define CPU_FTR_NOEXECUTE ASM_CONST(0x0) | 115 | #define CPU_FTR_NOEXECUTE ASM_CONST(0x0) |
116 | #define CPU_FTR_NODSISRALIGN ASM_CONST(0x0) | ||
117 | #define CPU_FTR_IABR ASM_CONST(0x0) | 116 | #define CPU_FTR_IABR ASM_CONST(0x0) |
118 | #define CPU_FTR_MMCRA ASM_CONST(0x0) | 117 | #define CPU_FTR_MMCRA ASM_CONST(0x0) |
119 | #define CPU_FTR_CTRL ASM_CONST(0x0) | 118 | #define CPU_FTR_CTRL ASM_CONST(0x0) |
@@ -273,18 +272,21 @@ enum { | |||
273 | CPU_FTRS_POWER3_32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | | 272 | CPU_FTRS_POWER3_32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | |
274 | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE, | 273 | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE, |
275 | CPU_FTRS_POWER4_32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | | 274 | CPU_FTRS_POWER4_32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | |
276 | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE, | 275 | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_NODSISRALIGN, |
277 | CPU_FTRS_970_32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | | 276 | CPU_FTRS_970_32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | |
278 | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_ALTIVEC_COMP | | 277 | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_ALTIVEC_COMP | |
279 | CPU_FTR_MAYBE_CAN_NAP, | 278 | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN, |
280 | CPU_FTRS_8XX = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB, | 279 | CPU_FTRS_8XX = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB, |
281 | CPU_FTRS_40X = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB, | 280 | CPU_FTRS_40X = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | |
282 | CPU_FTRS_44X = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB, | 281 | CPU_FTR_NODSISRALIGN, |
283 | CPU_FTRS_E200 = CPU_FTR_USE_TB, | 282 | CPU_FTRS_44X = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | |
284 | CPU_FTRS_E500 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB, | 283 | CPU_FTR_NODSISRALIGN, |
284 | CPU_FTRS_E200 = CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN, | ||
285 | CPU_FTRS_E500 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | | ||
286 | CPU_FTR_NODSISRALIGN, | ||
285 | CPU_FTRS_E500_2 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | | 287 | CPU_FTRS_E500_2 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | |
286 | CPU_FTR_BIG_PHYS, | 288 | CPU_FTR_BIG_PHYS | CPU_FTR_NODSISRALIGN, |
287 | CPU_FTRS_GENERIC_32 = CPU_FTR_COMMON, | 289 | CPU_FTRS_GENERIC_32 = CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN, |
288 | #ifdef __powerpc64__ | 290 | #ifdef __powerpc64__ |
289 | CPU_FTRS_POWER3 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | | 291 | CPU_FTRS_POWER3 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | |
290 | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR, | 292 | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR, |
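The cputable.h change above moves CPU_FTR_NODSISRALIGN into the common feature space so 32-bit parts without a usable DSISR on alignment faults (4xx, 44x, e200, e500, 970/POWER4 in 32-bit, generic) take the make_dsisr() path in the merged align.c. As a closing illustration, here is a standalone sketch of what that path computes: make_dsisr() is copied from the merged file, while the IS_XFORM/IS_DSFORM definitions and the sample instruction word are assumptions (they are not in the hunks above; the word assumes the standard D-form encoding of lwz r5,0(r3)). The final extraction mirrors what fix_alignment() does with the real or reconstructed DSISR.

```c
#include <stdio.h>

#define IS_XFORM(inst)	(((inst) >> 26) == 31)	/* assumed, as in align.c */
#define IS_DSFORM(inst)	(((inst) >> 26) >= 56)	/* assumed, as in align.c */

/* copied from the merged arch/powerpc/kernel/align.c */
static unsigned make_dsisr(unsigned instr)
{
	unsigned dsisr;

	/* bits 6:15 --> 22:31 */
	dsisr = (instr & 0x03ff0000) >> 16;

	if (IS_XFORM(instr)) {
		/* bits 29:30 --> 15:16 */
		dsisr |= (instr & 0x00000006) << 14;
		/* bit 25 --> 17 */
		dsisr |= (instr & 0x00000040) << 8;
		/* bits 21:24 --> 18:21 */
		dsisr |= (instr & 0x00000780) << 3;
	} else {
		/* bit 5 --> 17 */
		dsisr |= (instr & 0x04000000) >> 12;
		/* bits 1: 4 --> 18:21 */
		dsisr |= (instr & 0x78000000) >> 17;
		/* bits 30:31 --> 12:13 */
		if (IS_DSFORM(instr))
			dsisr |= (instr & 0x00000003) << 18;
	}
	return dsisr;
}

int main(void)
{
	unsigned instr = 0x80a30000;	/* lwz r5,0(r3): opcode 32, RT=5, RA=3 */
	unsigned dsisr = make_dsisr(instr);

	/* same fields fix_alignment() pulls out of the DSISR */
	printf("table index = %u (aligninfo[0] is lwz)\n", (dsisr >> 10) & 0x7f);
	printf("reg  (RT)   = %u\n", (dsisr >> 5) & 0x1f);
	printf("areg (RA)   = %u\n", dsisr & 0x1f);
	return 0;
}
```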