author		Benjamin Herrenschmidt <benh@kernel.crashing.org>	2010-07-08 21:25:48 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2010-07-08 21:25:48 -0400
commit		5f07aa7524e98d6f68f2bec54f155ef6012e2c9a (patch)
tree		393b2db3213e6618ad1338f9b66019b0c05c6f75 /arch/powerpc/lib
parent		e467e104bb7482170b79f516d2025e7cfcaaa733 (diff)
parent		d09ec7387184eba9e3030496f0451204090ff610 (diff)

Merge commit 'paulus-perf/master' into next

Diffstat (limited to 'arch/powerpc/lib')
-rw-r--r--	arch/powerpc/lib/Makefile	    5
-rw-r--r--	arch/powerpc/lib/ldstfp.S	  375
-rw-r--r--	arch/powerpc/lib/sstep.c	 1514
3 files changed, 1847 insertions, 47 deletions
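The code added below is all reached through the existing emulate_step() entry point declared in asm/sstep.h; the merge does not change that interface. As a rough, illustrative sketch only (it is not part of this commit, and the helper name is made up), a consumer such as the xmon, kprobes or hw_breakpoint single-step path would use it along these lines:

	#include <linux/types.h>
	#include <asm/ptrace.h>		/* struct pt_regs */
	#include <asm/sstep.h>		/* emulate_step() */

	/*
	 * Hypothetical helper: ask the emulator to execute the instruction
	 * word 'instr' (already fetched from regs->nip) instead of taking a
	 * hardware single step.
	 *
	 * emulate_step() returns 1 if it emulated the instruction and
	 * advanced regs->nip, 0 if the instruction is not handled, and -1
	 * for instructions that must not be stepped at all (e.g. rfid, or a
	 * mtmsr/mtmsrd that would clear MSR_RI).
	 */
	static int try_emulate_insn(struct pt_regs *regs, unsigned int instr)
	{
		int ret = emulate_step(regs, instr);

		if (ret > 0)
			return 1;	/* done: register state already updated */
		if (ret < 0)
			return -1;	/* caller must not single-step this one */
		return 0;		/* fall back to a real hardware single step */
	}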
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 111da1c03a11..5bb89c828070 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -18,8 +18,9 @@ obj-$(CONFIG_HAS_IOMEM)	+= devres.o
 
 obj-$(CONFIG_PPC64)	+= copypage_64.o copyuser_64.o \
 			   memcpy_64.o usercopy_64.o mem_64.o string.o
-obj-$(CONFIG_XMON)	+= sstep.o
-obj-$(CONFIG_KPROBES)	+= sstep.o
+obj-$(CONFIG_XMON)	+= sstep.o ldstfp.o
+obj-$(CONFIG_KPROBES)	+= sstep.o ldstfp.o
+obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= sstep.o ldstfp.o
 
 ifeq ($(CONFIG_PPC64),y)
 obj-$(CONFIG_SMP)	+= locks.o
diff --git a/arch/powerpc/lib/ldstfp.S b/arch/powerpc/lib/ldstfp.S
new file mode 100644
index 000000000000..f6448636baf5
--- /dev/null
+++ b/arch/powerpc/lib/ldstfp.S
@@ -0,0 +1,375 @@
+/*
+ * Floating-point, VMX/Altivec and VSX loads and stores
+ * for use in instruction emulation.
+ *
+ * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/processor.h>
+#include <asm/ppc_asm.h>
+#include <asm/ppc-opcode.h>
+#include <asm/reg.h>
+#include <asm/asm-offsets.h>
+#include <linux/errno.h>
+
+#define STKFRM	(PPC_MIN_STKFRM + 16)
+
+	.macro	extab	instr,handler
+	.section	__ex_table,"a"
+	PPC_LONG	\instr,\handler
+	.previous
+	.endm
+
+	.macro	inst32	op
+reg = 0
+	.rept	32
+20:	\op	reg,0,r4
+	b	3f
+	extab	20b,99f
+reg = reg + 1
+	.endr
+	.endm
+
+/* Get the contents of frN into fr0; N is in r3. */
+_GLOBAL(get_fpr)
+	mflr	r0
+	rlwinm	r3,r3,3,0xf8
+	bcl	20,31,1f
+	blr			/* fr0 is already in fr0 */
+	nop
+reg = 1
+	.rept	31
+	fmr	fr0,reg
+	blr
+reg = reg + 1
+	.endr
+1:	mflr	r5
+	add	r5,r3,r5
+	mtctr	r5
+	mtlr	r0
+	bctr
+
+/* Put the contents of fr0 into frN; N is in r3. */
+_GLOBAL(put_fpr)
+	mflr	r0
+	rlwinm	r3,r3,3,0xf8
+	bcl	20,31,1f
+	blr			/* fr0 is already in fr0 */
+	nop
+reg = 1
+	.rept	31
+	fmr	reg,fr0
+	blr
+reg = reg + 1
+	.endr
+1:	mflr	r5
+	add	r5,r3,r5
+	mtctr	r5
+	mtlr	r0
+	bctr
+
+/* Load FP reg N from float at *p.  N is in r3, p in r4. */
+_GLOBAL(do_lfs)
+	PPC_STLU r1,-STKFRM(r1)
+	mflr	r0
+	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
+	mfmsr	r6
+	ori	r7,r6,MSR_FP
+	cmpwi	cr7,r3,0
+	mtmsrd	r7
+	isync
+	beq	cr7,1f
+	stfd	fr0,STKFRM-16(r1)
+1:	li	r9,-EFAULT
+2:	lfs	fr0,0(r4)
+	li	r9,0
+3:	bl	put_fpr
+	beq	cr7,4f
+	lfd	fr0,STKFRM-16(r1)
+4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
+	mtlr	r0
+	mtmsrd	r6
+	isync
+	mr	r3,r9
+	addi	r1,r1,STKFRM
+	blr
+	extab	2b,3b
+
+/* Load FP reg N from double at *p.  N is in r3, p in r4. */
+_GLOBAL(do_lfd)
+	PPC_STLU r1,-STKFRM(r1)
+	mflr	r0
+	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
+	mfmsr	r6
+	ori	r7,r6,MSR_FP
+	cmpwi	cr7,r3,0
+	mtmsrd	r7
+	isync
+	beq	cr7,1f
+	stfd	fr0,STKFRM-16(r1)
+1:	li	r9,-EFAULT
+2:	lfd	fr0,0(r4)
+	li	r9,0
+3:	beq	cr7,4f
+	bl	put_fpr
+	lfd	fr0,STKFRM-16(r1)
+4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
+	mtlr	r0
+	mtmsrd	r6
+	isync
+	mr	r3,r9
+	addi	r1,r1,STKFRM
+	blr
+	extab	2b,3b
+
+/* Store FP reg N to float at *p.  N is in r3, p in r4. */
+_GLOBAL(do_stfs)
+	PPC_STLU r1,-STKFRM(r1)
+	mflr	r0
+	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
+	mfmsr	r6
+	ori	r7,r6,MSR_FP
+	cmpwi	cr7,r3,0
+	mtmsrd	r7
+	isync
+	beq	cr7,1f
+	stfd	fr0,STKFRM-16(r1)
+	bl	get_fpr
+1:	li	r9,-EFAULT
+2:	stfs	fr0,0(r4)
+	li	r9,0
+3:	beq	cr7,4f
+	lfd	fr0,STKFRM-16(r1)
+4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
+	mtlr	r0
+	mtmsrd	r6
+	isync
+	mr	r3,r9
+	addi	r1,r1,STKFRM
+	blr
+	extab	2b,3b
+
+/* Store FP reg N to double at *p.  N is in r3, p in r4. */
+_GLOBAL(do_stfd)
+	PPC_STLU r1,-STKFRM(r1)
+	mflr	r0
+	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
+	mfmsr	r6
+	ori	r7,r6,MSR_FP
+	cmpwi	cr7,r3,0
+	mtmsrd	r7
+	isync
+	beq	cr7,1f
+	stfd	fr0,STKFRM-16(r1)
+	bl	get_fpr
+1:	li	r9,-EFAULT
+2:	stfd	fr0,0(r4)
+	li	r9,0
+3:	beq	cr7,4f
+	lfd	fr0,STKFRM-16(r1)
+4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
+	mtlr	r0
+	mtmsrd	r6
+	isync
+	mr	r3,r9
+	addi	r1,r1,STKFRM
+	blr
+	extab	2b,3b
+
+#ifdef CONFIG_ALTIVEC
+/* Get the contents of vrN into vr0; N is in r3. */
+_GLOBAL(get_vr)
+	mflr	r0
+	rlwinm	r3,r3,3,0xf8
+	bcl	20,31,1f
+	blr			/* vr0 is already in vr0 */
+	nop
+reg = 1
+	.rept	31
+	vor	vr0,reg,reg	/* assembler doesn't know vmr? */
+	blr
+reg = reg + 1
+	.endr
+1:	mflr	r5
+	add	r5,r3,r5
+	mtctr	r5
+	mtlr	r0
+	bctr
+
+/* Put the contents of vr0 into vrN; N is in r3. */
+_GLOBAL(put_vr)
+	mflr	r0
+	rlwinm	r3,r3,3,0xf8
+	bcl	20,31,1f
+	blr			/* vr0 is already in vr0 */
+	nop
+reg = 1
+	.rept	31
+	vor	reg,vr0,vr0
+	blr
+reg = reg + 1
+	.endr
+1:	mflr	r5
+	add	r5,r3,r5
+	mtctr	r5
+	mtlr	r0
+	bctr
+
+/* Load vector reg N from *p.  N is in r3, p in r4. */
+_GLOBAL(do_lvx)
+	PPC_STLU r1,-STKFRM(r1)
+	mflr	r0
+	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
+	mfmsr	r6
+	oris	r7,r6,MSR_VEC@h
+	cmpwi	cr7,r3,0
+	li	r8,STKFRM-16
+	mtmsrd	r7
+	isync
+	beq	cr7,1f
+	stvx	vr0,r1,r8
+1:	li	r9,-EFAULT
+2:	lvx	vr0,0,r4
+	li	r9,0
+3:	beq	cr7,4f
+	bl	put_vr
+	lvx	vr0,r1,r8
+4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
+	mtlr	r0
+	mtmsrd	r6
+	isync
+	mr	r3,r9
+	addi	r1,r1,STKFRM
+	blr
+	extab	2b,3b
+
+/* Store vector reg N to *p.  N is in r3, p in r4. */
+_GLOBAL(do_stvx)
+	PPC_STLU r1,-STKFRM(r1)
+	mflr	r0
+	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
+	mfmsr	r6
+	oris	r7,r6,MSR_VEC@h
+	cmpwi	cr7,r3,0
+	li	r8,STKFRM-16
+	mtmsrd	r7
+	isync
+	beq	cr7,1f
+	stvx	vr0,r1,r8
+	bl	get_vr
+1:	li	r9,-EFAULT
+2:	stvx	vr0,0,r4
+	li	r9,0
+3:	beq	cr7,4f
+	lvx	vr0,r1,r8
+4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
+	mtlr	r0
+	mtmsrd	r6
+	isync
+	mr	r3,r9
+	addi	r1,r1,STKFRM
+	blr
+	extab	2b,3b
+#endif /* CONFIG_ALTIVEC */
+
+#ifdef CONFIG_VSX
+/* Get the contents of vsrN into vsr0; N is in r3. */
+_GLOBAL(get_vsr)
+	mflr	r0
+	rlwinm	r3,r3,3,0x1f8
+	bcl	20,31,1f
+	blr			/* vsr0 is already in vsr0 */
+	nop
+reg = 1
+	.rept	63
+	XXLOR(0,reg,reg)
+	blr
+reg = reg + 1
+	.endr
+1:	mflr	r5
+	add	r5,r3,r5
+	mtctr	r5
+	mtlr	r0
+	bctr
+
+/* Put the contents of vsr0 into vsrN; N is in r3. */
+_GLOBAL(put_vsr)
+	mflr	r0
+	rlwinm	r3,r3,3,0x1f8
+	bcl	20,31,1f
+	blr			/* vsr0 is already in vsr0 */
+	nop
+reg = 1
+	.rept	63
+	XXLOR(reg,0,0)
+	blr
+reg = reg + 1
+	.endr
+1:	mflr	r5
+	add	r5,r3,r5
+	mtctr	r5
+	mtlr	r0
+	bctr
+
+/* Load VSX reg N from vector doubleword *p.  N is in r3, p in r4. */
+_GLOBAL(do_lxvd2x)
+	PPC_STLU r1,-STKFRM(r1)
+	mflr	r0
+	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
+	mfmsr	r6
+	oris	r7,r6,MSR_VSX@h
+	cmpwi	cr7,r3,0
+	li	r8,STKFRM-16
+	mtmsrd	r7
+	isync
+	beq	cr7,1f
+	STXVD2X(0,r1,r8)
+1:	li	r9,-EFAULT
+2:	LXVD2X(0,0,r4)
+	li	r9,0
+3:	beq	cr7,4f
+	bl	put_vsr
+	LXVD2X(0,r1,r8)
+4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
+	mtlr	r0
+	mtmsrd	r6
+	isync
+	mr	r3,r9
+	addi	r1,r1,STKFRM
+	blr
+	extab	2b,3b
+
+/* Store VSX reg N to vector doubleword *p.  N is in r3, p in r4. */
+_GLOBAL(do_stxvd2x)
+	PPC_STLU r1,-STKFRM(r1)
+	mflr	r0
+	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
+	mfmsr	r6
+	oris	r7,r6,MSR_VSX@h
+	cmpwi	cr7,r3,0
+	li	r8,STKFRM-16
+	mtmsrd	r7
+	isync
+	beq	cr7,1f
+	STXVD2X(0,r1,r8)
+	bl	get_vsr
+1:	li	r9,-EFAULT
+2:	STXVD2X(0,0,r4)
+	li	r9,0
+3:	beq	cr7,4f
+	LXVD2X(0,r1,r8)
+4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
+	mtlr	r0
+	mtmsrd	r6
+	isync
+	mr	r3,r9
+	addi	r1,r1,STKFRM
+	blr
+	extab	2b,3b
+
+#endif /* CONFIG_VSX */
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 13b7d54f185b..e0a9858d537e 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -13,6 +13,8 @@
 #include <linux/ptrace.h>
 #include <asm/sstep.h>
 #include <asm/processor.h>
+#include <asm/uaccess.h>
+#include <asm/cputable.h>
 
 extern char system_call_common[];
 
@@ -23,6 +25,23 @@ extern char system_call_common[];
 #define MSR_MASK	0x87c0ffff
 #endif
 
+/* Bits in XER */
+#define XER_SO		0x80000000U
+#define XER_OV		0x40000000U
+#define XER_CA		0x20000000U
+
+/*
+ * Functions in ldstfp.S
+ */
+extern int do_lfs(int rn, unsigned long ea);
+extern int do_lfd(int rn, unsigned long ea);
+extern int do_stfs(int rn, unsigned long ea);
+extern int do_stfd(int rn, unsigned long ea);
+extern int do_lvx(int rn, unsigned long ea);
+extern int do_stvx(int rn, unsigned long ea);
+extern int do_lxvd2x(int rn, unsigned long ea);
+extern int do_stxvd2x(int rn, unsigned long ea);
+
 /*
  * Determine whether a conditional branch instruction would branch.
  */
@@ -46,16 +65,499 @@ static int __kprobes branch_taken(unsigned int instr, struct pt_regs *regs)
 	return 1;
 }
 
+
+static long __kprobes address_ok(struct pt_regs *regs, unsigned long ea, int nb)
+{
+	if (!user_mode(regs))
+		return 1;
+	return __access_ok(ea, nb, USER_DS);
+}
+
+/*
+ * Calculate effective address for a D-form instruction
+ */
+static unsigned long __kprobes dform_ea(unsigned int instr, struct pt_regs *regs)
+{
+	int ra;
+	unsigned long ea;
+
+	ra = (instr >> 16) & 0x1f;
+	ea = (signed short) instr;		/* sign-extend */
+	if (ra) {
+		ea += regs->gpr[ra];
+		if (instr & 0x04000000)		/* update forms */
+			regs->gpr[ra] = ea;
+	}
+#ifdef __powerpc64__
+	if (!(regs->msr & MSR_SF))
+		ea &= 0xffffffffUL;
+#endif
+	return ea;
+}
+
+#ifdef __powerpc64__
+/*
+ * Calculate effective address for a DS-form instruction
+ */
+static unsigned long __kprobes dsform_ea(unsigned int instr, struct pt_regs *regs)
+{
+	int ra;
+	unsigned long ea;
+
+	ra = (instr >> 16) & 0x1f;
+	ea = (signed short) (instr & ~3);	/* sign-extend */
+	if (ra) {
+		ea += regs->gpr[ra];
+		if ((instr & 3) == 1)		/* update forms */
+			regs->gpr[ra] = ea;
+	}
+	if (!(regs->msr & MSR_SF))
+		ea &= 0xffffffffUL;
+	return ea;
+}
+#endif /* __powerpc64 */
+
+/*
+ * Calculate effective address for an X-form instruction
+ */
+static unsigned long __kprobes xform_ea(unsigned int instr, struct pt_regs *regs,
+					int do_update)
+{
+	int ra, rb;
+	unsigned long ea;
+
+	ra = (instr >> 16) & 0x1f;
+	rb = (instr >> 11) & 0x1f;
+	ea = regs->gpr[rb];
+	if (ra) {
+		ea += regs->gpr[ra];
+		if (do_update)		/* update forms */
+			regs->gpr[ra] = ea;
+	}
+#ifdef __powerpc64__
+	if (!(regs->msr & MSR_SF))
+		ea &= 0xffffffffUL;
+#endif
+	return ea;
+}
+
+/*
+ * Return the largest power of 2, not greater than sizeof(unsigned long),
+ * such that x is a multiple of it.
+ */
+static inline unsigned long max_align(unsigned long x)
+{
+	x |= sizeof(unsigned long);
+	return x & -x;		/* isolates rightmost bit */
+}
+
+
+static inline unsigned long byterev_2(unsigned long x)
+{
+	return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
+}
+
+static inline unsigned long byterev_4(unsigned long x)
+{
+	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
+		((x & 0xff00) << 8) | ((x & 0xff) << 24);
+}
+
+#ifdef __powerpc64__
+static inline unsigned long byterev_8(unsigned long x)
+{
+	return (byterev_4(x) << 32) | byterev_4(x >> 32);
+}
+#endif
+
+static int __kprobes read_mem_aligned(unsigned long *dest, unsigned long ea,
+				      int nb)
+{
+	int err = 0;
+	unsigned long x = 0;
+
+	switch (nb) {
+	case 1:
+		err = __get_user(x, (unsigned char __user *) ea);
+		break;
+	case 2:
+		err = __get_user(x, (unsigned short __user *) ea);
+		break;
+	case 4:
+		err = __get_user(x, (unsigned int __user *) ea);
+		break;
+#ifdef __powerpc64__
+	case 8:
+		err = __get_user(x, (unsigned long __user *) ea);
+		break;
+#endif
+	}
+	if (!err)
+		*dest = x;
+	return err;
+}
+
+static int __kprobes read_mem_unaligned(unsigned long *dest, unsigned long ea,
+					int nb, struct pt_regs *regs)
+{
+	int err;
+	unsigned long x, b, c;
+
+	/* unaligned, do this in pieces */
+	x = 0;
+	for (; nb > 0; nb -= c) {
+		c = max_align(ea);
+		if (c > nb)
+			c = max_align(nb);
+		err = read_mem_aligned(&b, ea, c);
+		if (err)
+			return err;
+		x = (x << (8 * c)) + b;
+		ea += c;
+	}
+	*dest = x;
+	return 0;
+}
+
+/*
+ * Read memory at address ea for nb bytes, return 0 for success
+ * or -EFAULT if an error occurred.
+ */
+static int __kprobes read_mem(unsigned long *dest, unsigned long ea, int nb,
+			      struct pt_regs *regs)
+{
+	if (!address_ok(regs, ea, nb))
+		return -EFAULT;
+	if ((ea & (nb - 1)) == 0)
+		return read_mem_aligned(dest, ea, nb);
+	return read_mem_unaligned(dest, ea, nb, regs);
+}
+
+static int __kprobes write_mem_aligned(unsigned long val, unsigned long ea,
+				       int nb)
+{
+	int err = 0;
+
+	switch (nb) {
+	case 1:
+		err = __put_user(val, (unsigned char __user *) ea);
+		break;
+	case 2:
+		err = __put_user(val, (unsigned short __user *) ea);
+		break;
+	case 4:
+		err = __put_user(val, (unsigned int __user *) ea);
+		break;
+#ifdef __powerpc64__
+	case 8:
+		err = __put_user(val, (unsigned long __user *) ea);
+		break;
+#endif
+	}
+	return err;
+}
+
+static int __kprobes write_mem_unaligned(unsigned long val, unsigned long ea,
+					 int nb, struct pt_regs *regs)
+{
+	int err;
+	unsigned long c;
+
+	/* unaligned or little-endian, do this in pieces */
+	for (; nb > 0; nb -= c) {
+		c = max_align(ea);
+		if (c > nb)
+			c = max_align(nb);
+		err = write_mem_aligned(val >> (nb - c) * 8, ea, c);
+		if (err)
+			return err;
+		++ea;
+	}
+	return 0;
+}
+
+/*
+ * Write memory at address ea for nb bytes, return 0 for success
+ * or -EFAULT if an error occurred.
+ */
+static int __kprobes write_mem(unsigned long val, unsigned long ea, int nb,
+			       struct pt_regs *regs)
+{
+	if (!address_ok(regs, ea, nb))
+		return -EFAULT;
+	if ((ea & (nb - 1)) == 0)
+		return write_mem_aligned(val, ea, nb);
+	return write_mem_unaligned(val, ea, nb, regs);
+}
+
 /*
- * Emulate instructions that cause a transfer of control.
+ * Check the address and alignment, and call func to do the actual
+ * load or store.
+ */
+static int __kprobes do_fp_load(int rn, int (*func)(int, unsigned long),
+				unsigned long ea, int nb,
+				struct pt_regs *regs)
+{
+	int err;
+	unsigned long val[sizeof(double) / sizeof(long)];
+	unsigned long ptr;
+
+	if (!address_ok(regs, ea, nb))
+		return -EFAULT;
+	if ((ea & 3) == 0)
+		return (*func)(rn, ea);
+	ptr = (unsigned long) &val[0];
+	if (sizeof(unsigned long) == 8 || nb == 4) {
+		err = read_mem_unaligned(&val[0], ea, nb, regs);
+		ptr += sizeof(unsigned long) - nb;
+	} else {
+		/* reading a double on 32-bit */
+		err = read_mem_unaligned(&val[0], ea, 4, regs);
+		if (!err)
+			err = read_mem_unaligned(&val[1], ea + 4, 4, regs);
+	}
+	if (err)
+		return err;
+	return (*func)(rn, ptr);
+}
+
+static int __kprobes do_fp_store(int rn, int (*func)(int, unsigned long),
+				 unsigned long ea, int nb,
+				 struct pt_regs *regs)
+{
+	int err;
+	unsigned long val[sizeof(double) / sizeof(long)];
+	unsigned long ptr;
+
+	if (!address_ok(regs, ea, nb))
+		return -EFAULT;
+	if ((ea & 3) == 0)
+		return (*func)(rn, ea);
+	ptr = (unsigned long) &val[0];
+	if (sizeof(unsigned long) == 8 || nb == 4) {
+		ptr += sizeof(unsigned long) - nb;
+		err = (*func)(rn, ptr);
+		if (err)
+			return err;
+		err = write_mem_unaligned(val[0], ea, nb, regs);
+	} else {
+		/* writing a double on 32-bit */
+		err = (*func)(rn, ptr);
+		if (err)
+			return err;
+		err = write_mem_unaligned(val[0], ea, 4, regs);
+		if (!err)
+			err = write_mem_unaligned(val[1], ea + 4, 4, regs);
+	}
+	return err;
+}
+
+#ifdef CONFIG_ALTIVEC
+/* For Altivec/VMX, no need to worry about alignment */
+static int __kprobes do_vec_load(int rn, int (*func)(int, unsigned long),
+				 unsigned long ea, struct pt_regs *regs)
+{
+	if (!address_ok(regs, ea & ~0xfUL, 16))
+		return -EFAULT;
+	return (*func)(rn, ea);
+}
+
+static int __kprobes do_vec_store(int rn, int (*func)(int, unsigned long),
+				  unsigned long ea, struct pt_regs *regs)
+{
+	if (!address_ok(regs, ea & ~0xfUL, 16))
+		return -EFAULT;
+	return (*func)(rn, ea);
+}
+#endif /* CONFIG_ALTIVEC */
+
+#ifdef CONFIG_VSX
+static int __kprobes do_vsx_load(int rn, int (*func)(int, unsigned long),
+				 unsigned long ea, struct pt_regs *regs)
+{
+	int err;
+	unsigned long val[2];
+
+	if (!address_ok(regs, ea, 16))
+		return -EFAULT;
+	if ((ea & 3) == 0)
+		return (*func)(rn, ea);
+	err = read_mem_unaligned(&val[0], ea, 8, regs);
+	if (!err)
+		err = read_mem_unaligned(&val[1], ea + 8, 8, regs);
+	if (!err)
+		err = (*func)(rn, (unsigned long) &val[0]);
+	return err;
+}
+
+static int __kprobes do_vsx_store(int rn, int (*func)(int, unsigned long),
+				  unsigned long ea, struct pt_regs *regs)
+{
+	int err;
+	unsigned long val[2];
+
+	if (!address_ok(regs, ea, 16))
+		return -EFAULT;
+	if ((ea & 3) == 0)
+		return (*func)(rn, ea);
+	err = (*func)(rn, (unsigned long) &val[0]);
+	if (err)
+		return err;
+	err = write_mem_unaligned(val[0], ea, 8, regs);
+	if (!err)
+		err = write_mem_unaligned(val[1], ea + 8, 8, regs);
+	return err;
+}
+#endif /* CONFIG_VSX */
+
+#define __put_user_asmx(x, addr, err, op, cr)		\
+	__asm__ __volatile__(				\
+		"1:	" op " %2,0,%3\n"		\
+		"	mfcr	%1\n"			\
+		"2:\n"					\
+		".section .fixup,\"ax\"\n"		\
+		"3:	li	%0,%4\n"		\
+		"	b	2b\n"			\
+		".previous\n"				\
+		".section __ex_table,\"a\"\n"		\
+			PPC_LONG_ALIGN "\n"		\
+			PPC_LONG "1b,3b\n"		\
+		".previous"				\
+		: "=r" (err), "=r" (cr)			\
+		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))
+
+#define __get_user_asmx(x, addr, err, op)		\
+	__asm__ __volatile__(				\
+		"1:	"op" %1,0,%2\n"			\
+		"2:\n"					\
+		".section .fixup,\"ax\"\n"		\
+		"3:	li	%0,%3\n"		\
+		"	b	2b\n"			\
+		".previous\n"				\
+		".section __ex_table,\"a\"\n"		\
+			PPC_LONG_ALIGN "\n"		\
+			PPC_LONG "1b,3b\n"		\
+		".previous"				\
+		: "=r" (err), "=r" (x)			\
+		: "r" (addr), "i" (-EFAULT), "0" (err))
+
+#define __cacheop_user_asmx(addr, err, op)		\
+	__asm__ __volatile__(				\
+		"1:	"op" 0,%1\n"			\
+		"2:\n"					\
+		".section .fixup,\"ax\"\n"		\
+		"3:	li	%0,%3\n"		\
+		"	b	2b\n"			\
+		".previous\n"				\
+		".section __ex_table,\"a\"\n"		\
+			PPC_LONG_ALIGN "\n"		\
+			PPC_LONG "1b,3b\n"		\
+		".previous"				\
+		: "=r" (err)				\
+		: "r" (addr), "i" (-EFAULT), "0" (err))
+
+static void __kprobes set_cr0(struct pt_regs *regs, int rd)
+{
+	long val = regs->gpr[rd];
+
+	regs->ccr = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
+#ifdef __powerpc64__
+	if (!(regs->msr & MSR_SF))
+		val = (int) val;
+#endif
+	if (val < 0)
+		regs->ccr |= 0x80000000;
+	else if (val > 0)
+		regs->ccr |= 0x40000000;
+	else
+		regs->ccr |= 0x20000000;
+}
+
+static void __kprobes add_with_carry(struct pt_regs *regs, int rd,
+				     unsigned long val1, unsigned long val2,
+				     unsigned long carry_in)
+{
+	unsigned long val = val1 + val2;
+
+	if (carry_in)
+		++val;
+	regs->gpr[rd] = val;
+#ifdef __powerpc64__
+	if (!(regs->msr & MSR_SF)) {
+		val = (unsigned int) val;
+		val1 = (unsigned int) val1;
+	}
+#endif
+	if (val < val1 || (carry_in && val == val1))
+		regs->xer |= XER_CA;
+	else
+		regs->xer &= ~XER_CA;
+}
+
+static void __kprobes do_cmp_signed(struct pt_regs *regs, long v1, long v2,
+				    int crfld)
+{
+	unsigned int crval, shift;
+
+	crval = (regs->xer >> 31) & 1;		/* get SO bit */
+	if (v1 < v2)
+		crval |= 8;
+	else if (v1 > v2)
+		crval |= 4;
+	else
+		crval |= 2;
+	shift = (7 - crfld) * 4;
+	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
+}
+
+static void __kprobes do_cmp_unsigned(struct pt_regs *regs, unsigned long v1,
+				      unsigned long v2, int crfld)
+{
+	unsigned int crval, shift;
+
+	crval = (regs->xer >> 31) & 1;		/* get SO bit */
+	if (v1 < v2)
+		crval |= 8;
+	else if (v1 > v2)
+		crval |= 4;
+	else
+		crval |= 2;
+	shift = (7 - crfld) * 4;
+	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
+}
+
+/*
+ * Elements of 32-bit rotate and mask instructions.
+ */
+#define MASK32(mb, me)	((0xffffffffUL >> (mb)) + \
+			 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
+#ifdef __powerpc64__
+#define MASK64_L(mb)	(~0UL >> (mb))
+#define MASK64_R(me)	((signed long)-0x8000000000000000L >> (me))
+#define MASK64(mb, me)	(MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
+#define DATA32(x)	(((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
+#else
+#define DATA32(x)	(x)
+#endif
+#define ROTATE(x, n)	((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
+
+/*
+ * Emulate instructions that cause a transfer of control,
+ * loads and stores, and a few other instructions.
  * Returns 1 if the step was emulated, 0 if not,
  * or -1 if the instruction is one that should not be stepped,
  * such as an rfid, or a mtmsrd that would clear MSR_RI.
  */
 int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 {
-	unsigned int opcode, rs, rb, rd, spr;
+	unsigned int opcode, ra, rb, rd, spr, u;
 	unsigned long int imm;
+	unsigned long int val, val2;
+	unsigned long int ea;
+	unsigned int cr, mb, me, sh;
+	int err;
+	unsigned long old_ra;
+	long ival;
 
 	opcode = instr >> 26;
 	switch (opcode) {
@@ -78,7 +580,13 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 		 * entry code works.  If that is changed, this will
 		 * need to be changed also.
 		 */
+		if (regs->gpr[0] == 0x1ebe &&
+		    cpu_has_feature(CPU_FTR_REAL_LE)) {
+			regs->msr ^= MSR_LE;
+			goto instr_done;
+		}
 		regs->gpr[9] = regs->gpr[13];
+		regs->gpr[10] = MSR_KERNEL;
 		regs->gpr[11] = regs->nip + 4;
 		regs->gpr[12] = regs->msr & MSR_MASK;
 		regs->gpr[13] = (unsigned long) get_paca();
@@ -102,9 +610,9 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 		regs->nip = imm;
 		return 1;
 	case 19:
-		switch (instr & 0x7fe) {
-		case 0x20:	/* bclr */
-		case 0x420:	/* bcctr */
+		switch ((instr >> 1) & 0x3ff) {
+		case 16:	/* bclr */
+		case 528:	/* bcctr */
 			imm = (instr & 0x400)? regs->ctr: regs->link;
 			regs->nip += 4;
 			if ((regs->msr & MSR_SF) == 0) {
@@ -116,30 +624,233 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 			if (branch_taken(instr, regs))
 				regs->nip = imm;
 			return 1;
-		case 0x24:	/* rfid, scary */
+
+		case 18:	/* rfid, scary */
 			return -1;
+
+		case 150:	/* isync */
+			isync();
+			goto instr_done;
+
+		case 33:	/* crnor */
+		case 129:	/* crandc */
+		case 193:	/* crxor */
+		case 225:	/* crnand */
+		case 257:	/* crand */
+		case 289:	/* creqv */
+		case 417:	/* crorc */
+		case 449:	/* cror */
+			ra = (instr >> 16) & 0x1f;
+			rb = (instr >> 11) & 0x1f;
+			rd = (instr >> 21) & 0x1f;
+			ra = (regs->ccr >> (31 - ra)) & 1;
+			rb = (regs->ccr >> (31 - rb)) & 1;
+			val = (instr >> (6 + ra * 2 + rb)) & 1;
+			regs->ccr = (regs->ccr & ~(1UL << (31 - rd))) |
+				(val << (31 - rd));
+			goto instr_done;
+		}
+		break;
+	case 31:
+		switch ((instr >> 1) & 0x3ff) {
+		case 598:	/* sync */
+#ifdef __powerpc64__
+			switch ((instr >> 21) & 3) {
+			case 1:		/* lwsync */
+				asm volatile("lwsync" : : : "memory");
+				goto instr_done;
+			case 2:		/* ptesync */
+				asm volatile("ptesync" : : : "memory");
+				goto instr_done;
+			}
+#endif
+			mb();
+			goto instr_done;
+
+		case 854:	/* eieio */
+			eieio();
+			goto instr_done;
+		}
+		break;
+	}
+
+	/* Following cases refer to regs->gpr[], so we need all regs */
+	if (!FULL_REGS(regs))
+		return 0;
+
+	rd = (instr >> 21) & 0x1f;
+	ra = (instr >> 16) & 0x1f;
+	rb = (instr >> 11) & 0x1f;
+
+	switch (opcode) {
+	case 7:		/* mulli */
+		regs->gpr[rd] = regs->gpr[ra] * (short) instr;
+		goto instr_done;
+
+	case 8:		/* subfic */
+		imm = (short) instr;
+		add_with_carry(regs, rd, ~regs->gpr[ra], imm, 1);
+		goto instr_done;
+
+	case 10:	/* cmpli */
+		imm = (unsigned short) instr;
+		val = regs->gpr[ra];
+#ifdef __powerpc64__
+		if ((rd & 1) == 0)
+			val = (unsigned int) val;
+#endif
+		do_cmp_unsigned(regs, val, imm, rd >> 2);
+		goto instr_done;
+
+	case 11:	/* cmpi */
+		imm = (short) instr;
+		val = regs->gpr[ra];
+#ifdef __powerpc64__
+		if ((rd & 1) == 0)
+			val = (int) val;
+#endif
+		do_cmp_signed(regs, val, imm, rd >> 2);
+		goto instr_done;
+
+	case 12:	/* addic */
+		imm = (short) instr;
+		add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
+		goto instr_done;
+
+	case 13:	/* addic. */
+		imm = (short) instr;
+		add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
+		set_cr0(regs, rd);
+		goto instr_done;
+
+	case 14:	/* addi */
+		imm = (short) instr;
+		if (ra)
+			imm += regs->gpr[ra];
+		regs->gpr[rd] = imm;
+		goto instr_done;
+
+	case 15:	/* addis */
+		imm = ((short) instr) << 16;
+		if (ra)
+			imm += regs->gpr[ra];
+		regs->gpr[rd] = imm;
+		goto instr_done;
+
+	case 20:	/* rlwimi */
+		mb = (instr >> 6) & 0x1f;
+		me = (instr >> 1) & 0x1f;
+		val = DATA32(regs->gpr[rd]);
+		imm = MASK32(mb, me);
+		regs->gpr[ra] = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
+		goto logical_done;
+
+	case 21:	/* rlwinm */
+		mb = (instr >> 6) & 0x1f;
+		me = (instr >> 1) & 0x1f;
+		val = DATA32(regs->gpr[rd]);
+		regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
+		goto logical_done;
+
+	case 23:	/* rlwnm */
+		mb = (instr >> 6) & 0x1f;
+		me = (instr >> 1) & 0x1f;
+		rb = regs->gpr[rb] & 0x1f;
+		val = DATA32(regs->gpr[rd]);
+		regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
+		goto logical_done;
+
+	case 24:	/* ori */
+		imm = (unsigned short) instr;
+		regs->gpr[ra] = regs->gpr[rd] | imm;
+		goto instr_done;
+
+	case 25:	/* oris */
+		imm = (unsigned short) instr;
+		regs->gpr[ra] = regs->gpr[rd] | (imm << 16);
+		goto instr_done;
+
+	case 26:	/* xori */
+		imm = (unsigned short) instr;
+		regs->gpr[ra] = regs->gpr[rd] ^ imm;
+		goto instr_done;
+
+	case 27:	/* xoris */
+		imm = (unsigned short) instr;
+		regs->gpr[ra] = regs->gpr[rd] ^ (imm << 16);
+		goto instr_done;
+
+	case 28:	/* andi. */
+		imm = (unsigned short) instr;
+		regs->gpr[ra] = regs->gpr[rd] & imm;
+		set_cr0(regs, ra);
+		goto instr_done;
+
+	case 29:	/* andis. */
+		imm = (unsigned short) instr;
+		regs->gpr[ra] = regs->gpr[rd] & (imm << 16);
+		set_cr0(regs, ra);
+		goto instr_done;
+
+#ifdef __powerpc64__
+	case 30:	/* rld* */
+		mb = ((instr >> 6) & 0x1f) | (instr & 0x20);
+		val = regs->gpr[rd];
+		if ((instr & 0x10) == 0) {
+			sh = rb | ((instr & 2) << 4);
+			val = ROTATE(val, sh);
+			switch ((instr >> 2) & 3) {
+			case 0:		/* rldicl */
+				regs->gpr[ra] = val & MASK64_L(mb);
+				goto logical_done;
+			case 1:		/* rldicr */
+				regs->gpr[ra] = val & MASK64_R(mb);
+				goto logical_done;
+			case 2:		/* rldic */
+				regs->gpr[ra] = val & MASK64(mb, 63 - sh);
+				goto logical_done;
+			case 3:		/* rldimi */
+				imm = MASK64(mb, 63 - sh);
+				regs->gpr[ra] = (regs->gpr[ra] & ~imm) |
+					(val & imm);
+				goto logical_done;
+			}
+		} else {
+			sh = regs->gpr[rb] & 0x3f;
+			val = ROTATE(val, sh);
+			switch ((instr >> 1) & 7) {
+			case 0:		/* rldcl */
+				regs->gpr[ra] = val & MASK64_L(mb);
+				goto logical_done;
+			case 1:		/* rldcr */
+				regs->gpr[ra] = val & MASK64_R(mb);
+				goto logical_done;
+			}
 		}
+#endif
+
 	case 31:
-		rd = (instr >> 21) & 0x1f;
-		switch (instr & 0x7fe) {
-		case 0xa6:	/* mfmsr */
+		switch ((instr >> 1) & 0x3ff) {
+		case 83:	/* mfmsr */
+			if (regs->msr & MSR_PR)
+				break;
 			regs->gpr[rd] = regs->msr & MSR_MASK;
-			regs->nip += 4;
-			if ((regs->msr & MSR_SF) == 0)
-				regs->nip &= 0xffffffffUL;
-			return 1;
-		case 0x124:	/* mtmsr */
+			goto instr_done;
+		case 146:	/* mtmsr */
+			if (regs->msr & MSR_PR)
+				break;
 			imm = regs->gpr[rd];
 			if ((imm & MSR_RI) == 0)
 				/* can't step mtmsr that would clear MSR_RI */
 				return -1;
 			regs->msr = imm;
-			regs->nip += 4;
-			return 1;
+			goto instr_done;
 #ifdef CONFIG_PPC64
-		case 0x164:	/* mtmsrd */
+		case 178:	/* mtmsrd */
 			/* only MSR_EE and MSR_RI get changed if bit 15 set */
 			/* mtmsrd doesn't change MSR_HV and MSR_ME */
+			if (regs->msr & MSR_PR)
+				break;
 			imm = (instr & 0x10000)? 0x8002: 0xefffffffffffefffUL;
 			imm = (regs->msr & MSR_MASK & ~imm)
 				| (regs->gpr[rd] & imm);
@@ -147,57 +858,770 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 				/* can't step mtmsrd that would clear MSR_RI */
 				return -1;
 			regs->msr = imm;
-			regs->nip += 4;
-			if ((imm & MSR_SF) == 0)
-				regs->nip &= 0xffffffffUL;
-			return 1;
+			goto instr_done;
 #endif
-		case 0x26:	/* mfcr */
+		case 19:	/* mfcr */
 			regs->gpr[rd] = regs->ccr;
 			regs->gpr[rd] &= 0xffffffffUL;
-			goto mtspr_out;
-		case 0x2a6:	/* mfspr */
+			goto instr_done;
+
+		case 144:	/* mtcrf */
+			imm = 0xf0000000UL;
+			val = regs->gpr[rd];
+			for (sh = 0; sh < 8; ++sh) {
+				if (instr & (0x80000 >> sh))
+					regs->ccr = (regs->ccr & ~imm) |
+						(val & imm);
+				imm >>= 4;
+			}
+			goto instr_done;
+
+		case 339:	/* mfspr */
 			spr = (instr >> 11) & 0x3ff;
 			switch (spr) {
 			case 0x20:	/* mfxer */
 				regs->gpr[rd] = regs->xer;
 				regs->gpr[rd] &= 0xffffffffUL;
-				goto mtspr_out;
+				goto instr_done;
 			case 0x100:	/* mflr */
 				regs->gpr[rd] = regs->link;
-				goto mtspr_out;
+				goto instr_done;
 			case 0x120:	/* mfctr */
 				regs->gpr[rd] = regs->ctr;
-				goto mtspr_out;
-			}
-			break;
-		case 0x378:	/* orx */
-			if (instr & 1)
-				break;
-			rs = (instr >> 21) & 0x1f;
-			rb = (instr >> 11) & 0x1f;
-			if (rs == rb) {		/* mr */
-				rd = (instr >> 16) & 0x1f;
-				regs->gpr[rd] = regs->gpr[rs];
-				goto mtspr_out;
+				goto instr_done;
 			}
 			break;
-		case 0x3a6:	/* mtspr */
+
+		case 467:	/* mtspr */
 			spr = (instr >> 11) & 0x3ff;
 			switch (spr) {
 			case 0x20:	/* mtxer */
 				regs->xer = (regs->gpr[rd] & 0xffffffffUL);
-				goto mtspr_out;
+				goto instr_done;
 			case 0x100:	/* mtlr */
 				regs->link = regs->gpr[rd];
-				goto mtspr_out;
+				goto instr_done;
 			case 0x120:	/* mtctr */
 				regs->ctr = regs->gpr[rd];
-mtspr_out:
-				regs->nip += 4;
-				return 1;
+				goto instr_done;
 			}
+			break;
+
+/*
+ * Compare instructions
+ */
+		case 0:	/* cmp */
+			val = regs->gpr[ra];
+			val2 = regs->gpr[rb];
+#ifdef __powerpc64__
+			if ((rd & 1) == 0) {
+				/* word (32-bit) compare */
+				val = (int) val;
+				val2 = (int) val2;
+			}
+#endif
+			do_cmp_signed(regs, val, val2, rd >> 2);
+			goto instr_done;
+
+		case 32:	/* cmpl */
+			val = regs->gpr[ra];
+			val2 = regs->gpr[rb];
+#ifdef __powerpc64__
+			if ((rd & 1) == 0) {
+				/* word (32-bit) compare */
+				val = (unsigned int) val;
+				val2 = (unsigned int) val2;
+			}
+#endif
+			do_cmp_unsigned(regs, val, val2, rd >> 2);
+			goto instr_done;
+
+/*
+ * Arithmetic instructions
+ */
+		case 8:	/* subfc */
+			add_with_carry(regs, rd, ~regs->gpr[ra],
+				       regs->gpr[rb], 1);
+			goto arith_done;
+#ifdef __powerpc64__
+		case 9:	/* mulhdu */
+			asm("mulhdu %0,%1,%2" : "=r" (regs->gpr[rd]) :
+			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
+			goto arith_done;
+#endif
+		case 10:	/* addc */
+			add_with_carry(regs, rd, regs->gpr[ra],
+				       regs->gpr[rb], 0);
+			goto arith_done;
+
+		case 11:	/* mulhwu */
+			asm("mulhwu %0,%1,%2" : "=r" (regs->gpr[rd]) :
+			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
+			goto arith_done;
+
+		case 40:	/* subf */
+			regs->gpr[rd] = regs->gpr[rb] - regs->gpr[ra];
+			goto arith_done;
+#ifdef __powerpc64__
+		case 73:	/* mulhd */
+			asm("mulhd %0,%1,%2" : "=r" (regs->gpr[rd]) :
+			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
+			goto arith_done;
+#endif
+		case 75:	/* mulhw */
+			asm("mulhw %0,%1,%2" : "=r" (regs->gpr[rd]) :
+			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
+			goto arith_done;
+
+		case 104:	/* neg */
+			regs->gpr[rd] = -regs->gpr[ra];
+			goto arith_done;
+
+		case 136:	/* subfe */
+			add_with_carry(regs, rd, ~regs->gpr[ra], regs->gpr[rb],
+				       regs->xer & XER_CA);
+			goto arith_done;
+
+		case 138:	/* adde */
+			add_with_carry(regs, rd, regs->gpr[ra], regs->gpr[rb],
+				       regs->xer & XER_CA);
+			goto arith_done;
+
+		case 200:	/* subfze */
+			add_with_carry(regs, rd, ~regs->gpr[ra], 0L,
+				       regs->xer & XER_CA);
+			goto arith_done;
+
+		case 202:	/* addze */
+			add_with_carry(regs, rd, regs->gpr[ra], 0L,
+				       regs->xer & XER_CA);
+			goto arith_done;
+
+		case 232:	/* subfme */
+			add_with_carry(regs, rd, ~regs->gpr[ra], -1L,
+				       regs->xer & XER_CA);
+			goto arith_done;
+#ifdef __powerpc64__
+		case 233:	/* mulld */
+			regs->gpr[rd] = regs->gpr[ra] * regs->gpr[rb];
+			goto arith_done;
+#endif
+		case 234:	/* addme */
+			add_with_carry(regs, rd, regs->gpr[ra], -1L,
+				       regs->xer & XER_CA);
+			goto arith_done;
+
+		case 235:	/* mullw */
+			regs->gpr[rd] = (unsigned int) regs->gpr[ra] *
+				(unsigned int) regs->gpr[rb];
+			goto arith_done;
+
+		case 266:	/* add */
+			regs->gpr[rd] = regs->gpr[ra] + regs->gpr[rb];
+			goto arith_done;
+#ifdef __powerpc64__
+		case 457:	/* divdu */
+			regs->gpr[rd] = regs->gpr[ra] / regs->gpr[rb];
+			goto arith_done;
+#endif
+		case 459:	/* divwu */
+			regs->gpr[rd] = (unsigned int) regs->gpr[ra] /
+				(unsigned int) regs->gpr[rb];
+			goto arith_done;
+#ifdef __powerpc64__
+		case 489:	/* divd */
+			regs->gpr[rd] = (long int) regs->gpr[ra] /
+				(long int) regs->gpr[rb];
+			goto arith_done;
+#endif
+		case 491:	/* divw */
+			regs->gpr[rd] = (int) regs->gpr[ra] /
+				(int) regs->gpr[rb];
+			goto arith_done;
+
+
+/*
+ * Logical instructions
+ */
+		case 26:	/* cntlzw */
+			asm("cntlzw %0,%1" : "=r" (regs->gpr[ra]) :
+			    "r" (regs->gpr[rd]));
+			goto logical_done;
+#ifdef __powerpc64__
+		case 58:	/* cntlzd */
+			asm("cntlzd %0,%1" : "=r" (regs->gpr[ra]) :
+			    "r" (regs->gpr[rd]));
+			goto logical_done;
+#endif
+		case 28:	/* and */
+			regs->gpr[ra] = regs->gpr[rd] & regs->gpr[rb];
+			goto logical_done;
+
+		case 60:	/* andc */
+			regs->gpr[ra] = regs->gpr[rd] & ~regs->gpr[rb];
+			goto logical_done;
+
+		case 124:	/* nor */
+			regs->gpr[ra] = ~(regs->gpr[rd] | regs->gpr[rb]);
+			goto logical_done;
+
+		case 284:	/* eqv */
+			regs->gpr[ra] = ~(regs->gpr[rd] ^ regs->gpr[rb]);
+			goto logical_done;
+
+		case 316:	/* xor */
+			regs->gpr[ra] = regs->gpr[rd] ^ regs->gpr[rb];
+			goto logical_done;
+
+		case 412:	/* orc */
+			regs->gpr[ra] = regs->gpr[rd] | ~regs->gpr[rb];
+			goto logical_done;
+
+		case 444:	/* or */
+			regs->gpr[ra] = regs->gpr[rd] | regs->gpr[rb];
+			goto logical_done;
+
+		case 476:	/* nand */
+			regs->gpr[ra] = ~(regs->gpr[rd] & regs->gpr[rb]);
+			goto logical_done;
+
+		case 922:	/* extsh */
+			regs->gpr[ra] = (signed short) regs->gpr[rd];
+			goto logical_done;
+
+		case 954:	/* extsb */
+			regs->gpr[ra] = (signed char) regs->gpr[rd];
+			goto logical_done;
+#ifdef __powerpc64__
+		case 986:	/* extsw */
+			regs->gpr[ra] = (signed int) regs->gpr[rd];
+			goto logical_done;
+#endif
+
+/*
+ * Shift instructions
+ */
+		case 24:	/* slw */
+			sh = regs->gpr[rb] & 0x3f;
+			if (sh < 32)
+				regs->gpr[ra] = (regs->gpr[rd] << sh) & 0xffffffffUL;
+			else
+				regs->gpr[ra] = 0;
+			goto logical_done;
+
+		case 536:	/* srw */
+			sh = regs->gpr[rb] & 0x3f;
+			if (sh < 32)
+				regs->gpr[ra] = (regs->gpr[rd] & 0xffffffffUL) >> sh;
+			else
+				regs->gpr[ra] = 0;
+			goto logical_done;
+
+		case 792:	/* sraw */
+			sh = regs->gpr[rb] & 0x3f;
+			ival = (signed int) regs->gpr[rd];
+			regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
+			if (ival < 0 && (sh >= 32 || (ival & ((1 << sh) - 1)) != 0))
+				regs->xer |= XER_CA;
+			else
+				regs->xer &= ~XER_CA;
+			goto logical_done;
+
+		case 824:	/* srawi */
+			sh = rb;
+			ival = (signed int) regs->gpr[rd];
+			regs->gpr[ra] = ival >> sh;
+			if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
+				regs->xer |= XER_CA;
+			else
+				regs->xer &= ~XER_CA;
+			goto logical_done;
+
+#ifdef __powerpc64__
+		case 27:	/* sld */
+			sh = regs->gpr[rd] & 0x7f;
+			if (sh < 64)
+				regs->gpr[ra] = regs->gpr[rd] << sh;
+			else
+				regs->gpr[ra] = 0;
+			goto logical_done;
+
+		case 539:	/* srd */
+			sh = regs->gpr[rb] & 0x7f;
+			if (sh < 64)
+				regs->gpr[ra] = regs->gpr[rd] >> sh;
+			else
+				regs->gpr[ra] = 0;
+			goto logical_done;
+
+		case 794:	/* srad */
+			sh = regs->gpr[rb] & 0x7f;
+			ival = (signed long int) regs->gpr[rd];
+			regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
+			if (ival < 0 && (sh >= 64 || (ival & ((1 << sh) - 1)) != 0))
+				regs->xer |= XER_CA;
+			else
+				regs->xer &= ~XER_CA;
+			goto logical_done;
+
+		case 826:	/* sradi with sh_5 = 0 */
+		case 827:	/* sradi with sh_5 = 1 */
+			sh = rb | ((instr & 2) << 4);
+			ival = (signed long int) regs->gpr[rd];
+			regs->gpr[ra] = ival >> sh;
+			if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
+				regs->xer |= XER_CA;
+			else
+				regs->xer &= ~XER_CA;
+			goto logical_done;
+#endif /* __powerpc64__ */
+
+/*
+ * Cache instructions
+ */
+		case 54:	/* dcbst */
+			ea = xform_ea(instr, regs, 0);
+			if (!address_ok(regs, ea, 8))
+				return 0;
+			err = 0;
+			__cacheop_user_asmx(ea, err, "dcbst");
+			if (err)
+				return 0;
+			goto instr_done;
+
+		case 86:	/* dcbf */
+			ea = xform_ea(instr, regs, 0);
+			if (!address_ok(regs, ea, 8))
+				return 0;
+			err = 0;
+			__cacheop_user_asmx(ea, err, "dcbf");
+			if (err)
+				return 0;
+			goto instr_done;
+
+		case 246:	/* dcbtst */
+			if (rd == 0) {
+				ea = xform_ea(instr, regs, 0);
+				prefetchw((void *) ea);
+			}
+			goto instr_done;
+
+		case 278:	/* dcbt */
+			if (rd == 0) {
+				ea = xform_ea(instr, regs, 0);
+				prefetch((void *) ea);
+			}
+			goto instr_done;
+
 		}
+		break;
 	}
-	return 0;
+
+	/*
+	 * Following cases are for loads and stores, so bail out
+	 * if we're in little-endian mode.
+	 */
+	if (regs->msr & MSR_LE)
+		return 0;
+
+	/*
+	 * Save register RA in case it's an update form load or store
+	 * and the access faults.
+	 */
+	old_ra = regs->gpr[ra];
+
+	switch (opcode) {
+	case 31:
+		u = instr & 0x40;
+		switch ((instr >> 1) & 0x3ff) {
+		case 20:	/* lwarx */
+			ea = xform_ea(instr, regs, 0);
+			if (ea & 3)
+				break;		/* can't handle misaligned */
+			err = -EFAULT;
+			if (!address_ok(regs, ea, 4))
+				goto ldst_done;
+			err = 0;
+			__get_user_asmx(val, ea, err, "lwarx");
+			if (!err)
+				regs->gpr[rd] = val;
+			goto ldst_done;
+
+		case 150:	/* stwcx. */
+			ea = xform_ea(instr, regs, 0);
+			if (ea & 3)
+				break;		/* can't handle misaligned */
+			err = -EFAULT;
+			if (!address_ok(regs, ea, 4))
+				goto ldst_done;
+			err = 0;
+			__put_user_asmx(regs->gpr[rd], ea, err, "stwcx.", cr);
+			if (!err)
+				regs->ccr = (regs->ccr & 0x0fffffff) |
+					(cr & 0xe0000000) |
+					((regs->xer >> 3) & 0x10000000);
+			goto ldst_done;
+
+#ifdef __powerpc64__
+		case 84:	/* ldarx */
+			ea = xform_ea(instr, regs, 0);
+			if (ea & 7)
+				break;		/* can't handle misaligned */
+			err = -EFAULT;
+			if (!address_ok(regs, ea, 8))
+				goto ldst_done;
+			err = 0;
+			__get_user_asmx(val, ea, err, "ldarx");
+			if (!err)
+				regs->gpr[rd] = val;
+			goto ldst_done;
+
+		case 214:	/* stdcx. */
+			ea = xform_ea(instr, regs, 0);
+			if (ea & 7)
+				break;		/* can't handle misaligned */
+			err = -EFAULT;
+			if (!address_ok(regs, ea, 8))
+				goto ldst_done;
+			err = 0;
+			__put_user_asmx(regs->gpr[rd], ea, err, "stdcx.", cr);
+			if (!err)
+				regs->ccr = (regs->ccr & 0x0fffffff) |
+					(cr & 0xe0000000) |
+					((regs->xer >> 3) & 0x10000000);
+			goto ldst_done;
+
+		case 21:	/* ldx */
+		case 53:	/* ldux */
+			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
+				       8, regs);
+			goto ldst_done;
+#endif
+
+		case 23:	/* lwzx */
+		case 55:	/* lwzux */
+			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
+				       4, regs);
+			goto ldst_done;
+
+		case 87:	/* lbzx */
+		case 119:	/* lbzux */
+			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
+				       1, regs);
+			goto ldst_done;
+
+#ifdef CONFIG_ALTIVEC
+		case 103:	/* lvx */
+		case 359:	/* lvxl */
+			if (!(regs->msr & MSR_VEC))
+				break;
+			ea = xform_ea(instr, regs, 0);
+			err = do_vec_load(rd, do_lvx, ea, regs);
+			goto ldst_done;
+
+		case 231:	/* stvx */
+		case 487:	/* stvxl */
+			if (!(regs->msr & MSR_VEC))
+				break;
+			ea = xform_ea(instr, regs, 0);
+			err = do_vec_store(rd, do_stvx, ea, regs);
+			goto ldst_done;
+#endif /* CONFIG_ALTIVEC */
+
+#ifdef __powerpc64__
+		case 149:	/* stdx */
+		case 181:	/* stdux */
+			val = regs->gpr[rd];
+			err = write_mem(val, xform_ea(instr, regs, u), 8, regs);
+			goto ldst_done;
+#endif
+
+		case 151:	/* stwx */
+		case 183:	/* stwux */
+			val = regs->gpr[rd];
+			err = write_mem(val, xform_ea(instr, regs, u), 4, regs);
+			goto ldst_done;
+
+		case 215:	/* stbx */
+		case 247:	/* stbux */
+			val = regs->gpr[rd];
+			err = write_mem(val, xform_ea(instr, regs, u), 1, regs);
+			goto ldst_done;
+
+		case 279:	/* lhzx */
+		case 311:	/* lhzux */
+			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
+				       2, regs);
+			goto ldst_done;
+
+#ifdef __powerpc64__
+		case 341:	/* lwax */
+		case 373:	/* lwaux */
+			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
+				       4, regs);
+			if (!err)
+				regs->gpr[rd] = (signed int) regs->gpr[rd];
+			goto ldst_done;
+#endif
+
+		case 343:	/* lhax */
+		case 375:	/* lhaux */
+			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
+				       2, regs);
+			if (!err)
+				regs->gpr[rd] = (signed short) regs->gpr[rd];
+			goto ldst_done;
+
+		case 407:	/* sthx */
+		case 439:	/* sthux */
+			val = regs->gpr[rd];
+			err = write_mem(val, xform_ea(instr, regs, u), 2, regs);
+			goto ldst_done;
+
+#ifdef __powerpc64__
+		case 532:	/* ldbrx */
+			err = read_mem(&val, xform_ea(instr, regs, 0), 8, regs);
+			if (!err)
+				regs->gpr[rd] = byterev_8(val);
+			goto ldst_done;
+
+#endif
+
+		case 534:	/* lwbrx */
+			err = read_mem(&val, xform_ea(instr, regs, 0), 4, regs);
+			if (!err)
+				regs->gpr[rd] = byterev_4(val);
+			goto ldst_done;
+
+		case 535:	/* lfsx */
+		case 567:	/* lfsux */
+			if (!(regs->msr & MSR_FP))
+				break;
+			ea = xform_ea(instr, regs, u);
+			err = do_fp_load(rd, do_lfs, ea, 4, regs);
+			goto ldst_done;
+
+		case 599:	/* lfdx */
+		case 631:	/* lfdux */
+			if (!(regs->msr & MSR_FP))
+				break;
+			ea = xform_ea(instr, regs, u);
+			err = do_fp_load(rd, do_lfd, ea, 8, regs);
+			goto ldst_done;
+
+		case 663:	/* stfsx */
+		case 695:	/* stfsux */
+			if (!(regs->msr & MSR_FP))
+				break;
+			ea = xform_ea(instr, regs, u);
+			err = do_fp_store(rd, do_stfs, ea, 4, regs);
+			goto ldst_done;
+
+		case 727:	/* stfdx */
+		case 759:	/* stfdux */
+			if (!(regs->msr & MSR_FP))
+				break;
+			ea = xform_ea(instr, regs, u);
+			err = do_fp_store(rd, do_stfd, ea, 8, regs);
+			goto ldst_done;
+
+#ifdef __powerpc64__
+		case 660:	/* stdbrx */
+			val = byterev_8(regs->gpr[rd]);
+			err = write_mem(val, xform_ea(instr, regs, 0), 8, regs);
+			goto ldst_done;
+
+#endif
+		case 662:	/* stwbrx */
+			val = byterev_4(regs->gpr[rd]);
+			err = write_mem(val, xform_ea(instr, regs, 0), 4, regs);
1438 | goto ldst_done; | ||
1439 | |||
1440 | case 790: /* lhbrx */ | ||
1441 | err = read_mem(&val, xform_ea(instr, regs, 0), 2, regs); | ||
1442 | if (!err) | ||
1443 | regs->gpr[rd] = byterev_2(val); | ||
1444 | goto ldst_done; | ||
1445 | |||
1446 | case 918: /* sthbrx */ | ||
1447 | val = byterev_2(regs->gpr[rd]); | ||
1448 | err = write_mem(val, xform_ea(instr, regs, 0), 2, regs); | ||
1449 | goto ldst_done; | ||
1450 | |||
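The byte-reversed loads and stores above (lwbrx, stwbrx, lhbrx, sthbrx and the 64-bit ldbrx/stdbrx) go through byterev_2/4/8() helpers, presumably defined earlier in the file; they simply swap the byte order of a 2-, 4- or 8-byte value. Sketches of the 2- and 4-byte variants (the kernel's own code may be written differently):

static unsigned long byterev_2_sketch(unsigned long x)
{
	return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
}

static unsigned long byterev_4_sketch(unsigned long x)
{
	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
	       ((x & 0xff00) << 8) | ((x & 0xff) << 24);
}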
1451 | #ifdef CONFIG_VSX | ||
1452 | case 844: /* lxvd2x */ | ||
1453 | case 876: /* lxvd2ux */ | ||
1454 | if (!(regs->msr & MSR_VSX)) | ||
1455 | break; | ||
1456 | rd |= (instr & 1) << 5; | ||
1457 | ea = xform_ea(instr, regs, u); | ||
1458 | err = do_vsx_load(rd, do_lxvd2x, ea, regs); | ||
1459 | goto ldst_done; | ||
1460 | |||
1461 | case 972: /* stxvd2x */ | ||
1462 | case 1004: /* stxvd2ux */ | ||
1463 | if (!(regs->msr & MSR_VSX)) | ||
1464 | break; | ||
1465 | rd |= (instr & 1) << 5; | ||
1466 | ea = xform_ea(instr, regs, u); | ||
1467 | err = do_vsx_store(rd, do_stxvd2x, ea, regs); | ||
1468 | goto ldst_done; | ||
1469 | |||
1470 | #endif /* CONFIG_VSX */ | ||
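lxvd2x/stxvd2x are XX1-form instructions: the VSX register number is six bits wide, with the low five bits in the usual target-register field and the high bit (TX/SX) held in bit 0 of the instruction word, which is what the rd |= (instr & 1) << 5 lines above reconstruct. A small decode example (hypothetical helper name):

/* Recover the 6-bit VSR number of an XX1-form instruction (sketch). */
static int vsx_reg_num(unsigned int instr)
{
	int xt = (instr >> 21) & 0x1f;	/* low 5 bits: T/S field */

	xt |= (instr & 1) << 5;		/* TX/SX bit supplies bit 5 */
	return xt;			/* 0..63 */
}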
1471 | } | ||
1472 | break; | ||
1473 | |||
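The D-form cases from here on use dform_ea() (and, for the 64-bit ld/std/lwa group, dsform_ea()), again defined earlier in sstep.c: the 16-bit displacement in the low half of the instruction is sign-extended and added to (RA|0); for DS-form instructions the bottom two displacement bits are an opcode extension and are masked off first. A sketch of the D-form calculation (the update forms, lwzu and friends, additionally write the EA back into RA, which is omitted here):

/* Sketch of the D-form EA computation, not the kernel's exact helper. */
static unsigned long dform_ea_sketch(unsigned int instr, struct pt_regs *regs)
{
	int ra = (instr >> 16) & 0x1f;
	unsigned long ea = (signed short) instr;	/* sign-extended D */

	if (ra)
		ea += regs->gpr[ra];
	/* DS-form would use (signed short)(instr & ~3) for the displacement */
	if (!(regs->msr & MSR_SF))
		ea &= 0xffffffffUL;	/* 32-bit mode */
	return ea;
}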
1474 | case 32: /* lwz */ | ||
1475 | case 33: /* lwzu */ | ||
1476 | err = read_mem(®s->gpr[rd], dform_ea(instr, regs), 4, regs); | ||
1477 | goto ldst_done; | ||
1478 | |||
1479 | case 34: /* lbz */ | ||
1480 | case 35: /* lbzu */ | ||
1481 | err = read_mem(®s->gpr[rd], dform_ea(instr, regs), 1, regs); | ||
1482 | goto ldst_done; | ||
1483 | |||
1484 | case 36: /* stw */ | ||
1485 | case 37: /* stwu */ | ||
1486 | val = regs->gpr[rd]; | ||
1487 | err = write_mem(val, dform_ea(instr, regs), 4, regs); | ||
1488 | goto ldst_done; | ||
1489 | |||
1490 | case 38: /* stb */ | ||
1491 | case 39: /* stbu */ | ||
1492 | val = regs->gpr[rd]; | ||
1493 | err = write_mem(val, dform_ea(instr, regs), 1, regs); | ||
1494 | goto ldst_done; | ||
1495 | |||
1496 | case 40: /* lhz */ | ||
1497 | case 41: /* lhzu */ | ||
1498 | err = read_mem(®s->gpr[rd], dform_ea(instr, regs), 2, regs); | ||
1499 | goto ldst_done; | ||
1500 | |||
1501 | case 42: /* lha */ | ||
1502 | case 43: /* lhau */ | ||
1503 | err = read_mem(®s->gpr[rd], dform_ea(instr, regs), 2, regs); | ||
1504 | if (!err) | ||
1505 | regs->gpr[rd] = (signed short) regs->gpr[rd]; | ||
1506 | goto ldst_done; | ||
1507 | |||
1508 | case 44: /* sth */ | ||
1509 | case 45: /* sthu */ | ||
1510 | val = regs->gpr[rd]; | ||
1511 | err = write_mem(val, dform_ea(instr, regs), 2, regs); | ||
1512 | goto ldst_done; | ||
1513 | |||
1514 | case 46: /* lmw */ | ||
1515 | ra = (instr >> 16) & 0x1f; | ||
1516 | if (ra >= rd) | ||
1517 | break; /* invalid form, ra in range to load */ | ||
1518 | ea = dform_ea(instr, regs); | ||
1519 | do { | ||
1520 | err = read_mem(®s->gpr[rd], ea, 4, regs); | ||
1521 | if (err) | ||
1522 | return 0; | ||
1523 | ea += 4; | ||
1524 | } while (++rd < 32); | ||
1525 | goto instr_done; | ||
1526 | |||
1527 | case 47: /* stmw */ | ||
1528 | ea = dform_ea(instr, regs); | ||
1529 | do { | ||
1530 | err = write_mem(regs->gpr[rd], ea, 4, regs); | ||
1531 | if (err) | ||
1532 | return 0; | ||
1533 | ea += 4; | ||
1534 | } while (++rd < 32); | ||
1535 | goto instr_done; | ||
1536 | |||
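lmw and stmw move registers rd..r31 to or from consecutive words in memory; the ra >= rd check on the lmw path rejects the invalid form where the base register would itself be overwritten partway through the sequence. The register-range semantics in plain C (user-space sketch, hypothetical names):

/* What lmw rd,D(ra) does to the register file, ignoring faults. */
static void lmw_sketch(unsigned long gpr[32], int rd, const unsigned int *mem)
{
	int i;

	for (i = rd; i < 32; i++)
		gpr[i] = mem[i - rd];	/* each word is zero-extended */
}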
1537 | case 48: /* lfs */ | ||
1538 | case 49: /* lfsu */ | ||
1539 | if (!(regs->msr & MSR_FP)) | ||
1540 | break; | ||
1541 | ea = dform_ea(instr, regs); | ||
1542 | err = do_fp_load(rd, do_lfs, ea, 4, regs); | ||
1543 | goto ldst_done; | ||
1544 | |||
1545 | case 50: /* lfd */ | ||
1546 | case 51: /* lfdu */ | ||
1547 | if (!(regs->msr & MSR_FP)) | ||
1548 | break; | ||
1549 | ea = dform_ea(instr, regs); | ||
1550 | err = do_fp_load(rd, do_lfd, ea, 8, regs); | ||
1551 | goto ldst_done; | ||
1552 | |||
1553 | case 52: /* stfs */ | ||
1554 | case 53: /* stfsu */ | ||
1555 | if (!(regs->msr & MSR_FP)) | ||
1556 | break; | ||
1557 | ea = dform_ea(instr, regs); | ||
1558 | err = do_fp_store(rd, do_stfs, ea, 4, regs); | ||
1559 | goto ldst_done; | ||
1560 | |||
1561 | case 54: /* stfd */ | ||
1562 | case 55: /* stfdu */ | ||
1563 | if (!(regs->msr & MSR_FP)) | ||
1564 | break; | ||
1565 | ea = dform_ea(instr, regs); | ||
1566 | err = do_fp_store(rd, do_stfd, ea, 8, regs); | ||
1567 | goto ldst_done; | ||
1568 | |||
1569 | #ifdef __powerpc64__ | ||
1570 | case 58: /* ld[u], lwa */ | ||
1571 | switch (instr & 3) { | ||
1572 | case 0: /* ld */ | ||
1573 | err = read_mem(®s->gpr[rd], dsform_ea(instr, regs), | ||
1574 | 8, regs); | ||
1575 | goto ldst_done; | ||
1576 | case 1: /* ldu */ | ||
1577 | err = read_mem(®s->gpr[rd], dsform_ea(instr, regs), | ||
1578 | 8, regs); | ||
1579 | goto ldst_done; | ||
1580 | case 2: /* lwa */ | ||
1581 | err = read_mem(®s->gpr[rd], dsform_ea(instr, regs), | ||
1582 | 4, regs); | ||
1583 | if (!err) | ||
1584 | regs->gpr[rd] = (signed int) regs->gpr[rd]; | ||
1585 | goto ldst_done; | ||
1586 | } | ||
1587 | break; | ||
1588 | |||
1589 | case 62: /* std[u] */ | ||
1590 | val = regs->gpr[rd]; | ||
1591 | switch (instr & 3) { | ||
1592 | case 0: /* std */ | ||
1593 | err = write_mem(val, dsform_ea(instr, regs), 8, regs); | ||
1594 | goto ldst_done; | ||
1595 | case 1: /* stdu */ | ||
1596 | err = write_mem(val, dsform_ea(instr, regs), 8, regs); | ||
1597 | goto ldst_done; | ||
1598 | } | ||
1599 | break; | ||
1600 | #endif /* __powerpc64__ */ | ||
1601 | |||
1602 | } | ||
1603 | err = -EINVAL; | ||
1604 | |||
1605 | ldst_done: | ||
1606 | if (err) { | ||
1607 | regs->gpr[ra] = old_ra; | ||
1608 | return 0; /* invoke DSI if -EFAULT? */ | ||
1609 | } | ||
1610 | instr_done: | ||
1611 | regs->nip += 4; | ||
1612 | #ifdef __powerpc64__ | ||
1613 | if ((regs->msr & MSR_SF) == 0) | ||
1614 | regs->nip &= 0xffffffffUL; | ||
1615 | #endif | ||
1616 | return 1; | ||
1617 | |||
1618 | logical_done: | ||
1619 | if (instr & 1) | ||
1620 | set_cr0(regs, ra); | ||
1621 | goto instr_done; | ||
1622 | |||
1623 | arith_done: | ||
1624 | if (instr & 1) | ||
1625 | set_cr0(regs, rd); | ||
1626 | goto instr_done; | ||
203 | } | 1627 | } |
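This is the tail of emulate_step(): it returns 1 when the instruction was emulated and regs->nip already advanced (masked to 32 bits when MSR[SF] is clear), and 0 when the instruction could not be handled or its memory access faulted, leaving the caller to step the instruction for real. An illustrative caller, not taken from the kernel sources:

/* Hypothetical use from a breakpoint/single-step path. */
static void step_over_instruction_sketch(struct pt_regs *regs, unsigned int instr)
{
	if (emulate_step(regs, instr))
		return;		/* emulated; nip already points past it */
	/* otherwise fall back to hardware single-stepping the instruction */
}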