path: root/arch/sparc/math-emu
author	Sam Ravnborg <sam@ravnborg.org>	2008-11-16 23:07:11 -0500
committer	David S. Miller <davem@davemloft.net>	2008-12-04 12:16:56 -0500
commit	5115f39c2034cb80a050d996a2a6343bce189628 (patch)
tree	f106dcf237f9f7de49d4ad5d5f3b81968f663e46 /arch/sparc/math-emu
parent	774434bf33bdc876c2818eba34e416fe1fc7a746 (diff)
sparc64: unify math-emu
Move relevant files to sparc/math-emu and adjust path/include accordingly.

Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc/math-emu')
-rw-r--r--	arch/sparc/math-emu/math_64.c	513
-rw-r--r--	arch/sparc/math-emu/sfp-util_64.h	120
2 files changed, 633 insertions, 0 deletions
diff --git a/arch/sparc/math-emu/math_64.c b/arch/sparc/math-emu/math_64.c
new file mode 100644
index 000000000000..6863c9bde25c
--- /dev/null
+++ b/arch/sparc/math-emu/math_64.c
@@ -0,0 +1,513 @@
/*
 * arch/sparc64/math-emu/math.c
 *
 * Copyright (C) 1997,1999 Jakub Jelinek (jj@ultra.linux.cz)
 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
 *
 * Emulation routines originate from soft-fp package, which is part
 * of glibc and has appropriate copyrights in it.
 */

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>

#include <asm/fpumacro.h>
#include <asm/ptrace.h>
#include <asm/uaccess.h>

#include "sfp-util_64.h"
#include <math-emu/soft-fp.h>
#include <math-emu/single.h>
#include <math-emu/double.h>
#include <math-emu/quad.h>

/* QUAD - ftt == 3 */
#define FMOVQ	0x003
#define FNEGQ	0x007
#define FABSQ	0x00b
#define FSQRTQ	0x02b
#define FADDQ	0x043
#define FSUBQ	0x047
#define FMULQ	0x04b
#define FDIVQ	0x04f
#define FDMULQ	0x06e
#define FQTOX	0x083
#define FXTOQ	0x08c
#define FQTOS	0x0c7
#define FQTOD	0x0cb
#define FITOQ	0x0cc
#define FSTOQ	0x0cd
#define FDTOQ	0x0ce
#define FQTOI	0x0d3
/* SUBNORMAL - ftt == 2 */
#define FSQRTS	0x029
#define FSQRTD	0x02a
#define FADDS	0x041
#define FADDD	0x042
#define FSUBS	0x045
#define FSUBD	0x046
#define FMULS	0x049
#define FMULD	0x04a
#define FDIVS	0x04d
#define FDIVD	0x04e
#define FSMULD	0x069
#define FSTOX	0x081
#define FDTOX	0x082
#define FDTOS	0x0c6
#define FSTOD	0x0c9
#define FSTOI	0x0d1
#define FDTOI	0x0d2
#define FXTOS	0x084 /* Only Ultra-III generates this. */
#define FXTOD	0x088 /* Only Ultra-III generates this. */
#if 0 /* Optimized inline in sparc64/kernel/entry.S */
#define FITOS	0x0c4 /* Only Ultra-III generates this. */
#endif
#define FITOD	0x0c8 /* Only Ultra-III generates this. */
/* FPOP2 */
#define FCMPQ	0x053
#define FCMPEQ	0x057
#define FMOVQ0	0x003
#define FMOVQ1	0x043
#define FMOVQ2	0x083
#define FMOVQ3	0x0c3
#define FMOVQI	0x103
#define FMOVQX	0x183
#define FMOVQZ	0x027
#define FMOVQLE	0x047
#define FMOVQLZ	0x067
#define FMOVQNZ	0x0a7
#define FMOVQGZ	0x0c7
#define FMOVQGE	0x0e7

#define FSR_TEM_SHIFT	23UL
#define FSR_TEM_MASK	(0x1fUL << FSR_TEM_SHIFT)
#define FSR_AEXC_SHIFT	5UL
#define FSR_AEXC_MASK	(0x1fUL << FSR_AEXC_SHIFT)
#define FSR_CEXC_SHIFT	0UL
#define FSR_CEXC_MASK	(0x1fUL << FSR_CEXC_SHIFT)
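
/* V9 %fsr layout relied upon below: cexc in bits 4:0, aexc in bits 9:5,
 * fcc0 in bits 11:10, ftt in bits 16:14 and TEM in bits 27:23; the extra
 * fcc1/fcc2/fcc3 fields sit in bits 33:32, 35:34 and 37:36.
 */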

/* All routines returning an exception to raise should detect
 * such exceptions _before_ rounding to be consistent with
 * the behavior of the hardware in the implemented cases
 * (and thus with the recommendations in the V9 architecture
 * manual).
 *
 * We return 0 if a SIGFPE should be sent, 1 otherwise.
 */
static inline int record_exception(struct pt_regs *regs, int eflag)
{
	u64 fsr = current_thread_info()->xfsr[0];
	int would_trap;

	/* Determine if this exception would have generated a trap. */
	would_trap = (fsr & ((long)eflag << FSR_TEM_SHIFT)) != 0UL;

	/* If trapping, we only want to signal one bit. */
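	/* The V9 trap model delivers exactly one cexc bit per fp_exception_ieee_754
	 * trap, so when several enabled exceptions are raised at once keep only the
	 * highest-priority one: invalid > overflow > underflow > divzero > inexact.
	 */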
	if(would_trap != 0) {
		eflag &= ((fsr & FSR_TEM_MASK) >> FSR_TEM_SHIFT);
		if((eflag & (eflag - 1)) != 0) {
			if(eflag & FP_EX_INVALID)
				eflag = FP_EX_INVALID;
			else if(eflag & FP_EX_OVERFLOW)
				eflag = FP_EX_OVERFLOW;
			else if(eflag & FP_EX_UNDERFLOW)
				eflag = FP_EX_UNDERFLOW;
			else if(eflag & FP_EX_DIVZERO)
				eflag = FP_EX_DIVZERO;
			else if(eflag & FP_EX_INEXACT)
				eflag = FP_EX_INEXACT;
		}
	}

	/* Set CEXC, here is the rule:
	 *
	 * In general all FPU ops will set one and only one
	 * bit in the CEXC field, this is always the case
	 * when the IEEE exception trap is enabled in TEM.
	 */
	fsr &= ~(FSR_CEXC_MASK);
	fsr |= ((long)eflag << FSR_CEXC_SHIFT);

	/* Set the AEXC field, rule is:
	 *
	 * If a trap would not be generated, the
	 * CEXC just generated is OR'd into the
	 * existing value of AEXC.
	 */
	if(would_trap == 0)
		fsr |= ((long)eflag << FSR_AEXC_SHIFT);

	/* If trapping, indicate fault trap type IEEE. */
	if(would_trap != 0)
		fsr |= (1UL << 14);

	current_thread_info()->xfsr[0] = fsr;

	/* If we will not trap, advance the program counter over
	 * the instruction being handled.
	 */
	if(would_trap == 0) {
		regs->tpc = regs->tnpc;
		regs->tnpc += 4;
	}

	return (would_trap ? 0 : 1);
}

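/* Pointer to one FP register slot, viewed as a 32-bit single (s), a 64-bit
 * double (d), or the two 64-bit halves of a quad (q[0]/q[1]).
 */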
typedef union {
	u32 s;
	u64 d;
	u64 q[2];
} *argp;

int do_mathemu(struct pt_regs *regs, struct fpustate *f)
{
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn = 0;
	int type = 0;
	/* ftt tells which ftt it may happen in; r is rd, b is rs2 and a is rs1. The *u args tell
	   whether the argument should be packed/unpacked (0 - do not unpack/pack, 1 - unpack/pack);
	   non-u args tell the size of the argument (0 - no argument, 1 - single, 2 - double, 3 - quad). */
#define TYPE(ftt, r, ru, b, bu, a, au) type = (au << 2) | (a << 0) | (bu << 5) | (b << 3) | (ru << 8) | (r << 6) | (ftt << 9)
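/* TYPE() above packs, from bit 0 up: rs1 size (2 bits), rs1 unpack flag,
 * rs2 size (2 bits), rs2 unpack flag, rd size (2 bits), rd pack flag, and
 * the expected ftt from bit 9 up.  E.g. FADDQ's TYPE(3,3,1,3,1,3,1) gives
 * type == 0x7ff: quad rs1/rs2/rd, all unpacked/packed, ftt 3.
 */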
	int freg;
	static u64 zero[2] = { 0L, 0L };
	int flags;
	FP_DECL_EX;
	FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
	FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
	FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
	int IR;
	long XR, xfsr;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("unfinished/unimplemented FPop from kernel", regs);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		if ((insn & 0xc1f80000) == 0x81a00000) /* FPOP1 */ {
			switch ((insn >> 5) & 0x1ff) {
			/* QUAD - ftt == 3 */
			case FMOVQ:
			case FNEGQ:
			case FABSQ: TYPE(3,3,0,3,0,0,0); break;
			case FSQRTQ: TYPE(3,3,1,3,1,0,0); break;
			case FADDQ:
			case FSUBQ:
			case FMULQ:
			case FDIVQ: TYPE(3,3,1,3,1,3,1); break;
			case FDMULQ: TYPE(3,3,1,2,1,2,1); break;
			case FQTOX: TYPE(3,2,0,3,1,0,0); break;
			case FXTOQ: TYPE(3,3,1,2,0,0,0); break;
			case FQTOS: TYPE(3,1,1,3,1,0,0); break;
			case FQTOD: TYPE(3,2,1,3,1,0,0); break;
			case FITOQ: TYPE(3,3,1,1,0,0,0); break;
			case FSTOQ: TYPE(3,3,1,1,1,0,0); break;
			case FDTOQ: TYPE(3,3,1,2,1,0,0); break;
			case FQTOI: TYPE(3,1,0,3,1,0,0); break;

			/* We can get either unimplemented or unfinished
			 * for these cases. Pre-Niagara systems generate
			 * unfinished fpop for SUBNORMAL cases, and Niagara
			 * always gives unimplemented fpop for fsqrt{s,d}.
			 */
			case FSQRTS: {
				unsigned long x = current_thread_info()->xfsr[0];

				x = (x >> 14) & 0xf;
				TYPE(x,1,1,1,1,0,0);
				break;
			}

			case FSQRTD: {
				unsigned long x = current_thread_info()->xfsr[0];

				x = (x >> 14) & 0xf;
				TYPE(x,2,1,2,1,0,0);
				break;
			}

			/* SUBNORMAL - ftt == 2 */
			case FADDD:
			case FSUBD:
			case FMULD:
			case FDIVD: TYPE(2,2,1,2,1,2,1); break;
			case FADDS:
			case FSUBS:
			case FMULS:
			case FDIVS: TYPE(2,1,1,1,1,1,1); break;
			case FSMULD: TYPE(2,2,1,1,1,1,1); break;
			case FSTOX: TYPE(2,2,0,1,1,0,0); break;
			case FDTOX: TYPE(2,2,0,2,1,0,0); break;
			case FDTOS: TYPE(2,1,1,2,1,0,0); break;
			case FSTOD: TYPE(2,2,1,1,1,0,0); break;
			case FSTOI: TYPE(2,1,0,1,1,0,0); break;
			case FDTOI: TYPE(2,1,0,2,1,0,0); break;

			/* Only Ultra-III generates these */
			case FXTOS: TYPE(2,1,1,2,0,0,0); break;
			case FXTOD: TYPE(2,2,1,2,0,0,0); break;
#if 0 /* Optimized inline in sparc64/kernel/entry.S */
			case FITOS: TYPE(2,1,1,1,0,0,0); break;
#endif
			case FITOD: TYPE(2,2,1,1,0,0,0); break;
			}
		}
		else if ((insn & 0xc1f80000) == 0x81a80000) /* FPOP2 */ {
			IR = 2;
			switch ((insn >> 5) & 0x1ff) {
			case FCMPQ: TYPE(3,0,0,3,1,3,1); break;
			case FCMPEQ: TYPE(3,0,0,3,1,3,1); break;
			/* Now the conditional fmovq support */
			case FMOVQ0:
			case FMOVQ1:
			case FMOVQ2:
			case FMOVQ3:
				/* fmovq %fccX, %fY, %fZ */
				if (!((insn >> 11) & 3))
					XR = current_thread_info()->xfsr[0] >> 10;
				else
					XR = current_thread_info()->xfsr[0] >> (30 + ((insn >> 10) & 0x6));
				XR &= 3;
				IR = 0;
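				/* fcc values: 0 - equal, 1 - less, 2 - greater, 3 - unordered.
				 * The low three bits of the condition field select the test,
				 * bit 3 (checked after the switch) inverts it.
				 */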
				switch ((insn >> 14) & 0x7) {
				/* case 0: IR = 0; break; */	/* Never */
				case 1: if (XR) IR = 1; break;	/* Not Equal */
				case 2: if (XR == 1 || XR == 2) IR = 1; break;	/* Less or Greater */
				case 3: if (XR & 1) IR = 1; break;	/* Unordered or Less */
				case 4: if (XR == 1) IR = 1; break;	/* Less */
				case 5: if (XR & 2) IR = 1; break;	/* Unordered or Greater */
				case 6: if (XR == 2) IR = 1; break;	/* Greater */
				case 7: if (XR == 3) IR = 1; break;	/* Unordered */
				}
				if ((insn >> 14) & 8)
					IR ^= 1;
				break;
			case FMOVQI:
			case FMOVQX:
				/* fmovq %[ix]cc, %fY, %fZ */
				XR = regs->tstate >> 32;
				if ((insn >> 5) & 0x80)
					XR >>= 4;
				XR &= 0xf;
				IR = 0;
				freg = ((XR >> 2) ^ XR) & 2;
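				/* XR now holds the chosen icc/xcc as N,Z,V,C in bits 3..0;
				 * freg is non-zero iff N ^ V, i.e. the signed "less than"
				 * condition used by the LE/L tests below.
				 */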
				switch ((insn >> 14) & 0x7) {
				/* case 0: IR = 0; break; */	/* Never */
				case 1: if (XR & 4) IR = 1; break;	/* Equal */
				case 2: if ((XR & 4) || freg) IR = 1; break;	/* Less or Equal */
				case 3: if (freg) IR = 1; break;	/* Less */
				case 4: if (XR & 5) IR = 1; break;	/* Less or Equal Unsigned */
				case 5: if (XR & 1) IR = 1; break;	/* Carry Set */
				case 6: if (XR & 8) IR = 1; break;	/* Negative */
				case 7: if (XR & 2) IR = 1; break;	/* Overflow Set */
				}
				if ((insn >> 14) & 8)
					IR ^= 1;
				break;
			case FMOVQZ:
			case FMOVQLE:
			case FMOVQLZ:
			case FMOVQNZ:
			case FMOVQGZ:
			case FMOVQGE:
				freg = (insn >> 14) & 0x1f;
				if (!freg)
					XR = 0;
				else if (freg < 16)
					XR = regs->u_regs[freg];
				else if (test_thread_flag(TIF_32BIT)) {
					struct reg_window32 __user *win32;
					flushw_user ();
					win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
					get_user(XR, &win32->locals[freg - 16]);
				} else {
					struct reg_window __user *win;
					flushw_user ();
					win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
					get_user(XR, &win->locals[freg - 16]);
				}
				IR = 0;
				switch ((insn >> 10) & 3) {
				case 1: if (!XR) IR = 1; break;	/* Register Zero */
				case 2: if (XR <= 0) IR = 1; break;	/* Register Less Than or Equal to Zero */
				case 3: if (XR < 0) IR = 1; break;	/* Register Less Than Zero */
				}
				if ((insn >> 10) & 4)
					IR ^= 1;
				break;
			}
			if (IR == 0) {
				/* The fmov test was false. Do a nop instead */
				current_thread_info()->xfsr[0] &= ~(FSR_CEXC_MASK);
				regs->tpc = regs->tnpc;
				regs->tnpc += 4;
				return 1;
			} else if (IR == 1) {
				/* Change the instruction into plain fmovq */
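				/* Keep only the rd (bits 29:25) and rs2 (bits 4:0) fields
				 * and substitute the FPOP1 FMOVQ opcode, so the common code
				 * below performs the move.
				 */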
				insn = (insn & 0x3e00001f) | 0x81a00060;
				TYPE(3,3,0,3,0,0,0);
			}
		}
	}
	if (type) {
		argp rs1 = NULL, rs2 = NULL, rd = NULL;

		freg = (current_thread_info()->xfsr[0] >> 14) & 0xf;
		if (freg != (type >> 9))
			goto err;
		current_thread_info()->xfsr[0] &= ~0x1c000;
		freg = ((insn >> 14) & 0x1f);
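		/* Each operand's two size bits mean: 0 - none, 1 - single,
		 * 2 - double, 3 - quad.  For doubles and quads the low bit of
		 * the 5-bit register field is the high bit of the register
		 * number (%f32-%f62), and quads must be 4-register aligned.
		 */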
		switch (type & 0x3) {
		case 3: if (freg & 2) {
				current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
				goto err;
			}
		case 2: freg = ((freg & 1) << 5) | (freg & 0x1e);
		case 1: rs1 = (argp)&f->regs[freg];
			flags = (freg < 32) ? FPRS_DL : FPRS_DU;
			if (!(current_thread_info()->fpsaved[0] & flags))
				rs1 = (argp)&zero;
			break;
		}
		switch (type & 0x7) {
		case 7: FP_UNPACK_QP (QA, rs1); break;
		case 6: FP_UNPACK_DP (DA, rs1); break;
		case 5: FP_UNPACK_SP (SA, rs1); break;
		}
		freg = (insn & 0x1f);
		switch ((type >> 3) & 0x3) {
		case 3: if (freg & 2) {
				current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
				goto err;
			}
		case 2: freg = ((freg & 1) << 5) | (freg & 0x1e);
		case 1: rs2 = (argp)&f->regs[freg];
			flags = (freg < 32) ? FPRS_DL : FPRS_DU;
			if (!(current_thread_info()->fpsaved[0] & flags))
				rs2 = (argp)&zero;
			break;
		}
		switch ((type >> 3) & 0x7) {
		case 7: FP_UNPACK_QP (QB, rs2); break;
		case 6: FP_UNPACK_DP (DB, rs2); break;
		case 5: FP_UNPACK_SP (SB, rs2); break;
		}
		freg = ((insn >> 25) & 0x1f);
		switch ((type >> 6) & 0x3) {
		case 3: if (freg & 2) {
				current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
				goto err;
			}
		case 2: freg = ((freg & 1) << 5) | (freg & 0x1e);
		case 1: rd = (argp)&f->regs[freg];
			flags = (freg < 32) ? FPRS_DL : FPRS_DU;
			if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
				current_thread_info()->fpsaved[0] = FPRS_FEF;
				current_thread_info()->gsr[0] = 0;
			}
			if (!(current_thread_info()->fpsaved[0] & flags)) {
				if (freg < 32)
					memset(f->regs, 0, 32*sizeof(u32));
				else
					memset(f->regs+32, 0, 32*sizeof(u32));
			}
			current_thread_info()->fpsaved[0] |= flags;
			break;
		}
		switch ((insn >> 5) & 0x1ff) {
		/* + */
		case FADDS: FP_ADD_S (SR, SA, SB); break;
		case FADDD: FP_ADD_D (DR, DA, DB); break;
		case FADDQ: FP_ADD_Q (QR, QA, QB); break;
		/* - */
		case FSUBS: FP_SUB_S (SR, SA, SB); break;
		case FSUBD: FP_SUB_D (DR, DA, DB); break;
		case FSUBQ: FP_SUB_Q (QR, QA, QB); break;
		/* * */
		case FMULS: FP_MUL_S (SR, SA, SB); break;
		case FSMULD: FP_CONV (D, S, 1, 1, DA, SA);
			     FP_CONV (D, S, 1, 1, DB, SB);
		case FMULD: FP_MUL_D (DR, DA, DB); break;
		case FDMULQ: FP_CONV (Q, D, 2, 1, QA, DA);
			     FP_CONV (Q, D, 2, 1, QB, DB);
		case FMULQ: FP_MUL_Q (QR, QA, QB); break;
		/* / */
		case FDIVS: FP_DIV_S (SR, SA, SB); break;
		case FDIVD: FP_DIV_D (DR, DA, DB); break;
		case FDIVQ: FP_DIV_Q (QR, QA, QB); break;
		/* sqrt */
		case FSQRTS: FP_SQRT_S (SR, SB); break;
		case FSQRTD: FP_SQRT_D (DR, DB); break;
		case FSQRTQ: FP_SQRT_Q (QR, QB); break;
		/* mov */
		case FMOVQ: rd->q[0] = rs2->q[0]; rd->q[1] = rs2->q[1]; break;
		case FABSQ: rd->q[0] = rs2->q[0] & 0x7fffffffffffffffUL; rd->q[1] = rs2->q[1]; break;
		case FNEGQ: rd->q[0] = rs2->q[0] ^ 0x8000000000000000UL; rd->q[1] = rs2->q[1]; break;
		/* float to int */
		case FSTOI: FP_TO_INT_S (IR, SB, 32, 1); break;
		case FDTOI: FP_TO_INT_D (IR, DB, 32, 1); break;
		case FQTOI: FP_TO_INT_Q (IR, QB, 32, 1); break;
		case FSTOX: FP_TO_INT_S (XR, SB, 64, 1); break;
		case FDTOX: FP_TO_INT_D (XR, DB, 64, 1); break;
		case FQTOX: FP_TO_INT_Q (XR, QB, 64, 1); break;
		/* int to float */
		case FITOQ: IR = rs2->s; FP_FROM_INT_Q (QR, IR, 32, int); break;
		case FXTOQ: XR = rs2->d; FP_FROM_INT_Q (QR, XR, 64, long); break;
		/* Only Ultra-III generates these */
		case FXTOS: XR = rs2->d; FP_FROM_INT_S (SR, XR, 64, long); break;
		case FXTOD: XR = rs2->d; FP_FROM_INT_D (DR, XR, 64, long); break;
#if 0 /* Optimized inline in sparc64/kernel/entry.S */
		case FITOS: IR = rs2->s; FP_FROM_INT_S (SR, IR, 32, int); break;
#endif
		case FITOD: IR = rs2->s; FP_FROM_INT_D (DR, IR, 32, int); break;
		/* float to float */
		case FSTOD: FP_CONV (D, S, 1, 1, DR, SB); break;
		case FSTOQ: FP_CONV (Q, S, 2, 1, QR, SB); break;
		case FDTOQ: FP_CONV (Q, D, 2, 1, QR, DB); break;
		case FDTOS: FP_CONV (S, D, 1, 1, SR, DB); break;
		case FQTOS: FP_CONV (S, Q, 1, 2, SR, QB); break;
		case FQTOD: FP_CONV (D, Q, 1, 2, DR, QB); break;
		/* comparison */
		case FCMPQ:
		case FCMPEQ:
			FP_CMP_Q(XR, QB, QA, 3);
			if (XR == 3 &&
			    (((insn >> 5) & 0x1ff) == FCMPEQ ||
			     FP_ISSIGNAN_Q(QA) ||
			     FP_ISSIGNAN_Q(QB)))
				FP_SET_EXCEPTION (FP_EX_INVALID);
		}
		if (!FP_INHIBIT_RESULTS) {
			switch ((type >> 6) & 0x7) {
			case 0: xfsr = current_thread_info()->xfsr[0];
				if (XR == -1) XR = 2;
				switch (freg & 3) {
				/* fcc0, 1, 2, 3 */
				case 0: xfsr &= ~0xc00; xfsr |= (XR << 10); break;
				case 1: xfsr &= ~0x300000000UL; xfsr |= (XR << 32); break;
				case 2: xfsr &= ~0xc00000000UL; xfsr |= (XR << 34); break;
				case 3: xfsr &= ~0x3000000000UL; xfsr |= (XR << 36); break;
				}
				current_thread_info()->xfsr[0] = xfsr;
				break;
			case 1: rd->s = IR; break;
			case 2: rd->d = XR; break;
			case 5: FP_PACK_SP (rd, SR); break;
			case 6: FP_PACK_DP (rd, DR); break;
			case 7: FP_PACK_QP (rd, QR); break;
			}
		}

		if(_fex != 0)
			return record_exception(regs, _fex);

		/* Success and no exceptions detected. */
		current_thread_info()->xfsr[0] &= ~(FSR_CEXC_MASK);
		regs->tpc = regs->tnpc;
		regs->tnpc += 4;
		return 1;
	}
err:	return 0;
}
diff --git a/arch/sparc/math-emu/sfp-util_64.h b/arch/sparc/math-emu/sfp-util_64.h
new file mode 100644
index 000000000000..425d3cf01af4
--- /dev/null
+++ b/arch/sparc/math-emu/sfp-util_64.h
@@ -0,0 +1,120 @@
/*
 * arch/sparc64/math-emu/sfp-util.h
 *
 * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
 *
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <asm/byteorder.h>

#define add_ssaaaa(sh, sl, ah, al, bh, bl)			\
  __asm__ ("addcc %4,%5,%1\n\t"					\
	   "add %2,%3,%0\n\t"					\
	   "bcs,a,pn %%xcc, 1f\n\t"				\
	   "add %0, 1, %0\n"					\
	   "1:"							\
	   : "=r" ((UDItype)(sh)),				\
	     "=&r" ((UDItype)(sl))				\
	   : "r" ((UDItype)(ah)),				\
	     "r" ((UDItype)(bh)),				\
	     "r" ((UDItype)(al)),				\
	     "r" ((UDItype)(bl))				\
	   : "cc")
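
/* add_ssaaaa computes the 128-bit sum (sh:sl) = (ah:al) + (bh:bl): addcc adds
 * the low halves, the plain add forms the high half without touching the
 * condition codes, and the annulled bcs executes the "add %0, 1, %0" in its
 * delay slot only when the low-half add carried.  sub_ddmmss below is the
 * same pattern for the 128-bit difference.
 */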

#define sub_ddmmss(sh, sl, ah, al, bh, bl)			\
  __asm__ ("subcc %4,%5,%1\n\t"					\
	   "sub %2,%3,%0\n\t"					\
	   "bcs,a,pn %%xcc, 1f\n\t"				\
	   "sub %0, 1, %0\n"					\
	   "1:"							\
	   : "=r" ((UDItype)(sh)),				\
	     "=&r" ((UDItype)(sl))				\
	   : "r" ((UDItype)(ah)),				\
	     "r" ((UDItype)(bh)),				\
	     "r" ((UDItype)(al)),				\
	     "r" ((UDItype)(bl))				\
	   : "cc")

#define umul_ppmm(wh, wl, u, v)					\
  do {								\
	  UDItype tmp1, tmp2, tmp3, tmp4;			\
	  __asm__ __volatile__ (				\
		   "srl %7,0,%3\n\t"				\
		   "mulx %3,%6,%1\n\t"				\
		   "srlx %6,32,%2\n\t"				\
		   "mulx %2,%3,%4\n\t"				\
		   "sllx %4,32,%5\n\t"				\
		   "srl %6,0,%3\n\t"				\
		   "sub %1,%5,%5\n\t"				\
		   "srlx %5,32,%5\n\t"				\
		   "addcc %4,%5,%4\n\t"				\
		   "srlx %7,32,%5\n\t"				\
		   "mulx %3,%5,%3\n\t"				\
		   "mulx %2,%5,%5\n\t"				\
		   "sethi %%hi(0x80000000),%2\n\t"		\
		   "addcc %4,%3,%4\n\t"				\
		   "srlx %4,32,%4\n\t"				\
		   "add %2,%2,%2\n\t"				\
		   "movcc %%xcc,%%g0,%2\n\t"			\
		   "addcc %5,%4,%5\n\t"				\
		   "sllx %3,32,%3\n\t"				\
		   "add %1,%3,%1\n\t"				\
		   "add %5,%2,%0"				\
	   : "=r" ((UDItype)(wh)),				\
	     "=&r" ((UDItype)(wl)),				\
	     "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3), "=&r" (tmp4) \
	   : "r" ((UDItype)(u)),				\
	     "r" ((UDItype)(v))					\
	   : "cc");						\
  } while (0)
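
/* umul_ppmm above assembles the full 128-bit product (wh:wl) = u * v.  mulx
 * only returns the low 64 bits of a 64x64 multiply, so the result is built
 * from 32x32->64 partial products of the halves of u and v, with the carries
 * propagated into the high word explicitly.
 */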

#define udiv_qrnnd(q, r, n1, n0, d)				\
  do {								\
	UWtype __d1, __d0, __q1, __q0, __r1, __r0, __m;		\
	__d1 = (d >> 32);					\
	__d0 = (USItype)d;					\
								\
	__r1 = (n1) % __d1;					\
	__q1 = (n1) / __d1;					\
	__m = (UWtype) __q1 * __d0;				\
	__r1 = (__r1 << 32) | (n0 >> 32);			\
	if (__r1 < __m)						\
	  {							\
	    __q1--, __r1 += (d);				\
	    if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */ \
	      if (__r1 < __m)					\
		__q1--, __r1 += (d);				\
	  }							\
	__r1 -= __m;						\
								\
	__r0 = __r1 % __d1;					\
	__q0 = __r1 / __d1;					\
	__m = (UWtype) __q0 * __d0;				\
	__r0 = (__r0 << 32) | ((USItype)n0);			\
	if (__r0 < __m)						\
	  {							\
	    __q0--, __r0 += (d);				\
	    if (__r0 >= (d))					\
	      if (__r0 < __m)					\
		__q0--, __r0 += (d);				\
	  }							\
	__r0 -= __m;						\
								\
	(q) = (UWtype) (__q1 << 32) | __q0;			\
	(r) = __r0;						\
  } while (0)
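
/* udiv_qrnnd above divides the 128-bit value (n1:n0) by d, giving a 64-bit
 * quotient q and remainder r.  It is classic long division using the two
 * 32-bit halves of d, estimating each quotient half from the divisor's high
 * half and correcting the estimate by at most two steps; the divisor must be
 * normalized (top bit set), which is why UDIV_NEEDS_NORMALIZATION is set
 * below.
 */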

#define UDIV_NEEDS_NORMALIZATION 1

#define abort()							\
	return 0

#ifdef __BIG_ENDIAN
#define __BYTE_ORDER __BIG_ENDIAN
#else
#define __BYTE_ORDER __LITTLE_ENDIAN
#endif