author     Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/m68k/math-emu/fp_arith.c
tag        v2.6.12-rc2 (Linux-2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/m68k/math-emu/fp_arith.c')
-rw-r--r--  arch/m68k/math-emu/fp_arith.c  701
1 file changed, 701 insertions(+), 0 deletions(-)
diff --git a/arch/m68k/math-emu/fp_arith.c b/arch/m68k/math-emu/fp_arith.c
new file mode 100644
index 000000000000..08f286db3c5a
--- /dev/null
+++ b/arch/m68k/math-emu/fp_arith.c
@@ -0,0 +1,701 @@
/*

   fp_arith.c: floating-point math routines for the Linux-m68k
   floating point emulator.

   Copyright (c) 1998-1999 David Huggins-Daines.

   Somewhat based on the AlphaLinux floating point emulator, by David
   Mosberger-Tang.

   You may copy, modify, and redistribute this file under the terms of
   the GNU General Public License, version 2, or any later version, at
   your convenience.
 */

#include "fp_emu.h"
#include "multi_arith.h"
#include "fp_arith.h"

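/* All operands are struct fp_ext values (see fp_emu.h): a sign, a biased
   exponent (bias 0x3fff), a 64-bit mantissa with the leading bit kept
   explicit, and a lowmant byte of extra low-order bits used for rounding. */
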
const struct fp_ext fp_QNaN =
{
        .exp = 0x7fff,
        .mant = { .m64 = ~0 }
};

const struct fp_ext fp_Inf =
{
        .exp = 0x7fff,
};

/* let's start with the easy ones */

struct fp_ext *
fp_fabs(struct fp_ext *dest, struct fp_ext *src)
{
        dprint(PINSTR, "fabs\n");

        fp_monadic_check(dest, src);

        dest->sign = 0;

        return dest;
}

struct fp_ext *
fp_fneg(struct fp_ext *dest, struct fp_ext *src)
{
        dprint(PINSTR, "fneg\n");

        fp_monadic_check(dest, src);

        dest->sign = !dest->sign;

        return dest;
}

/* Now, the slightly harder ones */

/* fp_fadd: Implements the kernel of the FADD, FSADD, FDADD, FSUB,
   FDSUB, and FCMP instructions. */

struct fp_ext *
fp_fadd(struct fp_ext *dest, struct fp_ext *src)
{
        int diff;

        dprint(PINSTR, "fadd\n");

        fp_dyadic_check(dest, src);

        if (IS_INF(dest)) {
                /* infinity - infinity == NaN */
                if (IS_INF(src) && (src->sign != dest->sign))
                        fp_set_nan(dest);
                return dest;
        }
        if (IS_INF(src)) {
                fp_copy_ext(dest, src);
                return dest;
        }

        if (IS_ZERO(dest)) {
                if (IS_ZERO(src)) {
                        if (src->sign != dest->sign) {
                                if (FPDATA->rnd == FPCR_ROUND_RM)
                                        dest->sign = 1;
                                else
                                        dest->sign = 0;
                        }
                } else
                        fp_copy_ext(dest, src);
                return dest;
        }

        dest->lowmant = src->lowmant = 0;

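        /* Align the radix points: denormalize whichever operand has the
           smaller exponent so that both share the larger one. */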
        if ((diff = dest->exp - src->exp) > 0)
                fp_denormalize(src, diff);
        else if ((diff = -diff) > 0)
                fp_denormalize(dest, diff);

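        /* Same signs: add the mantissas; a carry out is handed to
           fp_addcarry(), which adjusts the exponent or flags overflow.
           Opposite signs: subtract the smaller mantissa from the larger
           so the result stays positive, flipping the sign if the
           operands had to be swapped. */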
        if (dest->sign == src->sign) {
                if (fp_addmant(dest, src))
                        if (!fp_addcarry(dest))
                                return dest;
        } else {
                if (dest->mant.m64 < src->mant.m64) {
                        fp_submant(dest, src, dest);
                        dest->sign = !dest->sign;
                } else
                        fp_submant(dest, dest, src);
        }

        return dest;
}

/* fp_fsub: Implements the kernel of the FSUB, FSSUB, and FDSUB
   instructions.

   Remember that the arguments are in assembler-syntax order! */

struct fp_ext *
fp_fsub(struct fp_ext *dest, struct fp_ext *src)
{
        dprint(PINSTR, "fsub ");

        src->sign = !src->sign;
        return fp_fadd(dest, src);
}


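/* fp_fcmp: Implements the kernel of the FCMP instruction.

   The subtraction is done into a scratch register so that dest is left
   untouched; only the condition codes derived from the result matter. */
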
struct fp_ext *
fp_fcmp(struct fp_ext *dest, struct fp_ext *src)
{
        dprint(PINSTR, "fcmp ");

        FPDATA->temp[1] = *dest;
        src->sign = !src->sign;
        return fp_fadd(&FPDATA->temp[1], src);
}

struct fp_ext *
fp_ftst(struct fp_ext *dest, struct fp_ext *src)
{
        dprint(PINSTR, "ftst\n");

        (void)dest;

        return src;
}

struct fp_ext *
fp_fmul(struct fp_ext *dest, struct fp_ext *src)
{
        union fp_mant128 temp;
        int exp;

        dprint(PINSTR, "fmul\n");

        fp_dyadic_check(dest, src);

        /* calculate the correct sign now, as it's necessary for infinities */
        dest->sign = src->sign ^ dest->sign;

        /* Handle infinities */
        if (IS_INF(dest)) {
                if (IS_ZERO(src))
                        fp_set_nan(dest);
                return dest;
        }
        if (IS_INF(src)) {
                if (IS_ZERO(dest))
                        fp_set_nan(dest);
                else
                        fp_copy_ext(dest, src);
                return dest;
        }

        /* Of course, as we all know, zero * anything = zero. You may
           not have known that it might be a positive or negative
           zero... */
        if (IS_ZERO(dest) || IS_ZERO(src)) {
                dest->exp = 0;
                dest->mant.m64 = 0;
                dest->lowmant = 0;

                return dest;
        }

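        /* Adding two biased exponents counts the 0x3fff bias twice.
           Subtracting 0x3ffe (rather than 0x3fff) leaves the result biased
           one too high, anticipating the carry when the mantissa product
           reaches 2.0; the exponent is decremented again below if that
           carry does not happen. */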
        exp = dest->exp + src->exp - 0x3ffe;

        /* shift up the mantissa for denormalized numbers,
           so that the highest bit is set, this makes the
           shift of the result below easier */
        if ((long)dest->mant.m32[0] >= 0)
                exp -= fp_overnormalize(dest);
        if ((long)src->mant.m32[0] >= 0)
                exp -= fp_overnormalize(src);

        /* now, do a 64-bit multiply with expansion */
        fp_multiplymant(&temp, dest, src);

        /* normalize it back to 64 bits and stuff it back into the
           destination struct */
        if ((long)temp.m32[0] > 0) {
                exp--;
                fp_putmant128(dest, &temp, 1);
        } else
                fp_putmant128(dest, &temp, 0);

        if (exp >= 0x7fff) {
                fp_set_ovrflw(dest);
                return dest;
        }
        dest->exp = exp;
        if (exp < 0) {
                fp_set_sr(FPSR_EXC_UNFL);
                fp_denormalize(dest, -exp);
        }

        return dest;
}

/* fp_fdiv: Implements the "kernel" of the FDIV, FSDIV, FDDIV and
   FSGLDIV instructions.

   Note that the order of the operands is counter-intuitive: instead
   of src / dest, the result is actually dest / src. */

struct fp_ext *
fp_fdiv(struct fp_ext *dest, struct fp_ext *src)
{
        union fp_mant128 temp;
        int exp;

        dprint(PINSTR, "fdiv\n");

        fp_dyadic_check(dest, src);

        /* calculate the correct sign now, as it's necessary for infinities */
        dest->sign = src->sign ^ dest->sign;

        /* Handle infinities */
        if (IS_INF(dest)) {
                /* infinity / infinity = NaN (quiet, as always) */
                if (IS_INF(src))
                        fp_set_nan(dest);
                /* infinity / anything else = infinity (with appropriate sign) */
                return dest;
        }
        if (IS_INF(src)) {
                /* anything / infinity = zero (with appropriate sign) */
                dest->exp = 0;
                dest->mant.m64 = 0;
                dest->lowmant = 0;

                return dest;
        }

        /* zeroes */
        if (IS_ZERO(dest)) {
                /* zero / zero = NaN */
                if (IS_ZERO(src))
                        fp_set_nan(dest);
                /* zero / anything else = zero */
                return dest;
        }
        if (IS_ZERO(src)) {
                /* anything / zero = infinity (with appropriate sign) */
                fp_set_sr(FPSR_EXC_DZ);
                dest->exp = 0x7fff;
                dest->mant.m64 = 0;

                return dest;
        }

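        /* Subtracting the biased exponents cancels the bias, so it has to
           be added back in. */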
        exp = dest->exp - src->exp + 0x3fff;

        /* shift up the mantissa for denormalized numbers,
           so that the highest bit is set, this makes lots
           of things below easier */
        if ((long)dest->mant.m32[0] >= 0)
                exp -= fp_overnormalize(dest);
        if ((long)src->mant.m32[0] >= 0)
                exp -= fp_overnormalize(src);

        /* now, do the 64-bit divide */
        fp_dividemant(&temp, dest, src);

        /* normalize it back to 64 bits and stuff it back into the
           destination struct */
        if (!temp.m32[0]) {
                exp--;
                fp_putmant128(dest, &temp, 32);
        } else
                fp_putmant128(dest, &temp, 31);

        if (exp >= 0x7fff) {
                fp_set_ovrflw(dest);
                return dest;
        }
        dest->exp = exp;
        if (exp < 0) {
                fp_set_sr(FPSR_EXC_UNFL);
                fp_denormalize(dest, -exp);
        }

        return dest;
}

struct fp_ext *
fp_fsglmul(struct fp_ext *dest, struct fp_ext *src)
{
        int exp;

        dprint(PINSTR, "fsglmul\n");

        fp_dyadic_check(dest, src);

        /* calculate the correct sign now, as it's necessary for infinities */
        dest->sign = src->sign ^ dest->sign;

        /* Handle infinities */
        if (IS_INF(dest)) {
                if (IS_ZERO(src))
                        fp_set_nan(dest);
                return dest;
        }
        if (IS_INF(src)) {
                if (IS_ZERO(dest))
                        fp_set_nan(dest);
                else
                        fp_copy_ext(dest, src);
                return dest;
        }

        /* Of course, as we all know, zero * anything = zero. You may
           not have known that it might be a positive or negative
           zero... */
        if (IS_ZERO(dest) || IS_ZERO(src)) {
                dest->exp = 0;
                dest->mant.m64 = 0;
                dest->lowmant = 0;

                return dest;
        }

        exp = dest->exp + src->exp - 0x3ffe;

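        /* FSGLMUL only has to deliver single precision: the mantissas are
           truncated to their upper 24 bits, and one 32x32->64 multiply of
           the high words replaces the full 64x64 product. */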
        /* do a 32-bit multiply */
        fp_mul64(dest->mant.m32[0], dest->mant.m32[1],
                 dest->mant.m32[0] & 0xffffff00,
                 src->mant.m32[0] & 0xffffff00);

        if (exp >= 0x7fff) {
                fp_set_ovrflw(dest);
                return dest;
        }
        dest->exp = exp;
        if (exp < 0) {
                fp_set_sr(FPSR_EXC_UNFL);
                fp_denormalize(dest, -exp);
        }

        return dest;
}

struct fp_ext *
fp_fsgldiv(struct fp_ext *dest, struct fp_ext *src)
{
        int exp;
        unsigned long quot, rem;

        dprint(PINSTR, "fsgldiv\n");

        fp_dyadic_check(dest, src);

        /* calculate the correct sign now, as it's necessary for infinities */
        dest->sign = src->sign ^ dest->sign;

        /* Handle infinities */
        if (IS_INF(dest)) {
                /* infinity / infinity = NaN (quiet, as always) */
                if (IS_INF(src))
                        fp_set_nan(dest);
                /* infinity / anything else = infinity (with appropriate sign) */
                return dest;
        }
        if (IS_INF(src)) {
                /* anything / infinity = zero (with appropriate sign) */
                dest->exp = 0;
                dest->mant.m64 = 0;
                dest->lowmant = 0;

                return dest;
        }

        /* zeroes */
        if (IS_ZERO(dest)) {
                /* zero / zero = NaN */
                if (IS_ZERO(src))
                        fp_set_nan(dest);
                /* zero / anything else = zero */
                return dest;
        }
        if (IS_ZERO(src)) {
                /* anything / zero = infinity (with appropriate sign) */
                fp_set_sr(FPSR_EXC_DZ);
                dest->exp = 0x7fff;
                dest->mant.m64 = 0;

                return dest;
        }

        exp = dest->exp - src->exp + 0x3fff;

        dest->mant.m32[0] &= 0xffffff00;
        src->mant.m32[0] &= 0xffffff00;

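        /* When the dest mantissa is >= the src mantissa, the 64/32-bit
           quotient would not fit in 32 bits, so one multiple of src is
           subtracted first and the implicit leading bit is put back after
           shifting the quotient down by one. Otherwise the quotient comes
           out normalized one place lower and the exponent is decremented
           instead. The low word only keeps the shifted-out bit and the
           remainder as sticky bits for rounding. */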
        /* do the 32-bit divide */
        if (dest->mant.m32[0] >= src->mant.m32[0]) {
                fp_sub64(dest->mant, src->mant);
                fp_div64(quot, rem, dest->mant.m32[0], 0, src->mant.m32[0]);
                dest->mant.m32[0] = 0x80000000 | (quot >> 1);
                dest->mant.m32[1] = (quot & 1) | rem;   /* only for rounding */
        } else {
                fp_div64(quot, rem, dest->mant.m32[0], 0, src->mant.m32[0]);
                dest->mant.m32[0] = quot;
                dest->mant.m32[1] = rem;                /* only for rounding */
                exp--;
        }

        if (exp >= 0x7fff) {
                fp_set_ovrflw(dest);
                return dest;
        }
        dest->exp = exp;
        if (exp < 0) {
                fp_set_sr(FPSR_EXC_UNFL);
                fp_denormalize(dest, -exp);
        }

        return dest;
}

/* fp_roundint: Internal rounding function for use by several of these
   emulated instructions.

   This one rounds off the fractional part using the rounding mode
   specified. */

static void fp_roundint(struct fp_ext *dest, int mode)
{
        union fp_mant64 oldmant;
        unsigned long mask;

        if (!fp_normalize_ext(dest))
                return;

        /* infinities and zeroes */
        if (IS_INF(dest) || IS_ZERO(dest))
                return;

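        /* The exponent bias is 0x3fff, so exponents up to 0x3ffe mean
           |x| < 1 (no integer bits at all), 0x3fff..0x401e leave 1 to 32
           integer bits in m32[0], 0x401f..0x403e leave the rest in m32[1],
           and anything larger is already an integer. */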
        /* first truncate the lower bits */
        oldmant = dest->mant;
        switch (dest->exp) {
        case 0 ... 0x3ffe:
                dest->mant.m64 = 0;
                break;
        case 0x3fff ... 0x401e:
                dest->mant.m32[0] &= 0xffffffffU << (0x401e - dest->exp);
                dest->mant.m32[1] = 0;
                if (oldmant.m64 == dest->mant.m64)
                        return;
                break;
        case 0x401f ... 0x403e:
                dest->mant.m32[1] &= 0xffffffffU << (0x403e - dest->exp);
                if (oldmant.m32[1] == dest->mant.m32[1])
                        return;
                break;
        default:
                return;
        }
        fp_set_sr(FPSR_EXC_INEX2);

        /* We might want to normalize upwards here... however, since
           we know that this is only called on the output of fp_fdiv,
           or with the input to fp_fint or fp_fintrz, and the inputs
           to all these functions are either normal or denormalized
           (no subnormals allowed!), there's really no need.

           In the case of fp_fdiv, observe that 0x80000000 / 0xffff =
           0xffff8000, and the same holds for 128-bit / 64-bit. (i.e. the
           smallest possible normal dividend and the largest possible normal
           divisor will still produce a normal quotient, therefore, (normal
           << 64) / normal is normal in all cases) */

        switch (mode) {
        case FPCR_ROUND_RN:
                switch (dest->exp) {
                case 0 ... 0x3ffd:
                        return;
                case 0x3ffe:
                        /* As noted above, the input is always normal, so the
                           guard bit (bit 63) is always set. Therefore, the
                           only case in which we will NOT round to 1.0 is when
                           the input is exactly 0.5. */
                        if (oldmant.m64 == (1ULL << 63))
                                return;
                        break;
                case 0x3fff ... 0x401d:
                        mask = 1 << (0x401d - dest->exp);
                        if (!(oldmant.m32[0] & mask))
                                return;
                        if (oldmant.m32[0] & (mask << 1))
                                break;
                        if (!(oldmant.m32[0] << (dest->exp - 0x3ffd)) &&
                            !oldmant.m32[1])
                                return;
                        break;
                case 0x401e:
                        if (!(oldmant.m32[1] & 0x80000000))     /* guard bit */
                                return;
                        if (oldmant.m32[0] & 1)
                                break;
                        if (!(oldmant.m32[1] << 1))
                                return;
                        break;
                case 0x401f ... 0x403d:
                        mask = 1 << (0x403d - dest->exp);
                        if (!(oldmant.m32[1] & mask))
                                return;
                        if (oldmant.m32[1] & (mask << 1))
                                break;
                        if (!(oldmant.m32[1] << (dest->exp - 0x401d)))
                                return;
                        break;
                default:
                        return;
                }
                break;
        case FPCR_ROUND_RZ:
                return;
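        /* The directed modes, RM and RP, only round the magnitude up when
           the sign points in the rounding direction: negative results for
           round-toward-minus-infinity, positive results for
           round-toward-plus-infinity. Everything else truncates. */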
        default:
                if (dest->sign ^ (mode - FPCR_ROUND_RM))
                        break;
                return;
        }

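        /* Round the magnitude up by adding one unit in the last kept
           integer place; if the mantissa wraps to zero, the carry is
           absorbed by bumping the exponent and re-setting the leading
           bit. */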
        switch (dest->exp) {
        case 0 ... 0x3ffe:
                dest->exp = 0x3fff;
                dest->mant.m64 = 1ULL << 63;
                break;
        case 0x3fff ... 0x401e:
                mask = 1 << (0x401e - dest->exp);
                if (dest->mant.m32[0] += mask)
                        break;
                dest->mant.m32[0] = 0x80000000;
                dest->exp++;
                break;
        case 0x401f ... 0x403e:
                mask = 1 << (0x403e - dest->exp);
                if (dest->mant.m32[1] += mask)
                        break;
                if (dest->mant.m32[0] += 1)
                        break;
                dest->mant.m32[0] = 0x80000000;
                dest->exp++;
                break;
        }
}

/* modrem_kernel: Implementation of the FREM and FMOD instructions
   (which are exactly the same, except for the rounding used on the
   intermediate value) */

static struct fp_ext *
modrem_kernel(struct fp_ext *dest, struct fp_ext *src, int mode)
{
        struct fp_ext tmp;

        fp_dyadic_check(dest, src);

        /* Infinities and zeros */
        if (IS_INF(dest) || IS_ZERO(src)) {
                fp_set_nan(dest);
                return dest;
        }
        if (IS_ZERO(dest) || IS_INF(src))
                return dest;

        /* FIXME: there is almost certainly a smarter way to do this */
        fp_copy_ext(&tmp, dest);
        fp_fdiv(&tmp, src);             /* NOTE: src might be modified */
        fp_roundint(&tmp, mode);
        fp_fmul(&tmp, src);
        fp_fsub(dest, &tmp);

        /* set the quotient byte */
        fp_set_quotient((dest->mant.m64 & 0x7f) | (dest->sign << 7));
        return dest;
}

/* fp_fmod: Implements the kernel of the FMOD instruction.

   Again, the argument order is backwards. The result, as defined in
   the Motorola manuals, is:

   fmod(src,dest) = (dest - (src * floor(dest / src))) */

struct fp_ext *
fp_fmod(struct fp_ext *dest, struct fp_ext *src)
{
        dprint(PINSTR, "fmod\n");
        return modrem_kernel(dest, src, FPCR_ROUND_RZ);
}

/* fp_frem: Implements the kernel of the FREM instruction.

   frem(src,dest) = (dest - (src * round(dest / src)))
 */

struct fp_ext *
fp_frem(struct fp_ext *dest, struct fp_ext *src)
{
        dprint(PINSTR, "frem\n");
        return modrem_kernel(dest, src, FPCR_ROUND_RN);
}

struct fp_ext *
fp_fint(struct fp_ext *dest, struct fp_ext *src)
{
        dprint(PINSTR, "fint\n");

        fp_copy_ext(dest, src);

        fp_roundint(dest, FPDATA->rnd);

        return dest;
}

struct fp_ext *
fp_fintrz(struct fp_ext *dest, struct fp_ext *src)
{
        dprint(PINSTR, "fintrz\n");

        fp_copy_ext(dest, src);

        fp_roundint(dest, FPCR_ROUND_RZ);

        return dest;
}

struct fp_ext *
fp_fscale(struct fp_ext *dest, struct fp_ext *src)
{
        int scale, oldround;

        dprint(PINSTR, "fscale\n");

        fp_dyadic_check(dest, src);

        /* Infinities */
        if (IS_INF(src)) {
                fp_set_nan(dest);
                return dest;
        }
        if (IS_INF(dest))
                return dest;

        /* zeroes */
        if (IS_ZERO(src) || IS_ZERO(dest))
                return dest;

        /* Source exponent out of range */
        if (src->exp >= 0x400c) {
                fp_set_ovrflw(dest);
                return dest;
        }

        /* src must be rounded with round to zero. */
        oldround = FPDATA->rnd;
        FPDATA->rnd = FPCR_ROUND_RZ;
        scale = fp_conv_ext2long(src);
        FPDATA->rnd = oldround;

        /* new exponent */
        scale += dest->exp;

        if (scale >= 0x7fff) {
                fp_set_ovrflw(dest);
        } else if (scale <= 0) {
                fp_set_sr(FPSR_EXC_UNFL);
                fp_denormalize(dest, -scale);
        } else
                dest->exp = scale;

        return dest;
}