author     Thomas Gleixner <tglx@linutronix.de>  2007-10-11 05:16:31 -0400
committer  Thomas Gleixner <tglx@linutronix.de>  2007-10-11 05:16:31 -0400
commit     da957e111bb0c189a4a3bf8a00caaecb59ed94ca (patch)
tree       6916075fdd3e28869dcd3dfa2cf160a74d1cb02e /arch/x86/math-emu/poly.h
parent     2ec1df4130c60d1eb49dc0fa0ed15858fede6b05 (diff)
i386: move math-emu
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/math-emu/poly.h')
-rw-r--r--  arch/x86/math-emu/poly.h  121
1 file changed, 121 insertions, 0 deletions
diff --git a/arch/x86/math-emu/poly.h b/arch/x86/math-emu/poly.h
new file mode 100644
index 000000000000..4db798114923
--- /dev/null
+++ b/arch/x86/math-emu/poly.h
@@ -0,0 +1,121 @@
/*---------------------------------------------------------------------------+
 |  poly.h                                                                   |
 |                                                                           |
 |  Header file for the FPU-emu poly*.c source files.                        |
 |                                                                           |
 | Copyright (C) 1994,1999                                                   |
 |                       W. Metzenthen, 22 Parker St, Ormond, Vic 3163,      |
 |                       Australia.  E-mail   billm@melbpc.org.au            |
 |                                                                           |
 | Declarations and definitions for functions operating on Xsig (12-byte     |
 | extended-significand) quantities.                                         |
 |                                                                           |
 +---------------------------------------------------------------------------*/

#ifndef _POLY_H
#define _POLY_H

/* This 12-byte structure is used to improve the accuracy of computation
   of transcendental functions.
   Intended to be used to get results better than 8-byte computation
   allows. 9-byte would probably be sufficient.
 */
typedef struct {
	unsigned long lsw;
	unsigned long midw;
	unsigned long msw;
} Xsig;
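/*
 * Editor's illustrative sketch -- not part of the original poly.h.  The
 * three words form one 96-bit significand: msw holds the most significant
 * 32 bits, lsw the extra low-order 32 bits that buy the added accuracy
 * (XSIG_LL below confirms that midw and msw are the 8 most significant
 * bytes).  The hypothetical helper widens an ordinary 64-bit significand
 * into that layout.
 */
static inline void Xsig_from_ll_example(Xsig *dest, unsigned long long sig64)
{
	dest->msw = (unsigned long)(sig64 >> 32);	/* bits 95..64 */
	dest->midw = (unsigned long)sig64;		/* bits 63..32 */
	dest->lsw = 0;					/* bits 31..0  */
}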

asmlinkage void mul64(unsigned long long const *a, unsigned long long const *b,
		      unsigned long long *result);
asmlinkage void polynomial_Xsig(Xsig *, const unsigned long long *x,
				const unsigned long long terms[], const int n);

asmlinkage void mul32_Xsig(Xsig *, const unsigned long mult);
asmlinkage void mul64_Xsig(Xsig *, const unsigned long long *mult);
asmlinkage void mul_Xsig_Xsig(Xsig *dest, const Xsig *mult);

asmlinkage void shr_Xsig(Xsig *, const int n);
asmlinkage int round_Xsig(Xsig *);
asmlinkage int norm_Xsig(Xsig *);
asmlinkage void div_Xsig(Xsig *x1, const Xsig *x2, const Xsig *dest);

/* Macro to extract the most significant 32 bits from a long long */
#define LL_MSW(x) (((unsigned long *)&x)[1])

/* Macro to initialize an Xsig struct */
#define MK_XSIG(a,b,c) { c, b, a }

/* Macro to access the 8 ms bytes of an Xsig as a long long */
#define XSIG_LL(x) (*(unsigned long long *)&x.midw)
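/*
 * Editor's illustrative sketch -- not part of the original poly.h.  How the
 * three macros above fit together on a little-endian 32-bit build.  The
 * constant is roughly the 96-bit significand of ln 2; the function name is
 * made up for illustration.
 */
static inline unsigned long xsig_macro_example(void)
{
	/* MK_XSIG takes (msw, midw, lsw) and reverses them to match the
	   struct layout, which stores lsw first. */
	Xsig ln2 = MK_XSIG(0xb17217f7, 0xd1cf79ab, 0xc9e3b398);

	/* XSIG_LL aliases midw and msw as one 64-bit value: midw lands in
	   the low half, msw in the high half. */
	unsigned long long top64 = XSIG_LL(ln2);

	/* LL_MSW reads the high 32 bits back out, i.e. ln2.msw here. */
	return LL_MSW(top64);
}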


/*
   Need to run gcc with optimizations on to get these to
   actually be in-line.
 */

/* Multiply two fixed-point 32 bit numbers, producing a 32 bit result.
   The answer is the ms word of the product. */
/* Some versions of gcc make it difficult to stop eax from being clobbered.
   Merely specifying that it is used doesn't work...
 */
static inline unsigned long mul_32_32(const unsigned long arg1,
				      const unsigned long arg2)
{
	int retval;
	asm volatile ("mull %2; movl %%edx,%%eax" \
		      :"=a" (retval) \
		      :"0" (arg1), "g" (arg2) \
		      :"dx");
	return retval;
}
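/*
 * Editor's note -- not part of the original poly.h.  With a compiler that
 * handles 64-bit unsigned long long, the same fixed-point product can be
 * written without inline asm; the suffixed name is hypothetical.
 */
static inline unsigned long mul_32_32_c(const unsigned long arg1,
					const unsigned long arg2)
{
	/* Keep only the upper 32 bits of the 64-bit product, which is what
	   the "movl %edx,%eax" above does. */
	return (unsigned long)(((unsigned long long)arg1 * arg2) >> 32);
}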


/* Add the 12 byte Xsig x2 to Xsig dest, with no checks for overflow. */
static inline void add_Xsig_Xsig(Xsig *dest, const Xsig *x2)
{
	asm volatile ("movl %1,%%edi; movl %2,%%esi;\n"
		      "movl (%%esi),%%eax; addl %%eax,(%%edi);\n"
		      "movl 4(%%esi),%%eax; adcl %%eax,4(%%edi);\n"
		      "movl 8(%%esi),%%eax; adcl %%eax,8(%%edi);\n"
		      :"=g" (*dest):"g" (dest), "g" (x2)
		      :"ax","si","di");
}
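/*
 * Editor's note -- not part of the original poly.h.  A plain C sketch of
 * the 96-bit add above, detecting each carry by comparison (assuming the
 * 32-bit longs this header already relies on); like the asm, any carry out
 * of the msw is simply dropped.  The suffixed name is hypothetical.
 */
static inline void add_Xsig_Xsig_c(Xsig *dest, const Xsig *x2)
{
	unsigned long carry;

	dest->lsw += x2->lsw;
	carry = (dest->lsw < x2->lsw);		/* sum wrapped past 2^32 */

	dest->midw += x2->midw + carry;
	carry = (dest->midw < x2->midw) ||
		(carry && dest->midw == x2->midw);

	dest->msw += x2->msw + carry;
}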


/* Add the 12 byte Xsig x2 to Xsig dest, adjust exp if overflow occurs. */
/* Note: the constraints in the asm statement didn't always work properly
   with gcc 2.5.8.  Changing from using edi to using ecx got around the
   problem, but keep fingers crossed! */
static inline void add_two_Xsig(Xsig *dest, const Xsig *x2, long int *exp)
{
	asm volatile ("movl %2,%%ecx; movl %3,%%esi;\n"
		      "movl (%%esi),%%eax; addl %%eax,(%%ecx);\n"
		      "movl 4(%%esi),%%eax; adcl %%eax,4(%%ecx);\n"
		      "movl 8(%%esi),%%eax; adcl %%eax,8(%%ecx);\n"
		      "jnc 0f;\n"
		      "rcrl 8(%%ecx); rcrl 4(%%ecx); rcrl (%%ecx)\n"
		      "movl %4,%%ecx; incl (%%ecx)\n"
		      "movl $1,%%eax; jmp 1f;\n"
		      "0: xorl %%eax,%%eax;\n"
		      "1:\n"
		      :"=g" (*exp), "=g" (*dest)
		      :"g" (dest), "g" (x2), "g" (exp)
		      :"cx","si","ax");
}
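/*
 * Editor's note -- not part of the original poly.h.  The same operation in
 * plain C (again assuming 32-bit longs): add the two significands and, if
 * the sum overflows 96 bits, shift it right one place with the carry
 * becoming the new top bit (the rcr sequence above) and bump *exp to
 * compensate.  The suffixed name is hypothetical.
 */
static inline void add_two_Xsig_c(Xsig *dest, const Xsig *x2, long int *exp)
{
	unsigned long long sum;

	sum = (unsigned long long)dest->lsw + x2->lsw;
	dest->lsw = (unsigned long)sum;
	sum = (unsigned long long)dest->midw + x2->midw + (sum >> 32);
	dest->midw = (unsigned long)sum;
	sum = (unsigned long long)dest->msw + x2->msw + (sum >> 32);
	dest->msw = (unsigned long)sum;

	if (sum >> 32) {			/* carried out of bit 95 */
		dest->lsw = (dest->lsw >> 1) | (dest->midw << 31);
		dest->midw = (dest->midw >> 1) | (dest->msw << 31);
		dest->msw = (dest->msw >> 1) | 0x80000000;
		(*exp)++;
	}
}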


/* Negate (subtract from 1.0) the 12 byte Xsig */
/* This is faster in a loop on my 386 than using the "neg" instruction. */
static inline void negate_Xsig(Xsig *x)
{
	asm volatile("movl %1,%%esi;\n"
		     "xorl %%ecx,%%ecx;\n"
		     "movl %%ecx,%%eax; subl (%%esi),%%eax; movl %%eax,(%%esi);\n"
		     "movl %%ecx,%%eax; sbbl 4(%%esi),%%eax; movl %%eax,4(%%esi);\n"
		     "movl %%ecx,%%eax; sbbl 8(%%esi),%%eax; movl %%eax,8(%%esi);\n"
		     :"=g" (*x):"g" (x):"si","ax","cx");
}
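/*
 * Editor's note -- not part of the original poly.h.  In plain C the asm
 * above is 0 - x computed across all 96 bits with borrow propagation;
 * with the significand read as a fraction scaled by 2^96, that behaves as
 * the "subtract from 1.0" described in the comment (modulo the wrap at
 * exactly 1.0).  The suffixed name is hypothetical.
 */
static inline void negate_Xsig_c(Xsig *x)
{
	unsigned long borrow, next;

	/* A word produces an outgoing borrow whenever (word + incoming
	   borrow) is non-zero before it is negated. */
	borrow = (x->lsw != 0);
	x->lsw = 0UL - x->lsw;

	next = (x->midw != 0) || borrow;
	x->midw = 0UL - x->midw - borrow;

	x->msw = 0UL - x->msw - next;
}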

#endif /* _POLY_H */