author		Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
commit		1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree		0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/sparc/lib/umul.S
tags		Linux-2.6.12-rc2, v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/sparc/lib/umul.S')
-rw-r--r--	arch/sparc/lib/umul.S	169
1 files changed, 169 insertions, 0 deletions
diff --git a/arch/sparc/lib/umul.S b/arch/sparc/lib/umul.S
new file mode 100644
index 000000000000..a784720a8a22
--- /dev/null
+++ b/arch/sparc/lib/umul.S
@@ -0,0 +1,169 @@
/* $Id: umul.S,v 1.4 1996/09/30 02:22:39 davem Exp $
 * umul.S: This routine was taken from glibc-1.09 and is covered
 * by the GNU Library General Public License Version 2.
 */


/*
 * Unsigned multiply.  Returns %o0 * %o1 in %o1%o0 (i.e., %o1 holds the
 * upper 32 bits of the 64-bit product).
 *
 * This code optimizes short (less than 13-bit) multiplies.  Short
 * multiplies require 25 instruction cycles, and long ones require
 * 45 instruction cycles.
 *
 * On return, overflow has occurred (%o1 is not zero) if and only if
 * the Z condition code is clear, allowing, e.g., the following:
 *
 *	call	.umul
 *	nop
 *	bnz	overflow	(or tnz)
 */
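The calling convention above is easy to model in C. A minimal sketch of the contract (identifier names are mine, not from the kernel): the 64-bit product comes back split across %o1 (high word) and %o0 (low word), and the Z-flag overflow test is simply "high word nonzero".

#include <stdint.h>
#include <stdio.h>

/* C model of the .umul contract: 64-bit product split across two
 * 32-bit words; "overflow" for a caller that wants only 32 bits
 * means the high word is nonzero, which Z reports on SPARC.
 */
struct umul_result { uint32_t lo, hi; };

static struct umul_result umul_model(uint32_t o0, uint32_t o1)
{
	uint64_t p = (uint64_t)o0 * o1;
	struct umul_result r = { (uint32_t)p, (uint32_t)(p >> 32) };
	return r;
}

int main(void)
{
	struct umul_result r = umul_model(0x10000, 0x10000);
	if (r.hi != 0)			/* the "bnz overflow" check */
		printf("overflow: high word = %#x\n", r.hi);
	return 0;
}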

	.globl .umul
.umul:
	or	%o0, %o1, %o4
	mov	%o0, %y		! multiplier -> Y

	andncc	%o4, 0xfff, %g0	! test bits 12..31 of *both* args
	be	Lmul_shortway	! if zero, can do it the short way
	andcc	%g0, %g0, %o4	! zero the partial product and clear N and V

	/*
	 * Long multiply.  32 steps, followed by a final shift step.
	 */
	mulscc	%o4, %o1, %o4	! 1
	mulscc	%o4, %o1, %o4	! 2
	mulscc	%o4, %o1, %o4	! 3
	mulscc	%o4, %o1, %o4	! 4
	mulscc	%o4, %o1, %o4	! 5
	mulscc	%o4, %o1, %o4	! 6
	mulscc	%o4, %o1, %o4	! 7
	mulscc	%o4, %o1, %o4	! 8
	mulscc	%o4, %o1, %o4	! 9
	mulscc	%o4, %o1, %o4	! 10
	mulscc	%o4, %o1, %o4	! 11
	mulscc	%o4, %o1, %o4	! 12
	mulscc	%o4, %o1, %o4	! 13
	mulscc	%o4, %o1, %o4	! 14
	mulscc	%o4, %o1, %o4	! 15
	mulscc	%o4, %o1, %o4	! 16
	mulscc	%o4, %o1, %o4	! 17
	mulscc	%o4, %o1, %o4	! 18
	mulscc	%o4, %o1, %o4	! 19
	mulscc	%o4, %o1, %o4	! 20
	mulscc	%o4, %o1, %o4	! 21
	mulscc	%o4, %o1, %o4	! 22
	mulscc	%o4, %o1, %o4	! 23
	mulscc	%o4, %o1, %o4	! 24
	mulscc	%o4, %o1, %o4	! 25
	mulscc	%o4, %o1, %o4	! 26
	mulscc	%o4, %o1, %o4	! 27
	mulscc	%o4, %o1, %o4	! 28
	mulscc	%o4, %o1, %o4	! 29
	mulscc	%o4, %o1, %o4	! 30
	mulscc	%o4, %o1, %o4	! 31
	mulscc	%o4, %o1, %o4	! 32
	mulscc	%o4, %g0, %o4	! final shift

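The 32 unrolled mulscc steps are a one-multiplier-bit-per-step shift-and-add multiply: %y supplies the next multiplier bit, %o4 accumulates the high half, and each step shifts the pair right by one. A rough C model of the unsigned form of that loop (my sketch, not kernel code; it sidesteps the signed N-xor-V wrinkle the next comment block explains):

#include <stdint.h>
#include <stdio.h>

/* Rough C model of the 32-step shift-and-add loop (unsigned form).
 * Each step looks at one multiplier bit (the low bit of %y on SPARC),
 * conditionally adds the multiplicand into the high word, then shifts
 * the whole hi:lo pair right by one, carry included.
 */
static uint64_t shift_add_umul(uint32_t multiplicand, uint32_t multiplier)
{
	uint32_t hi = 0, lo = multiplier;	/* %o4 = 0, %y = multiplier */
	for (int i = 0; i < 32; i++) {
		uint32_t carry = 0;
		if (lo & 1) {			/* low multiplier bit set?  */
			uint64_t s = (uint64_t)hi + multiplicand;
			hi = (uint32_t)s;
			carry = (uint32_t)(s >> 32);
		}
		lo = (lo >> 1) | (hi << 31);	/* 65-bit right shift,      */
		hi = (hi >> 1) | (carry << 31);	/* carry -> hi -> lo        */
	}
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	uint32_t a = 0xdeadbeef, b = 0x10001;
	printf("%d\n", shift_add_umul(a, b) == (uint64_t)a * b);	/* 1 */
	return 0;
}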
	/*
	 * Normally, with the shift-and-add approach, if both numbers are
	 * positive you get the correct result.  With 32-bit two's-complement
	 * numbers, -x is represented as
	 *
	 *	((2 - x/2^32) mod 2) * 2^32
	 *
	 * (the `mod 2' subtracts 1 from 1.bbbb).  To avoid lots of 2^32s,
	 * we can treat this as if the radix point were just to the left
	 * of the sign bit (multiply by 2^32), and get
	 *
	 *	-x = (2 - x) mod 2
	 *
	 * Then, ignoring the `mod 2's for convenience:
	 *
	 *	 x *  y	= xy
	 *	-x *  y	= 2y - xy
	 *	 x * -y	= 2x - xy
	 *	-x * -y	= 4 - 2x - 2y + xy
	 *
	 * For signed multiplies, we subtract (x << 32) from the partial
	 * product to fix this problem for negative multipliers (see mul.S).
	 * Because of the way the shift into the partial product is calculated
	 * (N xor V), this term is automatically removed for the multiplicand,
	 * so we don't have to adjust.
	 *
	 * But for unsigned multiplies, the high order bit wasn't a sign bit,
	 * and the correction is wrong.  So for unsigned multiplies where the
	 * high order bit is one, we end up with xy - (y << 32).  To fix it
	 * we add y << 32.
	 */
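The correction the comment derives can be checked numerically: mod 2^64, a signed 32x32->64 product differs from the unsigned one by an (operand << 32) term for each operand whose top bit is set. A small self-check (my illustration, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Numeric check of the derivation above: the unsigned product equals
 * the signed product plus (b << 32) when a's top bit is set and plus
 * (a << 32) when b's top bit is set, all mod 2^64.
 */
int main(void)
{
	uint32_t a = 0x80001234, b = 0xfffffff0;
	int64_t  s = (int64_t)(int32_t)a * (int32_t)b;
	uint64_t u = (uint64_t)s
	           + ((uint64_t)(b & (uint32_t)((int32_t)a >> 31)) << 32)
	           + ((uint64_t)(a & (uint32_t)((int32_t)b >> 31)) << 32);
	printf("%d\n", u == (uint64_t)a * b);	/* prints 1 */
	return 0;
}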
#if 0
	tst	%o1
	bl,a	1f		! if %o1 < 0 (high order bit = 1),
	add	%o4, %o0, %o4	! %o4 += %o0 (add y to upper half)

1:
	rd	%y, %o0		! get lower half of product
	retl
	addcc	%o4, %g0, %o1	! put upper half in place and set Z for %o1==0
#else
	/* Faster code from tege@sics.se.  */
	sra	%o1, 31, %o2	! make mask from sign bit
	and	%o0, %o2, %o2	! %o2 = 0 or %o0, depending on sign of %o1
	rd	%y, %o0		! get lower half of product
	retl
	addcc	%o4, %o2, %o1	! add compensation and put upper half in place
#endif
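The #else branch applies the fixup without a branch: sra broadcasts %o1's sign bit into an all-ones-or-zero mask, so the compensation operand is added exactly when the top bit was set, and the same addcc also sets Z for the overflow test. The equivalent C idiom (identifier names are mine):

#include <stdint.h>

/* Branchless form of the fixup: broadcast the operand's top bit into
 * a 0 or ~0 mask, then add the masked compensation, mirroring the
 * sra/and/addcc sequence above.
 */
static uint32_t fix_high_word(uint32_t raw_hi, uint32_t other_op,
			      uint32_t signed_op)
{
	uint32_t mask = (uint32_t)((int32_t)signed_op >> 31);	/* 0 or ~0 */
	return raw_hi + (other_op & mask);	/* addcc %o4, %o2, %o1 */
}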

Lmul_shortway:
	/*
	 * Short multiply.  12 steps, followed by a final shift step.
	 * The resulting bits are off by 12 and (32-12) = 20 bit positions,
	 * but there is no problem with %o0 being negative (unlike above),
	 * and overflow is impossible (the answer is at most 24 bits long).
	 */
	mulscc	%o4, %o1, %o4	! 1
	mulscc	%o4, %o1, %o4	! 2
	mulscc	%o4, %o1, %o4	! 3
	mulscc	%o4, %o1, %o4	! 4
	mulscc	%o4, %o1, %o4	! 5
	mulscc	%o4, %o1, %o4	! 6
	mulscc	%o4, %o1, %o4	! 7
	mulscc	%o4, %o1, %o4	! 8
	mulscc	%o4, %o1, %o4	! 9
	mulscc	%o4, %o1, %o4	! 10
	mulscc	%o4, %o1, %o4	! 11
	mulscc	%o4, %o1, %o4	! 12
	mulscc	%o4, %g0, %o4	! final shift

	/*
	 * %o4 has 20 of the bits that should be in the result; %y has
	 * the bottom 12 (as %y's top 12).  That is:
	 *
	 *	  %o4		  %y
	 *	+----------------+----------------+
	 *	| -12- |   -20-  | -12- |  -20-   |
	 *	+------(---------+------)---------+
	 *		-----result-----
	 *
	 * The 12 bits of %o4 left of the `result' area are all zero;
	 * in fact, all top 20 bits of %o4 are zero.
	 */

	rd	%y, %o5
	sll	%o4, 12, %o0	! shift middle bits left 12
	srl	%o5, 20, %o5	! shift low bits right 20
	or	%o5, %o0, %o0
	retl
	addcc	%g0, %g0, %o1	! %o1 = zero, and set Z
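In C terms the short path's final stitch-up is just two shifts and an or (a sketch, with o4 and y standing in for the registers):

#include <stdint.h>

/* Sketch of the short path's reassembly: after 12 mulscc steps the
 * product's upper bits sit in %o4 and its low 12 bits sit in the top
 * of %y, so the (at most 24-bit) result is stitched together with
 * two shifts and an or.
 */
static uint32_t short_result(uint32_t o4, uint32_t y)
{
	return (o4 << 12) | (y >> 20);	/* sll 12 ; srl 20 ; or */
}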

	.globl .umul_patch
.umul_patch:
	umul	%o0, %o1, %o0
	retl
	rd	%y, %o1
	nop
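For reference, a C model of what the hardware sequence computes; that boot-time code patches this over .umul on CPUs with a hardware multiplier is my reading of the label, not something this file states:

#include <stdint.h>

/* C model of the hardware path: a single umul leaves the low word in
 * %o0 and the high word in %y, which the delay-slot rd copies to %o1,
 * preserving the %o1:%o0 contract of the software routine.
 */
static void hw_umul(uint32_t a, uint32_t b, uint32_t *lo, uint32_t *hi)
{
	uint64_t p = (uint64_t)a * b;	/* umul %o0, %o1, %o0 ; rd %y, %o1 */
	*lo = (uint32_t)p;
	*hi = (uint32_t)(p >> 32);
}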