author     Nicolas Pitre <nico@cam.org>                2005-06-29 13:10:54 -0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>  2005-06-29 13:10:54 -0400
commit     c7e788766610bdc764d7150e69ace2e0c4196cf0 (patch)
tree       f2b0012a9677206b4662cf7c17a3ffa8ebc8c75d /arch/arm/lib
parent     aaaa3f9e51245be3215ff67bb3c7aaf5abb82f00 (diff)
[PATCH] ARM: 2723/2: remove __udivdi3 and __umoddi3 from the kernel
Patch from Nicolas Pitre

These routines are big, slow and generally not recommended for kernel code; they are not even present on i386. The conclusion is that kernel code can get away with do_div() alone.

Signed-off-by: Nicolas Pitre <nico@cam.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
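The replacement the commit message points to is do_div() from <asm/div64.h>, which divides a 64-bit value in place by a 32-bit divisor and returns the 32-bit remainder, so no libgcc-style 64-by-64 helper is needed. A minimal sketch of that pattern (the helper function and its parameters are invented for illustration, not taken from this patch):

    #include <linux/types.h>
    #include <asm/div64.h>

    /* Hypothetical example: average a 64-bit byte count over a 32-bit
     * sample count without pulling in __udivdi3/__umoddi3. */
    static u32 avg_bytes(u64 total_bytes, u32 nr_samples)
    {
            u32 rem;

            /* do_div() modifies its first argument in place: afterwards
             * total_bytes holds the quotient, and the remainder is
             * returned as a 32-bit value. */
            rem = do_div(total_bytes, nr_samples);
            (void)rem;              /* remainder not needed here */

            return (u32)total_bytes;
    }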
Diffstat (limited to 'arch/arm/lib')
-rw-r--r--  arch/arm/lib/Makefile   |   2
-rw-r--r--  arch/arm/lib/longlong.h | 183
-rw-r--r--  arch/arm/lib/udivdi3.c  | 222
3 files changed, 1 insertion(+), 406 deletions(-)
diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
index c0e65833ffc4..8725d63e4219 100644
--- a/arch/arm/lib/Makefile
+++ b/arch/arm/lib/Makefile
@@ -11,7 +11,7 @@ lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \
 		   strnlen_user.o strchr.o strrchr.o testchangebit.o \
 		   testclearbit.o testsetbit.o uaccess.o getuser.o \
 		   putuser.o ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
-		   ucmpdi2.o udivdi3.o lib1funcs.o div64.o \
+		   ucmpdi2.o lib1funcs.o div64.o \
 		   io-readsb.o io-writesb.o io-readsl.o io-writesl.o
 
 ifeq ($(CONFIG_CPU_32v3),y)
diff --git a/arch/arm/lib/longlong.h b/arch/arm/lib/longlong.h
deleted file mode 100644
index 90ae647e4d76..000000000000
--- a/arch/arm/lib/longlong.h
+++ /dev/null
@@ -1,183 +0,0 @@
1/* longlong.h -- based on code from gcc-2.95.3
2
3 definitions for mixed size 32/64 bit arithmetic.
4 Copyright (C) 1991, 92, 94, 95, 96, 1997, 1998 Free Software Foundation, Inc.
5
6 This definition file is free software; you can redistribute it
7 and/or modify it under the terms of the GNU General Public
8 License as published by the Free Software Foundation; either
9 version 2, or (at your option) any later version.
10
11 This definition file is distributed in the hope that it will be
12 useful, but WITHOUT ANY WARRANTY; without even the implied
13 warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
14 See the GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place - Suite 330,
19 Boston, MA 02111-1307, USA. */
20
21/* Borrowed from GCC 2.95.3, I Molton 29/07/01 */
22
23#ifndef SI_TYPE_SIZE
24#define SI_TYPE_SIZE 32
25#endif
26
27#define __BITS4 (SI_TYPE_SIZE / 4)
28#define __ll_B (1L << (SI_TYPE_SIZE / 2))
29#define __ll_lowpart(t) ((u32) (t) % __ll_B)
30#define __ll_highpart(t) ((u32) (t) / __ll_B)
31
32/* Define auxiliary asm macros.
33
34 1) umul_ppmm(high_prod, low_prod, multipler, multiplicand)
35 multiplies two u32 integers MULTIPLER and MULTIPLICAND,
36 and generates a two-part u32 product in HIGH_PROD and
37 LOW_PROD.
38
39 2) __umulsidi3(a,b) multiplies two u32 integers A and B,
40 and returns a u64 product. This is just a variant of umul_ppmm.
41
42 3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
43 denominator) divides a two-word unsigned integer, composed by the
44 integers HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and
45 places the quotient in QUOTIENT and the remainder in REMAINDER.
46 HIGH_NUMERATOR must be less than DENOMINATOR for correct operation.
47 If, in addition, the most significant bit of DENOMINATOR must be 1,
48 then the pre-processor symbol UDIV_NEEDS_NORMALIZATION is defined to 1.
49
50 4) sdiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
51 denominator). Like udiv_qrnnd but the numbers are signed. The
52 quotient is rounded towards 0.
53
54 5) count_leading_zeros(count, x) counts the number of zero-bits from
55 the msb to the first non-zero bit. This is the number of steps X
56 needs to be shifted left to set the msb. Undefined for X == 0.
57
58 6) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1,
59 high_addend_2, low_addend_2) adds two two-word unsigned integers,
60 composed by HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and
61 LOW_ADDEND_2 respectively. The result is placed in HIGH_SUM and
62 LOW_SUM. Overflow (i.e. carry out) is not stored anywhere, and is
63 lost.
64
65 7) sub_ddmmss(high_difference, low_difference, high_minuend,
66 low_minuend, high_subtrahend, low_subtrahend) subtracts two
67 two-word unsigned integers, composed by HIGH_MINUEND_1 and
68 LOW_MINUEND_1, and HIGH_SUBTRAHEND_2 and LOW_SUBTRAHEND_2
69 respectively. The result is placed in HIGH_DIFFERENCE and
70 LOW_DIFFERENCE. Overflow (i.e. carry out) is not stored anywhere,
71 and is lost.
72
73 If any of these macros are left undefined for a particular CPU,
74 C macros are used. */
75
76#if defined (__arm__)
77#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
78 __asm__ ("adds %1, %4, %5 \n\
79 adc %0, %2, %3" \
80 : "=r" ((u32) (sh)), \
81 "=&r" ((u32) (sl)) \
82 : "%r" ((u32) (ah)), \
83 "rI" ((u32) (bh)), \
84 "%r" ((u32) (al)), \
85 "rI" ((u32) (bl)))
86#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
87 __asm__ ("subs %1, %4, %5 \n\
88 sbc %0, %2, %3" \
89 : "=r" ((u32) (sh)), \
90 "=&r" ((u32) (sl)) \
91 : "r" ((u32) (ah)), \
92 "rI" ((u32) (bh)), \
93 "r" ((u32) (al)), \
94 "rI" ((u32) (bl)))
95#define umul_ppmm(xh, xl, a, b) \
96{register u32 __t0, __t1, __t2; \
97 __asm__ ("%@ Inlined umul_ppmm \n\
98 mov %2, %5, lsr #16 \n\
99 mov %0, %6, lsr #16 \n\
100 bic %3, %5, %2, lsl #16 \n\
101 bic %4, %6, %0, lsl #16 \n\
102 mul %1, %3, %4 \n\
103 mul %4, %2, %4 \n\
104 mul %3, %0, %3 \n\
105 mul %0, %2, %0 \n\
106 adds %3, %4, %3 \n\
107 addcs %0, %0, #65536 \n\
108 adds %1, %1, %3, lsl #16 \n\
109 adc %0, %0, %3, lsr #16" \
110 : "=&r" ((u32) (xh)), \
111 "=r" ((u32) (xl)), \
112 "=&r" (__t0), "=&r" (__t1), "=r" (__t2) \
113 : "r" ((u32) (a)), \
114 "r" ((u32) (b)));}
115#define UMUL_TIME 20
116#define UDIV_TIME 100
117#endif /* __arm__ */
118
119#define __umulsidi3(u, v) \
120 ({DIunion __w; \
121 umul_ppmm (__w.s.high, __w.s.low, u, v); \
122 __w.ll; })
123
124#define __udiv_qrnnd_c(q, r, n1, n0, d) \
125 do { \
126 u32 __d1, __d0, __q1, __q0; \
127 u32 __r1, __r0, __m; \
128 __d1 = __ll_highpart (d); \
129 __d0 = __ll_lowpart (d); \
130 \
131 __r1 = (n1) % __d1; \
132 __q1 = (n1) / __d1; \
133 __m = (u32) __q1 * __d0; \
134 __r1 = __r1 * __ll_B | __ll_highpart (n0); \
135 if (__r1 < __m) \
136 { \
137 __q1--, __r1 += (d); \
138 if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */\
139 if (__r1 < __m) \
140 __q1--, __r1 += (d); \
141 } \
142 __r1 -= __m; \
143 \
144 __r0 = __r1 % __d1; \
145 __q0 = __r1 / __d1; \
146 __m = (u32) __q0 * __d0; \
147 __r0 = __r0 * __ll_B | __ll_lowpart (n0); \
148 if (__r0 < __m) \
149 { \
150 __q0--, __r0 += (d); \
151 if (__r0 >= (d)) \
152 if (__r0 < __m) \
153 __q0--, __r0 += (d); \
154 } \
155 __r0 -= __m; \
156 \
157 (q) = (u32) __q1 * __ll_B | __q0; \
158 (r) = __r0; \
159 } while (0)
160
161#define UDIV_NEEDS_NORMALIZATION 1
162#define udiv_qrnnd __udiv_qrnnd_c
163
164#define count_leading_zeros(count, x) \
165 do { \
166 u32 __xr = (x); \
167 u32 __a; \
168 \
169 if (SI_TYPE_SIZE <= 32) \
170 { \
171 __a = __xr < ((u32)1<<2*__BITS4) \
172 ? (__xr < ((u32)1<<__BITS4) ? 0 : __BITS4) \
173 : (__xr < ((u32)1<<3*__BITS4) ? 2*__BITS4 : 3*__BITS4); \
174 } \
175 else \
176 { \
177 for (__a = SI_TYPE_SIZE - 8; __a > 0; __a -= 8) \
178 if (((__xr >> __a) & 0xff) != 0) \
179 break; \
180 } \
181 \
182 (count) = SI_TYPE_SIZE - (__clz_tab[__xr >> __a] + __a); \
183 } while (0)
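For orientation, the primitives documented in the removed longlong.h above reduce to ordinary word-splitting arithmetic; for example, umul_ppmm(high_prod, low_prod, a, b) is just the 64-bit product of two 32-bit values split into high and low words. A portable C reference of what the ARM asm macro computes (illustration only, not part of the removed file):

    #include <linux/types.h>

    /* Reference model of umul_ppmm(xh, xl, a, b): compute the full
     * 64-bit product of two u32 operands and split it into the high
     * and low 32-bit words that the asm macro produces. */
    static void umul_ppmm_ref(u32 *xh, u32 *xl, u32 a, u32 b)
    {
            u64 prod = (u64)a * b;

            *xh = (u32)(prod >> 32);        /* HIGH_PROD */
            *xl = (u32)prod;                /* LOW_PROD  */
    }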
diff --git a/arch/arm/lib/udivdi3.c b/arch/arm/lib/udivdi3.c
deleted file mode 100644
index e343be4c6642..000000000000
--- a/arch/arm/lib/udivdi3.c
+++ /dev/null
@@ -1,222 +0,0 @@
1/* More subroutines needed by GCC output code on some machines. */
2/* Compile this one with gcc. */
3/* Copyright (C) 1989, 92-98, 1999 Free Software Foundation, Inc.
4
5This file is part of GNU CC.
6
7GNU CC is free software; you can redistribute it and/or modify
8it under the terms of the GNU General Public License as published by
9the Free Software Foundation; either version 2, or (at your option)
10any later version.
11
12GNU CC is distributed in the hope that it will be useful,
13but WITHOUT ANY WARRANTY; without even the implied warranty of
14MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15GNU General Public License for more details.
16
17You should have received a copy of the GNU General Public License
18along with GNU CC; see the file COPYING. If not, write to
19the Free Software Foundation, 59 Temple Place - Suite 330,
20Boston, MA 02111-1307, USA. */
21
22/* As a special exception, if you link this library with other files,
23 some of which are compiled with GCC, to produce an executable,
24 this library does not by itself cause the resulting executable
25 to be covered by the GNU General Public License.
26 This exception does not however invalidate any other reasons why
27 the executable file might be covered by the GNU General Public License.
28 */
29/* support functions required by the kernel. based on code from gcc-2.95.3 */
30/* I Molton 29/07/01 */
31
32#include "gcclib.h"
33#include "longlong.h"
34
35static const u8 __clz_tab[] = {
36 0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5,
37 5, 5, 5, 5, 5, 5, 5, 5,
38 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
39 6, 6, 6, 6, 6, 6, 6, 6,
40 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
41 7, 7, 7, 7, 7, 7, 7, 7,
42 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
43 7, 7, 7, 7, 7, 7, 7, 7,
44 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
45 8, 8, 8, 8, 8, 8, 8, 8,
46 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
47 8, 8, 8, 8, 8, 8, 8, 8,
48 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
49 8, 8, 8, 8, 8, 8, 8, 8,
50 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
51 8, 8, 8, 8, 8, 8, 8, 8,
52};
53
54u64 __udivmoddi4(u64 n, u64 d, u64 * rp)
55{
56 DIunion ww;
57 DIunion nn, dd;
58 DIunion rr;
59 u32 d0, d1, n0, n1, n2;
60 u32 q0, q1;
61 u32 b, bm;
62
63 nn.ll = n;
64 dd.ll = d;
65
66 d0 = dd.s.low;
67 d1 = dd.s.high;
68 n0 = nn.s.low;
69 n1 = nn.s.high;
70
71 if (d1 == 0) {
72 if (d0 > n1) {
73 /* 0q = nn / 0D */
74
75 count_leading_zeros(bm, d0);
76
77 if (bm != 0) {
78 /* Normalize, i.e. make the most significant bit of the
79 denominator set. */
80
81 d0 = d0 << bm;
82 n1 = (n1 << bm) | (n0 >> (SI_TYPE_SIZE - bm));
83 n0 = n0 << bm;
84 }
85
86 udiv_qrnnd(q0, n0, n1, n0, d0);
87 q1 = 0;
88
89 /* Remainder in n0 >> bm. */
90 } else {
91 /* qq = NN / 0d */
92
93 if (d0 == 0)
94 d0 = 1 / d0; /* Divide intentionally by zero. */
95
96 count_leading_zeros(bm, d0);
97
98 if (bm == 0) {
99 /* From (n1 >= d0) /\ (the most significant bit of d0 is set),
100 conclude (the most significant bit of n1 is set) /\ (the
101 leading quotient digit q1 = 1).
102
103 This special case is necessary, not an optimization.
104 (Shifts counts of SI_TYPE_SIZE are undefined.) */
105
106 n1 -= d0;
107 q1 = 1;
108 } else {
109 /* Normalize. */
110
111 b = SI_TYPE_SIZE - bm;
112
113 d0 = d0 << bm;
114 n2 = n1 >> b;
115 n1 = (n1 << bm) | (n0 >> b);
116 n0 = n0 << bm;
117
118 udiv_qrnnd(q1, n1, n2, n1, d0);
119 }
120
121 /* n1 != d0... */
122
123 udiv_qrnnd(q0, n0, n1, n0, d0);
124
125 /* Remainder in n0 >> bm. */
126 }
127
128 if (rp != 0) {
129 rr.s.low = n0 >> bm;
130 rr.s.high = 0;
131 *rp = rr.ll;
132 }
133 } else {
134 if (d1 > n1) {
135 /* 00 = nn / DD */
136
137 q0 = 0;
138 q1 = 0;
139
140 /* Remainder in n1n0. */
141 if (rp != 0) {
142 rr.s.low = n0;
143 rr.s.high = n1;
144 *rp = rr.ll;
145 }
146 } else {
147 /* 0q = NN / dd */
148
149 count_leading_zeros(bm, d1);
150 if (bm == 0) {
151 /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
152 conclude (the most significant bit of n1 is set) /\ (the
153 quotient digit q0 = 0 or 1).
154
155 This special case is necessary, not an optimization. */
156
157 /* The condition on the next line takes advantage of that
158 n1 >= d1 (true due to program flow). */
159 if (n1 > d1 || n0 >= d0) {
160 q0 = 1;
161 sub_ddmmss(n1, n0, n1, n0, d1, d0);
162 } else
163 q0 = 0;
164
165 q1 = 0;
166
167 if (rp != 0) {
168 rr.s.low = n0;
169 rr.s.high = n1;
170 *rp = rr.ll;
171 }
172 } else {
173 u32 m1, m0;
174 /* Normalize. */
175
176 b = SI_TYPE_SIZE - bm;
177
178 d1 = (d1 << bm) | (d0 >> b);
179 d0 = d0 << bm;
180 n2 = n1 >> b;
181 n1 = (n1 << bm) | (n0 >> b);
182 n0 = n0 << bm;
183
184 udiv_qrnnd(q0, n1, n2, n1, d1);
185 umul_ppmm(m1, m0, q0, d0);
186
187 if (m1 > n1 || (m1 == n1 && m0 > n0)) {
188 q0--;
189 sub_ddmmss(m1, m0, m1, m0, d1, d0);
190 }
191
192 q1 = 0;
193
194 /* Remainder in (n1n0 - m1m0) >> bm. */
195 if (rp != 0) {
196 sub_ddmmss(n1, n0, n1, n0, m1, m0);
197 rr.s.low = (n1 << b) | (n0 >> bm);
198 rr.s.high = n1 >> bm;
199 *rp = rr.ll;
200 }
201 }
202 }
203 }
204
205 ww.s.low = q0;
206 ww.s.high = q1;
207 return ww.ll;
208}
209
210u64 __udivdi3(u64 n, u64 d)
211{
212 return __udivmoddi4(n, d, (u64 *) 0);
213}
214
215u64 __umoddi3(u64 u, u64 v)
216{
217 u64 w;
218
219 (void)__udivmoddi4(u, v, &w);
220
221 return w;
222}
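Taken together, the removed file implemented nothing beyond the standard libgcc contract: __udivmoddi4() returns the 64-bit quotient and optionally stores the remainder, __udivdi3() returns the quotient alone, and __umoddi3() the remainder alone. A userspace reference model of that contract (a sketch relying on native 64-bit division, not kernel code):

    #include <stdint.h>

    /* Reference semantics of the removed helpers, expressed with the
     * C operators a hosted 64-bit environment provides natively. */
    static uint64_t udivmoddi4_ref(uint64_t n, uint64_t d, uint64_t *rp)
    {
            if (rp)
                    *rp = n % d;    /* what __umoddi3() returned */
            return n / d;           /* what __udivdi3() returned */
    }

Inside the kernel, after this patch only do_div() (64-bit dividend, 32-bit divisor) remains available, as the commit message notes.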