author    Dmitry Kasatkin <dmitry.kasatkin@intel.com>  2011-08-31 07:05:16 -0400
committer Dmitry Kasatkin <dmitry.kasatkin@intel.com>  2011-11-09 04:45:26 -0500
commit    5ce3e312ec5c11abce13215be70700778bf601f0 (patch)
tree      b3dae298502f4be39ce3a935e4692b494099a572 /lib/mpi
parent    cdec9cb5167ab1113ba9c58e395f664d9d3f9acb (diff)
crypto: GnuPG based MPI lib - header files (part 2)
Adds the multi-precision-integer maths library which was originally taken from GnuPG and ported to the kernel by (among others) David Howells. This version is taken from the Fedora kernel 2.6.32-71.14.1.el6. The difference is that checkpatch-reported errors and warnings have been fixed. This library is used to implement RSA digital signature verification used in the IMA/EVM integrity protection subsystem. Due to patch size limitations, the patch is divided into 4 parts. Signed-off-by: Dmitry Kasatkin <dmitry.kasatkin@intel.com>
Diffstat (limited to 'lib/mpi')
-rw-r--r--  lib/mpi/longlong.h      1478
-rw-r--r--  lib/mpi/mpi-inline.h     122
-rw-r--r--  lib/mpi/mpi-internal.h   261
3 files changed, 1861 insertions, 0 deletions
diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h
new file mode 100644
index 000000000000..b87487b40a8b
--- /dev/null
+++ b/lib/mpi/longlong.h
@@ -0,0 +1,1478 @@
1/* longlong.h -- definitions for mixed size 32/64 bit arithmetic.
2 * Note: I added some stuff for use with gnupg
3 *
4 * Copyright (C) 1991, 1992, 1993, 1994, 1996, 1998,
5 * 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
6 *
7 * This file is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU Library General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 * This file is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
15 * License for more details.
16 *
17 * You should have received a copy of the GNU Library General Public License
18 * along with this file; see the file COPYING.LIB. If not, write to
19 * the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
20 * MA 02111-1307, USA. */
21
22/* You have to define the following before including this file:
23 *
24 * UWtype -- An unsigned type, default type for operations (typically a "word")
25 * UHWtype -- An unsigned type, at least half the size of UWtype.
26 * UDWtype -- An unsigned type, at least twice as large as UWtype.
27 * W_TYPE_SIZE -- size in bits of UWtype
28 *
29 * SItype, USItype -- Signed and unsigned 32 bit types.
30 * DItype, UDItype -- Signed and unsigned 64 bit types.
31 *
32 * On a 32 bit machine UWtype should typically be USItype;
33 * on a 64 bit machine, UWtype should typically be UDItype.
34*/
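
For orientation, here is a minimal sketch of what these definitions might look like for a 32-bit target. It is illustrative only; in this series the real definitions are supplied by mpi-internal.h (below) via BITS_PER_MPI_LIMB and gcc's mode attributes.

/* Hedged sketch, not part of the patch: typical definitions a 32-bit
 * user of longlong.h would provide before including it. */
typedef int SItype __attribute__ ((mode(SI)));           /* 32-bit signed   */
typedef unsigned int USItype __attribute__ ((mode(SI))); /* 32-bit unsigned */
typedef int DItype __attribute__ ((mode(DI)));           /* 64-bit signed   */
typedef unsigned int UDItype __attribute__ ((mode(DI))); /* 64-bit unsigned */

typedef USItype UWtype;            /* one full machine word        */
typedef unsigned short UHWtype;    /* at least half a word         */
typedef UDItype UDWtype;           /* at least twice the word size */
#define W_TYPE_SIZE 32             /* bits in UWtype               */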
35
36#define __BITS4 (W_TYPE_SIZE / 4)
37#define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2))
38#define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
39#define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))
40
41/* This is used to make sure that no undesirable sharing takes place
42 between different libraries that use this file. */
43#ifndef __MPN
44#define __MPN(x) __##x
45#endif
46
47/* Define auxiliary asm macros.
48 *
49 * 1) umul_ppmm(high_prod, low_prod, multiplier, multiplicand) multiplies two
50 * UWtype integers MULTIPLIER and MULTIPLICAND, and generates a two-UWtype
51 * word product in HIGH_PROD and LOW_PROD.
52 *
53 * 2) __umulsidi3(a,b) multiplies two UWtype integers A and B, and returns a
54 * UDWtype product. This is just a variant of umul_ppmm.
55 *
56 * 3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
57 * denominator) divides a UDWtype, composed by the UWtype integers
58 * HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and places the quotient
59 * in QUOTIENT and the remainder in REMAINDER. HIGH_NUMERATOR must be less
60 * than DENOMINATOR for correct operation. If the implementation
61 * additionally requires the most significant bit of DENOMINATOR to be 1,
62 * then the pre-processor symbol UDIV_NEEDS_NORMALIZATION is defined to 1.
63 * 4) sdiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
64 * denominator). Like udiv_qrnnd but the numbers are signed. The quotient
65 * is rounded towards 0.
66 *
67 * 5) count_leading_zeros(count, x) counts the number of zero-bits from the
68 * msb to the first non-zero bit in the UWtype X. This is the number of
69 * steps X needs to be shifted left to set the msb. Undefined for X == 0,
70 * unless the symbol COUNT_LEADING_ZEROS_0 is defined to some value.
71 *
72 * 6) count_trailing_zeros(count, x) like count_leading_zeros, but counts
73 * from the least significant end.
74 *
75 * 7) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1,
76 * high_addend_2, low_addend_2) adds two UWtype integers, composed by
77 * HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and LOW_ADDEND_2
78 * respectively. The result is placed in HIGH_SUM and LOW_SUM. Overflow
79 * (i.e. carry out) is not stored anywhere, and is lost.
80 *
81 * 8) sub_ddmmss(high_difference, low_difference, high_minuend, low_minuend,
82 * high_subtrahend, low_subtrahend) subtracts two two-word UWtype integers,
83 * composed by HIGH_MINUEND and LOW_MINUEND, and HIGH_SUBTRAHEND and
84 * LOW_SUBTRAHEND respectively. The result is placed in HIGH_DIFFERENCE
85 * and LOW_DIFFERENCE. Overflow (i.e. carry out) is not stored anywhere,
86 * and is lost.
87 *
88 * If any of these macros are left undefined for a particular CPU,
89 * C macros are used. */
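
As a quick orientation, a hedged usage sketch of the workhorse macros follows; the operand values are arbitrary and a 32-bit UWtype (as in the sketch above) is assumed.

/* Illustrative only: form a double-word product, then divide it back. */
UWtype hi, lo, q, r;

umul_ppmm(hi, lo, (UWtype)0xdeadbeef, (UWtype)0x12345678);

/* udiv_qrnnd() requires hi < divisor; this divisor also has its msb
 * set, so the call is valid even when UDIV_NEEDS_NORMALIZATION is 1. */
udiv_qrnnd(q, r, hi, lo, (UWtype)0x80000001);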
90
91/* The CPUs come in alphabetical order below.
92 *
93 * Please add support for more CPUs here, or improve the current support
94 * for the CPUs below! */
95
96#if defined(__GNUC__) && !defined(NO_ASM)
97
98/* We sometimes need to clobber "cc" with gcc2, but that would not be
99 understood by gcc1. Use cpp to avoid major code duplication. */
100#if __GNUC__ < 2
101#define __CLOBBER_CC
102#define __AND_CLOBBER_CC
103#else /* __GNUC__ >= 2 */
104#define __CLOBBER_CC : "cc"
105#define __AND_CLOBBER_CC , "cc"
106#endif /* __GNUC__ < 2 */
107
108/***************************************
109 ************** A29K *****************
110 ***************************************/
111#if (defined(__a29k__) || defined(_AM29K)) && W_TYPE_SIZE == 32
112#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
113 __asm__ ("add %1,%4,%5\n" \
114 "addc %0,%2,%3" \
115 : "=r" ((USItype)(sh)), \
116 "=&r" ((USItype)(sl)) \
117 : "%r" ((USItype)(ah)), \
118 "rI" ((USItype)(bh)), \
119 "%r" ((USItype)(al)), \
120 "rI" ((USItype)(bl)))
121#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
122 __asm__ ("sub %1,%4,%5\n" \
123 "subc %0,%2,%3" \
124 : "=r" ((USItype)(sh)), \
125 "=&r" ((USItype)(sl)) \
126 : "r" ((USItype)(ah)), \
127 "rI" ((USItype)(bh)), \
128 "r" ((USItype)(al)), \
129 "rI" ((USItype)(bl)))
130#define umul_ppmm(xh, xl, m0, m1) \
131do { \
132 USItype __m0 = (m0), __m1 = (m1); \
133 __asm__ ("multiplu %0,%1,%2" \
134 : "=r" ((USItype)(xl)) \
135 : "r" (__m0), \
136 "r" (__m1)); \
137 __asm__ ("multmu %0,%1,%2" \
138 : "=r" ((USItype)(xh)) \
139 : "r" (__m0), \
140 "r" (__m1)); \
141} while (0)
142#define udiv_qrnnd(q, r, n1, n0, d) \
143 __asm__ ("dividu %0,%3,%4" \
144 : "=r" ((USItype)(q)), \
145 "=q" ((USItype)(r)) \
146 : "1" ((USItype)(n1)), \
147 "r" ((USItype)(n0)), \
148 "r" ((USItype)(d)))
149
150#define count_leading_zeros(count, x) \
151 __asm__ ("clz %0,%1" \
152 : "=r" ((USItype)(count)) \
153 : "r" ((USItype)(x)))
154#define COUNT_LEADING_ZEROS_0 32
155#endif /* __a29k__ */
156
157#if defined(__alpha) && W_TYPE_SIZE == 64
158#define umul_ppmm(ph, pl, m0, m1) \
159do { \
160 UDItype __m0 = (m0), __m1 = (m1); \
161 __asm__ ("umulh %r1,%2,%0" \
162 : "=r" ((UDItype) ph) \
163 : "%rJ" (__m0), \
164 "rI" (__m1)); \
165 (pl) = __m0 * __m1; \
166 } while (0)
167#define UMUL_TIME 46
168#ifndef LONGLONG_STANDALONE
169#define udiv_qrnnd(q, r, n1, n0, d) \
170do { UDItype __r; \
171 (q) = __udiv_qrnnd(&__r, (n1), (n0), (d)); \
172 (r) = __r; \
173} while (0)
174extern UDItype __udiv_qrnnd();
175#define UDIV_TIME 220
176#endif /* LONGLONG_STANDALONE */
177#endif /* __alpha */
178
179/***************************************
180 ************** ARM ******************
181 ***************************************/
182#if defined(__arm__) && W_TYPE_SIZE == 32
183#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
184 __asm__ ("adds %1, %4, %5\n" \
185 "adc %0, %2, %3" \
186 : "=r" ((USItype)(sh)), \
187 "=&r" ((USItype)(sl)) \
188 : "%r" ((USItype)(ah)), \
189 "rI" ((USItype)(bh)), \
190 "%r" ((USItype)(al)), \
191 "rI" ((USItype)(bl)))
192#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
193 __asm__ ("subs %1, %4, %5\n" \
194 "sbc %0, %2, %3" \
195 : "=r" ((USItype)(sh)), \
196 "=&r" ((USItype)(sl)) \
197 : "r" ((USItype)(ah)), \
198 "rI" ((USItype)(bh)), \
199 "r" ((USItype)(al)), \
200 "rI" ((USItype)(bl)))
201#if defined __ARM_ARCH_2__ || defined __ARM_ARCH_3__
202#define umul_ppmm(xh, xl, a, b) \
203 __asm__ ("%@ Inlined umul_ppmm\n" \
204 "mov %|r0, %2, lsr #16 @ AAAA\n" \
205 "mov %|r2, %3, lsr #16 @ BBBB\n" \
206 "bic %|r1, %2, %|r0, lsl #16 @ aaaa\n" \
207 "bic %0, %3, %|r2, lsl #16 @ bbbb\n" \
208 "mul %1, %|r1, %|r2 @ aaaa * BBBB\n" \
209 "mul %|r2, %|r0, %|r2 @ AAAA * BBBB\n" \
210 "mul %|r1, %0, %|r1 @ aaaa * bbbb\n" \
211 "mul %0, %|r0, %0 @ AAAA * bbbb\n" \
212 "adds %|r0, %1, %0 @ central sum\n" \
213 "addcs %|r2, %|r2, #65536\n" \
214 "adds %1, %|r1, %|r0, lsl #16\n" \
215 "adc %0, %|r2, %|r0, lsr #16" \
216 : "=&r" ((USItype)(xh)), \
217 "=r" ((USItype)(xl)) \
218 : "r" ((USItype)(a)), \
219 "r" ((USItype)(b)) \
220 : "r0", "r1", "r2")
221#else
222#define umul_ppmm(xh, xl, a, b) \
223 __asm__ ("%@ Inlined umul_ppmm\n" \
224 "umull %r1, %r0, %r2, %r3" \
225 : "=&r" ((USItype)(xh)), \
226 "=r" ((USItype)(xl)) \
227 : "r" ((USItype)(a)), \
228 "r" ((USItype)(b)) \
229 : "r0", "r1")
230#endif
231#define UMUL_TIME 20
232#define UDIV_TIME 100
233#endif /* __arm__ */
234
235/***************************************
236 ************** CLIPPER **************
237 ***************************************/
238#if defined(__clipper__) && W_TYPE_SIZE == 32
239#define umul_ppmm(w1, w0, u, v) \
240 ({union {UDItype __ll; \
241 struct {USItype __l, __h; } __i; \
242 } __xx; \
243 __asm__ ("mulwux %2,%0" \
244 : "=r" (__xx.__ll) \
245 : "%0" ((USItype)(u)), \
246 "r" ((USItype)(v))); \
247 (w1) = __xx.__i.__h; (w0) = __xx.__i.__l; })
248#define smul_ppmm(w1, w0, u, v) \
249 ({union {DItype __ll; \
250 struct {SItype __l, __h; } __i; \
251 } __xx; \
252 __asm__ ("mulwx %2,%0" \
253 : "=r" (__xx.__ll) \
254 : "%0" ((SItype)(u)), \
255 "r" ((SItype)(v))); \
256 (w1) = __xx.__i.__h; (w0) = __xx.__i.__l; })
257#define __umulsidi3(u, v) \
258 ({UDItype __w; \
259 __asm__ ("mulwux %2,%0" \
260 : "=r" (__w) \
261 : "%0" ((USItype)(u)), \
262 "r" ((USItype)(v))); \
263 __w; })
264#endif /* __clipper__ */
265
266/***************************************
267 ************** GMICRO ***************
268 ***************************************/
269#if defined(__gmicro__) && W_TYPE_SIZE == 32
270#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
271 __asm__ ("add.w %5,%1\n" \
272 "addx %3,%0" \
273 : "=g" ((USItype)(sh)), \
274 "=&g" ((USItype)(sl)) \
275 : "%0" ((USItype)(ah)), \
276 "g" ((USItype)(bh)), \
277 "%1" ((USItype)(al)), \
278 "g" ((USItype)(bl)))
279#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
280 __asm__ ("sub.w %5,%1\n" \
281 "subx %3,%0" \
282 : "=g" ((USItype)(sh)), \
283 "=&g" ((USItype)(sl)) \
284 : "0" ((USItype)(ah)), \
285 "g" ((USItype)(bh)), \
286 "1" ((USItype)(al)), \
287 "g" ((USItype)(bl)))
288#define umul_ppmm(ph, pl, m0, m1) \
289 __asm__ ("mulx %3,%0,%1" \
290 : "=g" ((USItype)(ph)), \
291 "=r" ((USItype)(pl)) \
292 : "%0" ((USItype)(m0)), \
293 "g" ((USItype)(m1)))
294#define udiv_qrnnd(q, r, nh, nl, d) \
295 __asm__ ("divx %4,%0,%1" \
296 : "=g" ((USItype)(q)), \
297 "=r" ((USItype)(r)) \
298 : "1" ((USItype)(nh)), \
299 "0" ((USItype)(nl)), \
300 "g" ((USItype)(d)))
301#define count_leading_zeros(count, x) \
302 __asm__ ("bsch/1 %1,%0" \
303 : "=g" (count) \
304 : "g" ((USItype)(x)), \
305 "0" ((USItype)0))
306#endif
307
308/***************************************
309 ************** HPPA *****************
310 ***************************************/
311#if defined(__hppa) && W_TYPE_SIZE == 32
312#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
313 __asm__ ("add %4,%5,%1\n" \
314 "addc %2,%3,%0" \
315 : "=r" ((USItype)(sh)), \
316 "=&r" ((USItype)(sl)) \
317 : "%rM" ((USItype)(ah)), \
318 "rM" ((USItype)(bh)), \
319 "%rM" ((USItype)(al)), \
320 "rM" ((USItype)(bl)))
321#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
322 __asm__ ("sub %4,%5,%1\n" \
323 "subb %2,%3,%0" \
324 : "=r" ((USItype)(sh)), \
325 "=&r" ((USItype)(sl)) \
326 : "rM" ((USItype)(ah)), \
327 "rM" ((USItype)(bh)), \
328 "rM" ((USItype)(al)), \
329 "rM" ((USItype)(bl)))
330#if defined(_PA_RISC1_1)
331#define umul_ppmm(wh, wl, u, v) \
332do { \
333 union {UDItype __ll; \
334 struct {USItype __h, __l; } __i; \
335 } __xx; \
336 __asm__ ("xmpyu %1,%2,%0" \
337 : "=*f" (__xx.__ll) \
338 : "*f" ((USItype)(u)), \
339 "*f" ((USItype)(v))); \
340 (wh) = __xx.__i.__h; \
341 (wl) = __xx.__i.__l; \
342} while (0)
343#define UMUL_TIME 8
344#define UDIV_TIME 60
345#else
346#define UMUL_TIME 40
347#define UDIV_TIME 80
348#endif
349#ifndef LONGLONG_STANDALONE
350#define udiv_qrnnd(q, r, n1, n0, d) \
351do { USItype __r; \
352 (q) = __udiv_qrnnd(&__r, (n1), (n0), (d)); \
353 (r) = __r; \
354} while (0)
355extern USItype __udiv_qrnnd();
356#endif /* LONGLONG_STANDALONE */
357#define count_leading_zeros(count, x) \
358do { \
359 USItype __tmp; \
360 __asm__ ( \
361 "ldi 1,%0\n" \
362 "extru,= %1,15,16,%%r0 ; Bits 31..16 zero?\n" \
363 "extru,tr %1,15,16,%1 ; No. Shift down, skip add.\n" \
364 "ldo 16(%0),%0 ; Yes. Perform add.\n" \
365 "extru,= %1,23,8,%%r0 ; Bits 15..8 zero?\n" \
366 "extru,tr %1,23,8,%1 ; No. Shift down, skip add.\n" \
367 "ldo 8(%0),%0 ; Yes. Perform add.\n" \
368 "extru,= %1,27,4,%%r0 ; Bits 7..4 zero?\n" \
369 "extru,tr %1,27,4,%1 ; No. Shift down, skip add.\n" \
370 "ldo 4(%0),%0 ; Yes. Perform add.\n" \
371 "extru,= %1,29,2,%%r0 ; Bits 3..2 zero?\n" \
372 "extru,tr %1,29,2,%1 ; No. Shift down, skip add.\n" \
373 "ldo 2(%0),%0 ; Yes. Perform add.\n" \
374 "extru %1,30,1,%1 ; Extract bit 1.\n" \
375 "sub %0,%1,%0 ; Subtract it. " \
376 : "=r" (count), "=r" (__tmp) : "1" (x)); \
377} while (0)
378#endif /* hppa */
379
380/***************************************
381 ************** I370 *****************
382 ***************************************/
383#if (defined(__i370__) || defined(__mvs__)) && W_TYPE_SIZE == 32
384#define umul_ppmm(xh, xl, m0, m1) \
385do { \
386 union {UDItype __ll; \
387 struct {USItype __h, __l; } __i; \
388 } __xx; \
389 USItype __m0 = (m0), __m1 = (m1); \
390 __asm__ ("mr %0,%3" \
391 : "=r" (__xx.__i.__h), \
392 "=r" (__xx.__i.__l) \
393 : "%1" (__m0), \
394 "r" (__m1)); \
395 (xh) = __xx.__i.__h; (xl) = __xx.__i.__l; \
396 (xh) += ((((SItype) __m0 >> 31) & __m1) \
397 + (((SItype) __m1 >> 31) & __m0)); \
398} while (0)
399#define smul_ppmm(xh, xl, m0, m1) \
400do { \
401 union {DItype __ll; \
402 struct {USItype __h, __l; } __i; \
403 } __xx; \
404 __asm__ ("mr %0,%3" \
405 : "=r" (__xx.__i.__h), \
406 "=r" (__xx.__i.__l) \
407 : "%1" (m0), \
408 "r" (m1)); \
409 (xh) = __xx.__i.__h; (xl) = __xx.__i.__l; \
410} while (0)
411#define sdiv_qrnnd(q, r, n1, n0, d) \
412do { \
413 union {DItype __ll; \
414 struct {USItype __h, __l; } __i; \
415 } __xx; \
416 __xx.__i.__h = n1; __xx.__i.__l = n0; \
417 __asm__ ("dr %0,%2" \
418 : "=r" (__xx.__ll) \
419 : "0" (__xx.__ll), "r" (d)); \
420 (q) = __xx.__i.__l; (r) = __xx.__i.__h; \
421} while (0)
422#endif
423
424/***************************************
425 ************** I386 *****************
426 ***************************************/
427#undef __i386__
428#if (defined(__i386__) || defined(__i486__)) && W_TYPE_SIZE == 32
429#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
430 __asm__ ("addl %5,%1\n" \
431 "adcl %3,%0" \
432 : "=r" ((USItype)(sh)), \
433 "=&r" ((USItype)(sl)) \
434 : "%0" ((USItype)(ah)), \
435 "g" ((USItype)(bh)), \
436 "%1" ((USItype)(al)), \
437 "g" ((USItype)(bl)))
438#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
439 __asm__ ("subl %5,%1\n" \
440 "sbbl %3,%0" \
441 : "=r" ((USItype)(sh)), \
442 "=&r" ((USItype)(sl)) \
443 : "0" ((USItype)(ah)), \
444 "g" ((USItype)(bh)), \
445 "1" ((USItype)(al)), \
446 "g" ((USItype)(bl)))
447#define umul_ppmm(w1, w0, u, v) \
448 __asm__ ("mull %3" \
449 : "=a" ((USItype)(w0)), \
450 "=d" ((USItype)(w1)) \
451 : "%0" ((USItype)(u)), \
452 "rm" ((USItype)(v)))
453#define udiv_qrnnd(q, r, n1, n0, d) \
454 __asm__ ("divl %4" \
455 : "=a" ((USItype)(q)), \
456 "=d" ((USItype)(r)) \
457 : "0" ((USItype)(n0)), \
458 "1" ((USItype)(n1)), \
459 "rm" ((USItype)(d)))
460#define count_leading_zeros(count, x) \
461do { \
462 USItype __cbtmp; \
463 __asm__ ("bsrl %1,%0" \
464 : "=r" (__cbtmp) : "rm" ((USItype)(x))); \
465 (count) = __cbtmp ^ 31; \
466} while (0)
467#define count_trailing_zeros(count, x) \
468 __asm__ ("bsfl %1,%0" : "=r" (count) : "rm" ((USItype)(x)))
469#ifndef UMUL_TIME
470#define UMUL_TIME 40
471#endif
472#ifndef UDIV_TIME
473#define UDIV_TIME 40
474#endif
475#endif /* 80x86 */
476
477/***************************************
478 ************** I860 *****************
479 ***************************************/
480#if defined(__i860__) && W_TYPE_SIZE == 32
481#define rshift_rhlc(r, h, l, c) \
482 __asm__ ("shr %3,r0,r0\n" \
483 "shrd %1,%2,%0" \
484 "=r" (r) : "r" (h), "r" (l), "rn" (c))
485#endif /* i860 */
486
487/***************************************
488 ************** I960 *****************
489 ***************************************/
490#if defined(__i960__) && W_TYPE_SIZE == 32
491#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
492 __asm__ ("cmpo 1,0\n" \
493 "addc %5,%4,%1\n" \
494 "addc %3,%2,%0" \
495 : "=r" ((USItype)(sh)), \
496 "=&r" ((USItype)(sl)) \
497 : "%dI" ((USItype)(ah)), \
498 "dI" ((USItype)(bh)), \
499 "%dI" ((USItype)(al)), \
500 "dI" ((USItype)(bl)))
501#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
502 __asm__ ("cmpo 0,0\n" \
503 "subc %5,%4,%1\n" \
504 "subc %3,%2,%0" \
505 : "=r" ((USItype)(sh)), \
506 "=&r" ((USItype)(sl)) \
507 : "dI" ((USItype)(ah)), \
508 "dI" ((USItype)(bh)), \
509 "dI" ((USItype)(al)), \
510 "dI" ((USItype)(bl)))
511#define umul_ppmm(w1, w0, u, v) \
512 ({union {UDItype __ll; \
513 struct {USItype __l, __h; } __i; \
514 } __xx; \
515 __asm__ ("emul %2,%1,%0" \
516 : "=d" (__xx.__ll) \
517 : "%dI" ((USItype)(u)), \
518 "dI" ((USItype)(v))); \
519 (w1) = __xx.__i.__h; (w0) = __xx.__i.__l; })
520#define __umulsidi3(u, v) \
521 ({UDItype __w; \
522 __asm__ ("emul %2,%1,%0" \
523 : "=d" (__w) \
524 : "%dI" ((USItype)(u)), \
525 "dI" ((USItype)(v))); \
526 __w; })
527#define udiv_qrnnd(q, r, nh, nl, d) \
528do { \
529 union {UDItype __ll; \
530 struct {USItype __l, __h; } __i; \
531 } __nn, __rq; \
532 __nn.__i.__h = (nh); __nn.__i.__l = (nl); \
533 __asm__ ("ediv %d,%n,%0" \
534 : "=d" (__rq.__ll) \
535 : "dI" (__nn.__ll), \
536 "dI" ((USItype)(d))); \
537 (r) = __rq.__i.__l; (q) = __rq.__i.__h; \
538} while (0)
539#define count_leading_zeros(count, x) \
540do { \
541 USItype __cbtmp; \
542 __asm__ ("scanbit %1,%0" \
543 : "=r" (__cbtmp) \
544 : "r" ((USItype)(x))); \
545 (count) = __cbtmp ^ 31; \
546} while (0)
547#define COUNT_LEADING_ZEROS_0 (-32) /* sic */
548#if defined(__i960mx) /* what is the proper symbol to test??? */
549#define rshift_rhlc(r, h, l, c) \
550do { \
551 union {UDItype __ll; \
552 struct {USItype __l, __h; } __i; \
553 } __nn; \
554 __nn.__i.__h = (h); __nn.__i.__l = (l); \
555 __asm__ ("shre %2,%1,%0" \
556 : "=d" (r) : "dI" (__nn.__ll), "dI" (c)); \
557} while (0)
558#endif /* i960mx */
559#endif /* i960 */
560
561/***************************************
562 ************** 68000 ****************
563 ***************************************/
564#if (defined(__mc68000__) || defined(__mc68020__) || defined(__NeXT__) || defined(mc68020)) && W_TYPE_SIZE == 32
565#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
566 __asm__ ("add%.l %5,%1\n" \
567 "addx%.l %3,%0" \
568 : "=d" ((USItype)(sh)), \
569 "=&d" ((USItype)(sl)) \
570 : "%0" ((USItype)(ah)), \
571 "d" ((USItype)(bh)), \
572 "%1" ((USItype)(al)), \
573 "g" ((USItype)(bl)))
574#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
575 __asm__ ("sub%.l %5,%1\n" \
576 "subx%.l %3,%0" \
577 : "=d" ((USItype)(sh)), \
578 "=&d" ((USItype)(sl)) \
579 : "0" ((USItype)(ah)), \
580 "d" ((USItype)(bh)), \
581 "1" ((USItype)(al)), \
582 "g" ((USItype)(bl)))
583#if (defined(__mc68020__) || defined(__NeXT__) || defined(mc68020))
584#define umul_ppmm(w1, w0, u, v) \
585 __asm__ ("mulu%.l %3,%1:%0" \
586 : "=d" ((USItype)(w0)), \
587 "=d" ((USItype)(w1)) \
588 : "%0" ((USItype)(u)), \
589 "dmi" ((USItype)(v)))
590#define UMUL_TIME 45
591#define udiv_qrnnd(q, r, n1, n0, d) \
592 __asm__ ("divu%.l %4,%1:%0" \
593 : "=d" ((USItype)(q)), \
594 "=d" ((USItype)(r)) \
595 : "0" ((USItype)(n0)), \
596 "1" ((USItype)(n1)), \
597 "dmi" ((USItype)(d)))
598#define UDIV_TIME 90
599#define sdiv_qrnnd(q, r, n1, n0, d) \
600 __asm__ ("divs%.l %4,%1:%0" \
601 : "=d" ((USItype)(q)), \
602 "=d" ((USItype)(r)) \
603 : "0" ((USItype)(n0)), \
604 "1" ((USItype)(n1)), \
605 "dmi" ((USItype)(d)))
606#define count_leading_zeros(count, x) \
607 __asm__ ("bfffo %1{%b2:%b2},%0" \
608 : "=d" ((USItype)(count)) \
609 : "od" ((USItype)(x)), "n" (0))
610#define COUNT_LEADING_ZEROS_0 32
611#else /* not mc68020 */
612#define umul_ppmm(xh, xl, a, b) \
613do { USItype __umul_tmp1, __umul_tmp2; \
614 __asm__ ("| Inlined umul_ppmm\n" \
615 "move%.l %5,%3\n" \
616 "move%.l %2,%0\n" \
617 "move%.w %3,%1\n" \
618 "swap %3\n" \
619 "swap %0\n" \
620 "mulu %2,%1\n" \
621 "mulu %3,%0\n" \
622 "mulu %2,%3\n" \
623 "swap %2\n" \
624 "mulu %5,%2\n" \
625 "add%.l %3,%2\n" \
626 "jcc 1f\n" \
627 "add%.l %#0x10000,%0\n" \
628 "1: move%.l %2,%3\n" \
629 "clr%.w %2\n" \
630 "swap %2\n" \
631 "swap %3\n" \
632 "clr%.w %3\n" \
633 "add%.l %3,%1\n" \
634 "addx%.l %2,%0\n" \
635 "| End inlined umul_ppmm" \
636 : "=&d" ((USItype)(xh)), "=&d" ((USItype)(xl)), \
637 "=d" (__umul_tmp1), "=&d" (__umul_tmp2) \
638 : "%2" ((USItype)(a)), "d" ((USItype)(b))); \
639} while (0)
640#define UMUL_TIME 100
641#define UDIV_TIME 400
642#endif /* not mc68020 */
643#endif /* mc68000 */
644
645/***************************************
646 ************** 88000 ****************
647 ***************************************/
648#if defined(__m88000__) && W_TYPE_SIZE == 32
649#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
650 __asm__ ("addu.co %1,%r4,%r5\n" \
651 "addu.ci %0,%r2,%r3" \
652 : "=r" ((USItype)(sh)), \
653 "=&r" ((USItype)(sl)) \
654 : "%rJ" ((USItype)(ah)), \
655 "rJ" ((USItype)(bh)), \
656 "%rJ" ((USItype)(al)), \
657 "rJ" ((USItype)(bl)))
658#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
659 __asm__ ("subu.co %1,%r4,%r5\n" \
660 "subu.ci %0,%r2,%r3" \
661 : "=r" ((USItype)(sh)), \
662 "=&r" ((USItype)(sl)) \
663 : "rJ" ((USItype)(ah)), \
664 "rJ" ((USItype)(bh)), \
665 "rJ" ((USItype)(al)), \
666 "rJ" ((USItype)(bl)))
667#define count_leading_zeros(count, x) \
668do { \
669 USItype __cbtmp; \
670 __asm__ ("ff1 %0,%1" \
671 : "=r" (__cbtmp) \
672 : "r" ((USItype)(x))); \
673 (count) = __cbtmp ^ 31; \
674} while (0)
675#define COUNT_LEADING_ZEROS_0 63 /* sic */
676#if defined(__m88110__)
677#define umul_ppmm(wh, wl, u, v) \
678do { \
679 union {UDItype __ll; \
680 struct {USItype __h, __l; } __i; \
681 } __x; \
682 __asm__ ("mulu.d %0,%1,%2" : "=r" (__x.__ll) : "r" (u), "r" (v)); \
683 (wh) = __x.__i.__h; \
684 (wl) = __x.__i.__l; \
685} while (0)
686#define udiv_qrnnd(q, r, n1, n0, d) \
687 ({union {UDItype __ll; \
688 struct {USItype __h, __l; } __i; \
689 } __x, __q; \
690 __x.__i.__h = (n1); __x.__i.__l = (n0); \
691 __asm__ ("divu.d %0,%1,%2" \
692 : "=r" (__q.__ll) : "r" (__x.__ll), "r" (d)); \
693 (r) = (n0) - __q.__i.__l * (d); (q) = __q.__i.__l; })
694#define UMUL_TIME 5
695#define UDIV_TIME 25
696#else
697#define UMUL_TIME 17
698#define UDIV_TIME 150
699#endif /* __m88110__ */
700#endif /* __m88000__ */
701
702/***************************************
703 ************** MIPS *****************
704 ***************************************/
705#if defined(__mips__) && W_TYPE_SIZE == 32
706#if __GNUC__ > 2 || __GNUC_MINOR__ >= 7
707#define umul_ppmm(w1, w0, u, v) \
708 __asm__ ("multu %2,%3" \
709 : "=l" ((USItype)(w0)), \
710 "=h" ((USItype)(w1)) \
711 : "d" ((USItype)(u)), \
712 "d" ((USItype)(v)))
713#else
714#define umul_ppmm(w1, w0, u, v) \
715 __asm__ ("multu %2,%3\n" \
716 "mflo %0\n" \
717 "mfhi %1" \
718 : "=d" ((USItype)(w0)), \
719 "=d" ((USItype)(w1)) \
720 : "d" ((USItype)(u)), \
721 "d" ((USItype)(v)))
722#endif
723#define UMUL_TIME 10
724#define UDIV_TIME 100
725#endif /* __mips__ */
726
727/***************************************
728 ************** MIPS/64 **************
729 ***************************************/
730#if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64
731#if __GNUC__ > 2 || __GNUC_MINOR__ >= 7
732#define umul_ppmm(w1, w0, u, v) \
733 __asm__ ("dmultu %2,%3" \
734 : "=l" ((UDItype)(w0)), \
735 "=h" ((UDItype)(w1)) \
736 : "d" ((UDItype)(u)), \
737 "d" ((UDItype)(v)))
738#else
739#define umul_ppmm(w1, w0, u, v) \
740 __asm__ ("dmultu %2,%3\n" \
741 "mflo %0\n" \
742 "mfhi %1" \
743 : "=d" ((UDItype)(w0)), \
744 "=d" ((UDItype)(w1)) \
745 : "d" ((UDItype)(u)), \
746 "d" ((UDItype)(v)))
747#endif
748#define UMUL_TIME 20
749#define UDIV_TIME 140
750#endif /* __mips__ */
751
752/***************************************
753 ************** 32000 ****************
754 ***************************************/
755#if defined(__ns32000__) && W_TYPE_SIZE == 32
756#define umul_ppmm(w1, w0, u, v) \
757 ({union {UDItype __ll; \
758 struct {USItype __l, __h; } __i; \
759 } __xx; \
760 __asm__ ("meid %2,%0" \
761 : "=g" (__xx.__ll) \
762 : "%0" ((USItype)(u)), \
763 "g" ((USItype)(v))); \
764 (w1) = __xx.__i.__h; (w0) = __xx.__i.__l; })
765#define __umulsidi3(u, v) \
766 ({UDItype __w; \
767 __asm__ ("meid %2,%0" \
768 : "=g" (__w) \
769 : "%0" ((USItype)(u)), \
770 "g" ((USItype)(v))); \
771 __w; })
772#define udiv_qrnnd(q, r, n1, n0, d) \
773 ({union {UDItype __ll; \
774 struct {USItype __l, __h; } __i; \
775 } __xx; \
776 __xx.__i.__h = (n1); __xx.__i.__l = (n0); \
777 __asm__ ("deid %2,%0" \
778 : "=g" (__xx.__ll) \
779 : "0" (__xx.__ll), \
780 "g" ((USItype)(d))); \
781 (r) = __xx.__i.__l; (q) = __xx.__i.__h; })
782#define count_trailing_zeros(count, x) \
783do { \
784 __asm__("ffsd %2,%0" \
785 : "=r"((USItype) (count)) \
786 : "0"((USItype) 0), "r"((USItype) (x))); \
787 } while (0)
788#endif /* __ns32000__ */
789
790/***************************************
791 ************** PPC ******************
792 ***************************************/
793#if (defined(_ARCH_PPC) || defined(_IBMR2)) && W_TYPE_SIZE == 32
794#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
795do { \
796 if (__builtin_constant_p(bh) && (bh) == 0) \
797 __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \
798 : "=r" ((USItype)(sh)), \
799 "=&r" ((USItype)(sl)) \
800 : "%r" ((USItype)(ah)), \
801 "%r" ((USItype)(al)), \
802 "rI" ((USItype)(bl))); \
803 else if (__builtin_constant_p(bh) && (bh) == ~(USItype) 0) \
804 __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \
805 : "=r" ((USItype)(sh)), \
806 "=&r" ((USItype)(sl)) \
807 : "%r" ((USItype)(ah)), \
808 "%r" ((USItype)(al)), \
809 "rI" ((USItype)(bl))); \
810 else \
811 __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \
812 : "=r" ((USItype)(sh)), \
813 "=&r" ((USItype)(sl)) \
814 : "%r" ((USItype)(ah)), \
815 "r" ((USItype)(bh)), \
816 "%r" ((USItype)(al)), \
817 "rI" ((USItype)(bl))); \
818} while (0)
819#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
820do { \
821 if (__builtin_constant_p(ah) && (ah) == 0) \
822 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \
823 : "=r" ((USItype)(sh)), \
824 "=&r" ((USItype)(sl)) \
825 : "r" ((USItype)(bh)), \
826 "rI" ((USItype)(al)), \
827 "r" ((USItype)(bl))); \
828 else if (__builtin_constant_p(ah) && (ah) == ~(USItype) 0) \
829 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \
830 : "=r" ((USItype)(sh)), \
831 "=&r" ((USItype)(sl)) \
832 : "r" ((USItype)(bh)), \
833 "rI" ((USItype)(al)), \
834 "r" ((USItype)(bl))); \
835 else if (__builtin_constant_p(bh) && (bh) == 0) \
836 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \
837 : "=r" ((USItype)(sh)), \
838 "=&r" ((USItype)(sl)) \
839 : "r" ((USItype)(ah)), \
840 "rI" ((USItype)(al)), \
841 "r" ((USItype)(bl))); \
842 else if (__builtin_constant_p(bh) && (bh) == ~(USItype) 0) \
843 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \
844 : "=r" ((USItype)(sh)), \
845 "=&r" ((USItype)(sl)) \
846 : "r" ((USItype)(ah)), \
847 "rI" ((USItype)(al)), \
848 "r" ((USItype)(bl))); \
849 else \
850 __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \
851 : "=r" ((USItype)(sh)), \
852 "=&r" ((USItype)(sl)) \
853 : "r" ((USItype)(ah)), \
854 "r" ((USItype)(bh)), \
855 "rI" ((USItype)(al)), \
856 "r" ((USItype)(bl))); \
857} while (0)
858#define count_leading_zeros(count, x) \
859 __asm__ ("{cntlz|cntlzw} %0,%1" \
860 : "=r" ((USItype)(count)) \
861 : "r" ((USItype)(x)))
862#define COUNT_LEADING_ZEROS_0 32
863#if defined(_ARCH_PPC)
864#define umul_ppmm(ph, pl, m0, m1) \
865do { \
866 USItype __m0 = (m0), __m1 = (m1); \
867 __asm__ ("mulhwu %0,%1,%2" \
868 : "=r" ((USItype) ph) \
869 : "%r" (__m0), \
870 "r" (__m1)); \
871 (pl) = __m0 * __m1; \
872} while (0)
873#define UMUL_TIME 15
874#define smul_ppmm(ph, pl, m0, m1) \
875do { \
876 SItype __m0 = (m0), __m1 = (m1); \
877 __asm__ ("mulhw %0,%1,%2" \
878 : "=r" ((SItype) ph) \
879 : "%r" (__m0), \
880 "r" (__m1)); \
881 (pl) = __m0 * __m1; \
882} while (0)
883#define SMUL_TIME 14
884#define UDIV_TIME 120
885#else
886#define umul_ppmm(xh, xl, m0, m1) \
887do { \
888 USItype __m0 = (m0), __m1 = (m1); \
889 __asm__ ("mul %0,%2,%3" \
890 : "=r" ((USItype)(xh)), \
891 "=q" ((USItype)(xl)) \
892 : "r" (__m0), \
893 "r" (__m1)); \
894 (xh) += ((((SItype) __m0 >> 31) & __m1) \
895 + (((SItype) __m1 >> 31) & __m0)); \
896} while (0)
897#define UMUL_TIME 8
898#define smul_ppmm(xh, xl, m0, m1) \
899 __asm__ ("mul %0,%2,%3" \
900 : "=r" ((SItype)(xh)), \
901 "=q" ((SItype)(xl)) \
902 : "r" (m0), \
903 "r" (m1))
904#define SMUL_TIME 4
905#define sdiv_qrnnd(q, r, nh, nl, d) \
906 __asm__ ("div %0,%2,%4" \
907 : "=r" ((SItype)(q)), "=q" ((SItype)(r)) \
908 : "r" ((SItype)(nh)), "1" ((SItype)(nl)), "r" ((SItype)(d)))
909#define UDIV_TIME 100
910#endif
911#endif /* Power architecture variants. */
912
913/***************************************
914 ************** PYR ******************
915 ***************************************/
916#if defined(__pyr__) && W_TYPE_SIZE == 32
917#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
918 __asm__ ("addw %5,%1\n" \
919 "addwc %3,%0" \
920 : "=r" ((USItype)(sh)), \
921 "=&r" ((USItype)(sl)) \
922 : "%0" ((USItype)(ah)), \
923 "g" ((USItype)(bh)), \
924 "%1" ((USItype)(al)), \
925 "g" ((USItype)(bl)))
926#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
927 __asm__ ("subw %5,%1\n" \
928 "subwb %3,%0" \
929 : "=r" ((USItype)(sh)), \
930 "=&r" ((USItype)(sl)) \
931 : "0" ((USItype)(ah)), \
932 "g" ((USItype)(bh)), \
933 "1" ((USItype)(al)), \
934 "g" ((USItype)(bl)))
935 /* This insn works on Pyramids with AP, XP, or MI CPUs, but not with SP. */
936#define umul_ppmm(w1, w0, u, v) \
937 ({union {UDItype __ll; \
938 struct {USItype __h, __l; } __i; \
939 } __xx; \
940 __asm__ ("movw %1,%R0\n" \
941 "uemul %2,%0" \
942 : "=&r" (__xx.__ll) \
943 : "g" ((USItype) (u)), \
944 "g" ((USItype)(v))); \
945 (w1) = __xx.__i.__h; (w0) = __xx.__i.__l; })
946#endif /* __pyr__ */
947
948/***************************************
949 ************** RT/ROMP **************
950 ***************************************/
951#if defined(__ibm032__) /* RT/ROMP */ && W_TYPE_SIZE == 32
952#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
953 __asm__ ("a %1,%5\n" \
954 "ae %0,%3" \
955 : "=r" ((USItype)(sh)), \
956 "=&r" ((USItype)(sl)) \
957 : "%0" ((USItype)(ah)), \
958 "r" ((USItype)(bh)), \
959 "%1" ((USItype)(al)), \
960 "r" ((USItype)(bl)))
961#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
962 __asm__ ("s %1,%5\n" \
963 "se %0,%3" \
964 : "=r" ((USItype)(sh)), \
965 "=&r" ((USItype)(sl)) \
966 : "0" ((USItype)(ah)), \
967 "r" ((USItype)(bh)), \
968 "1" ((USItype)(al)), \
969 "r" ((USItype)(bl)))
970#define umul_ppmm(ph, pl, m0, m1) \
971do { \
972 USItype __m0 = (m0), __m1 = (m1); \
973 __asm__ ( \
974 "s r2,r2\n" \
975 "mts r10,%2\n" \
976 "m r2,%3\n" \
977 "m r2,%3\n" \
978 "m r2,%3\n" \
979 "m r2,%3\n" \
980 "m r2,%3\n" \
981 "m r2,%3\n" \
982 "m r2,%3\n" \
983 "m r2,%3\n" \
984 "m r2,%3\n" \
985 "m r2,%3\n" \
986 "m r2,%3\n" \
987 "m r2,%3\n" \
988 "m r2,%3\n" \
989 "m r2,%3\n" \
990 "m r2,%3\n" \
991 "m r2,%3\n" \
992 "cas %0,r2,r0\n" \
993 "mfs r10,%1" \
994 : "=r" ((USItype)(ph)), \
995 "=r" ((USItype)(pl)) \
996 : "%r" (__m0), \
997 "r" (__m1) \
998 : "r2"); \
999 (ph) += ((((SItype) __m0 >> 31) & __m1) \
1000 + (((SItype) __m1 >> 31) & __m0)); \
1001} while (0)
1002#define UMUL_TIME 20
1003#define UDIV_TIME 200
1004#define count_leading_zeros(count, x) \
1005do { \
1006 if ((x) >= 0x10000) \
1007 __asm__ ("clz %0,%1" \
1008 : "=r" ((USItype)(count)) \
1009 : "r" ((USItype)(x) >> 16)); \
1010 else { \
1011 __asm__ ("clz %0,%1" \
1012 : "=r" ((USItype)(count)) \
1013 : "r" ((USItype)(x))); \
1014 (count) += 16; \
1015 } \
1016} while (0)
1017#endif /* RT/ROMP */
1018
1019/***************************************
1020 ************** SH2 ******************
1021 ***************************************/
1022#if (defined(__sh2__) || defined(__sh3__) || defined(__SH4__)) \
1023 && W_TYPE_SIZE == 32
1024#define umul_ppmm(w1, w0, u, v) \
1025 __asm__ ( \
1026 "dmulu.l %2,%3\n" \
1027 "sts macl,%1\n" \
1028 "sts mach,%0" \
1029 : "=r" ((USItype)(w1)), \
1030 "=r" ((USItype)(w0)) \
1031 : "r" ((USItype)(u)), \
1032 "r" ((USItype)(v)) \
1033 : "macl", "mach")
1034#define UMUL_TIME 5
1035#endif
1036
1037/***************************************
1038 ************** SPARC ****************
1039 ***************************************/
1040#if defined(__sparc__) && W_TYPE_SIZE == 32
1041#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1042 __asm__ ("addcc %r4,%5,%1\n" \
1043 "addx %r2,%3,%0" \
1044 : "=r" ((USItype)(sh)), \
1045 "=&r" ((USItype)(sl)) \
1046 : "%rJ" ((USItype)(ah)), \
1047 "rI" ((USItype)(bh)), \
1048 "%rJ" ((USItype)(al)), \
1049 "rI" ((USItype)(bl)) \
1050 __CLOBBER_CC)
1051#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1052 __asm__ ("subcc %r4,%5,%1\n" \
1053 "subx %r2,%3,%0" \
1054 : "=r" ((USItype)(sh)), \
1055 "=&r" ((USItype)(sl)) \
1056 : "rJ" ((USItype)(ah)), \
1057 "rI" ((USItype)(bh)), \
1058 "rJ" ((USItype)(al)), \
1059 "rI" ((USItype)(bl)) \
1060 __CLOBBER_CC)
1061#if defined(__sparc_v8__)
1062/* Don't match immediate range because: 1) it is not often useful,
1063 2) the 'I' flag thinks of the range as a 13 bit signed interval,
1064 while we want to match a 13 bit interval, sign extended to 32 bits,
1065 but INTERPRETED AS UNSIGNED. */
1066#define umul_ppmm(w1, w0, u, v) \
1067 __asm__ ("umul %2,%3,%1;rd %%y,%0" \
1068 : "=r" ((USItype)(w1)), \
1069 "=r" ((USItype)(w0)) \
1070 : "r" ((USItype)(u)), \
1071 "r" ((USItype)(v)))
1072#define UMUL_TIME 5
1073#ifndef SUPERSPARC /* SuperSPARC's udiv only handles 53 bit dividends */
1074#define udiv_qrnnd(q, r, n1, n0, d) \
1075do { \
1076 USItype __q; \
1077 __asm__ ("mov %1,%%y;nop;nop;nop;udiv %2,%3,%0" \
1078 : "=r" ((USItype)(__q)) \
1079 : "r" ((USItype)(n1)), \
1080 "r" ((USItype)(n0)), \
1081 "r" ((USItype)(d))); \
1082 (r) = (n0) - __q * (d); \
1083 (q) = __q; \
1084} while (0)
1085#define UDIV_TIME 25
1086#endif /* SUPERSPARC */
1087#else /* ! __sparc_v8__ */
1088#if defined(__sparclite__)
1089/* This has hardware multiply but not divide. It also has two additional
1090 instructions: scan (ffs from the high bit) and divscc. */
1091#define umul_ppmm(w1, w0, u, v) \
1092 __asm__ ("umul %2,%3,%1;rd %%y,%0" \
1093 : "=r" ((USItype)(w1)), \
1094 "=r" ((USItype)(w0)) \
1095 : "r" ((USItype)(u)), \
1096 "r" ((USItype)(v)))
1097#define UMUL_TIME 5
1098#define udiv_qrnnd(q, r, n1, n0, d) \
1099 __asm__ ("! Inlined udiv_qrnnd\n" \
1100 "wr %%g0,%2,%%y ! Not a delayed write for sparclite\n" \
1101 "tst %%g0\n" \
1102 "divscc %3,%4,%%g1\n" \
1103 "divscc %%g1,%4,%%g1\n" \
1104 "divscc %%g1,%4,%%g1\n" \
1105 "divscc %%g1,%4,%%g1\n" \
1106 "divscc %%g1,%4,%%g1\n" \
1107 "divscc %%g1,%4,%%g1\n" \
1108 "divscc %%g1,%4,%%g1\n" \
1109 "divscc %%g1,%4,%%g1\n" \
1110 "divscc %%g1,%4,%%g1\n" \
1111 "divscc %%g1,%4,%%g1\n" \
1112 "divscc %%g1,%4,%%g1\n" \
1113 "divscc %%g1,%4,%%g1\n" \
1114 "divscc %%g1,%4,%%g1\n" \
1115 "divscc %%g1,%4,%%g1\n" \
1116 "divscc %%g1,%4,%%g1\n" \
1117 "divscc %%g1,%4,%%g1\n" \
1118 "divscc %%g1,%4,%%g1\n" \
1119 "divscc %%g1,%4,%%g1\n" \
1120 "divscc %%g1,%4,%%g1\n" \
1121 "divscc %%g1,%4,%%g1\n" \
1122 "divscc %%g1,%4,%%g1\n" \
1123 "divscc %%g1,%4,%%g1\n" \
1124 "divscc %%g1,%4,%%g1\n" \
1125 "divscc %%g1,%4,%%g1\n" \
1126 "divscc %%g1,%4,%%g1\n" \
1127 "divscc %%g1,%4,%%g1\n" \
1128 "divscc %%g1,%4,%%g1\n" \
1129 "divscc %%g1,%4,%%g1\n" \
1130 "divscc %%g1,%4,%%g1\n" \
1131 "divscc %%g1,%4,%%g1\n" \
1132 "divscc %%g1,%4,%%g1\n" \
1133 "divscc %%g1,%4,%0\n" \
1134 "rd %%y,%1\n" \
1135 "bl,a 1f\n" \
1136 "add %1,%4,%1\n" \
1137 "1: ! End of inline udiv_qrnnd" \
1138 : "=r" ((USItype)(q)), \
1139 "=r" ((USItype)(r)) \
1140 : "r" ((USItype)(n1)), \
1141 "r" ((USItype)(n0)), \
1142 "rI" ((USItype)(d)) \
1143 : "%g1" __AND_CLOBBER_CC)
1144#define UDIV_TIME 37
1145#define count_leading_zeros(count, x) \
1146 __asm__ ("scan %1,0,%0" \
1147 : "=r" ((USItype)(x)) \
1148 : "r" ((USItype)(count)))
1149/* Early sparclites return 63 for an argument of 0, but they warn that future
1150 implementations might change this. Therefore, leave COUNT_LEADING_ZEROS_0
1151 undefined. */
1152#endif /* __sparclite__ */
1153#endif /* __sparc_v8__ */
1154 /* Default to sparc v7 versions of umul_ppmm and udiv_qrnnd. */
1155#ifndef umul_ppmm
1156#define umul_ppmm(w1, w0, u, v) \
1157 __asm__ ("! Inlined umul_ppmm\n" \
1158 "wr %%g0,%2,%%y ! SPARC has 0-3 delay insn after a wr\n" \
1159 "sra %3,31,%%g2 ! Don't move this insn\n" \
1160 "and %2,%%g2,%%g2 ! Don't move this insn\n" \
1161 "andcc %%g0,0,%%g1 ! Don't move this insn\n" \
1162 "mulscc %%g1,%3,%%g1\n" \
1163 "mulscc %%g1,%3,%%g1\n" \
1164 "mulscc %%g1,%3,%%g1\n" \
1165 "mulscc %%g1,%3,%%g1\n" \
1166 "mulscc %%g1,%3,%%g1\n" \
1167 "mulscc %%g1,%3,%%g1\n" \
1168 "mulscc %%g1,%3,%%g1\n" \
1169 "mulscc %%g1,%3,%%g1\n" \
1170 "mulscc %%g1,%3,%%g1\n" \
1171 "mulscc %%g1,%3,%%g1\n" \
1172 "mulscc %%g1,%3,%%g1\n" \
1173 "mulscc %%g1,%3,%%g1\n" \
1174 "mulscc %%g1,%3,%%g1\n" \
1175 "mulscc %%g1,%3,%%g1\n" \
1176 "mulscc %%g1,%3,%%g1\n" \
1177 "mulscc %%g1,%3,%%g1\n" \
1178 "mulscc %%g1,%3,%%g1\n" \
1179 "mulscc %%g1,%3,%%g1\n" \
1180 "mulscc %%g1,%3,%%g1\n" \
1181 "mulscc %%g1,%3,%%g1\n" \
1182 "mulscc %%g1,%3,%%g1\n" \
1183 "mulscc %%g1,%3,%%g1\n" \
1184 "mulscc %%g1,%3,%%g1\n" \
1185 "mulscc %%g1,%3,%%g1\n" \
1186 "mulscc %%g1,%3,%%g1\n" \
1187 "mulscc %%g1,%3,%%g1\n" \
1188 "mulscc %%g1,%3,%%g1\n" \
1189 "mulscc %%g1,%3,%%g1\n" \
1190 "mulscc %%g1,%3,%%g1\n" \
1191 "mulscc %%g1,%3,%%g1\n" \
1192 "mulscc %%g1,%3,%%g1\n" \
1193 "mulscc %%g1,%3,%%g1\n" \
1194 "mulscc %%g1,0,%%g1\n" \
1195 "add %%g1,%%g2,%0\n" \
1196 "rd %%y,%1" \
1197 : "=r" ((USItype)(w1)), \
1198 "=r" ((USItype)(w0)) \
1199 : "%rI" ((USItype)(u)), \
1200 "r" ((USItype)(v)) \
1201 : "%g1", "%g2" __AND_CLOBBER_CC)
1202#define UMUL_TIME 39 /* 39 instructions */
1203#endif
1204#ifndef udiv_qrnnd
1205#ifndef LONGLONG_STANDALONE
1206#define udiv_qrnnd(q, r, n1, n0, d) \
1207do { USItype __r; \
1208 (q) = __udiv_qrnnd(&__r, (n1), (n0), (d)); \
1209 (r) = __r; \
1210} while (0)
1211 extern USItype __udiv_qrnnd();
1212#define UDIV_TIME 140
1213#endif /* LONGLONG_STANDALONE */
1214#endif /* udiv_qrnnd */
1215#endif /* __sparc__ */
1216
1217/***************************************
1218 ************** VAX ******************
1219 ***************************************/
1220#if defined(__vax__) && W_TYPE_SIZE == 32
1221#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1222 __asm__ ("addl2 %5,%1\n" \
1223 "adwc %3,%0" \
1224 : "=g" ((USItype)(sh)), \
1225 "=&g" ((USItype)(sl)) \
1226 : "%0" ((USItype)(ah)), \
1227 "g" ((USItype)(bh)), \
1228 "%1" ((USItype)(al)), \
1229 "g" ((USItype)(bl)))
1230#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1231 __asm__ ("subl2 %5,%1\n" \
1232 "sbwc %3,%0" \
1233 : "=g" ((USItype)(sh)), \
1234 "=&g" ((USItype)(sl)) \
1235 : "0" ((USItype)(ah)), \
1236 "g" ((USItype)(bh)), \
1237 "1" ((USItype)(al)), \
1238 "g" ((USItype)(bl)))
1239#define umul_ppmm(xh, xl, m0, m1) \
1240do { \
1241 union {UDItype __ll; \
1242 struct {USItype __l, __h; } __i; \
1243 } __xx; \
1244 USItype __m0 = (m0), __m1 = (m1); \
1245 __asm__ ("emul %1,%2,$0,%0" \
1246 : "=g" (__xx.__ll) \
1247 : "g" (__m0), \
1248 "g" (__m1)); \
1249 (xh) = __xx.__i.__h; (xl) = __xx.__i.__l; \
1250 (xh) += ((((SItype) __m0 >> 31) & __m1) \
1251 + (((SItype) __m1 >> 31) & __m0)); \
1252} while (0)
1253#define sdiv_qrnnd(q, r, n1, n0, d) \
1254do { \
1255 union {DItype __ll; \
1256 struct {SItype __l, __h; } __i; \
1257 } __xx; \
1258 __xx.__i.__h = n1; __xx.__i.__l = n0; \
1259 __asm__ ("ediv %3,%2,%0,%1" \
1260 : "=g" (q), "=g" (r) \
1261 : "g" (__xx.__ll), "g" (d)); \
1262} while (0)
1263#endif /* __vax__ */
1264
1265/***************************************
1266 ************** Z8000 ****************
1267 ***************************************/
1268#if defined(__z8000__) && W_TYPE_SIZE == 16
1269#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1270 __asm__ ("add %H1,%H5\n\tadc %H0,%H3" \
1271 : "=r" ((unsigned int)(sh)), \
1272 "=&r" ((unsigned int)(sl)) \
1273 : "%0" ((unsigned int)(ah)), \
1274 "r" ((unsigned int)(bh)), \
1275 "%1" ((unsigned int)(al)), \
1276 "rQR" ((unsigned int)(bl)))
1277#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1278 __asm__ ("sub %H1,%H5\n\tsbc %H0,%H3" \
1279 : "=r" ((unsigned int)(sh)), \
1280 "=&r" ((unsigned int)(sl)) \
1281 : "0" ((unsigned int)(ah)), \
1282 "r" ((unsigned int)(bh)), \
1283 "1" ((unsigned int)(al)), \
1284 "rQR" ((unsigned int)(bl)))
1285#define umul_ppmm(xh, xl, m0, m1) \
1286do { \
1287 union {long int __ll; \
1288 struct {unsigned int __h, __l; } __i; \
1289 } __xx; \
1290 unsigned int __m0 = (m0), __m1 = (m1); \
1291 __asm__ ("mult %S0,%H3" \
1292 : "=r" (__xx.__i.__h), \
1293 "=r" (__xx.__i.__l) \
1294 : "%1" (__m0), \
1295 "rQR" (__m1)); \
1296 (xh) = __xx.__i.__h; (xl) = __xx.__i.__l; \
1297 (xh) += ((((signed int) __m0 >> 15) & __m1) \
1298 + (((signed int) __m1 >> 15) & __m0)); \
1299} while (0)
1300#endif /* __z8000__ */
1301
1302#endif /* __GNUC__ */
1303
1304/***************************************
1305 *********** Generic Versions ********
1306 ***************************************/
1307#if !defined(umul_ppmm) && defined(__umulsidi3)
1308#define umul_ppmm(ph, pl, m0, m1) \
1309{ \
1310 UDWtype __ll = __umulsidi3(m0, m1); \
1311 ph = (UWtype) (__ll >> W_TYPE_SIZE); \
1312 pl = (UWtype) __ll; \
1313}
1314#endif
1315
1316#if !defined(__umulsidi3)
1317#define __umulsidi3(u, v) \
1318 ({UWtype __hi, __lo; \
1319 umul_ppmm(__hi, __lo, u, v); \
1320 ((UDWtype) __hi << W_TYPE_SIZE) | __lo; })
1321#endif
1322
1323 /* If this machine has no inline assembler, use C macros. */
1324
1325#if !defined(add_ssaaaa)
1326#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1327do { \
1328 UWtype __x; \
1329 __x = (al) + (bl); \
1330 (sh) = (ah) + (bh) + (__x < (al)); \
1331 (sl) = __x; \
1332} while (0)
1333#endif
1334
1335#if !defined(sub_ddmmss)
1336#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1337do { \
1338 UWtype __x; \
1339 __x = (al) - (bl); \
1340 (sh) = (ah) - (bh) - (__x > (al)); \
1341 (sl) = __x; \
1342} while (0)
1343#endif
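
The carry and borrow tests in these fallbacks rely on the wrap-around of unsigned arithmetic: a sum that wrapped is strictly smaller than either addend, and a difference that wrapped is strictly larger than the minuend. A tiny hedged check, assuming a 32-bit UWtype:

/* Illustrative only: 0xffffffff + 2 wraps to 1, and 1 < 0xffffffff,
 * which is exactly the (__x < (al)) carry test used above. */
UWtype sh, sl;
add_ssaaaa(sh, sl, (UWtype)0, (UWtype)0xffffffff, (UWtype)0, (UWtype)2);
/* sh == 1, sl == 1, i.e. the double-word value 0x1_00000001 */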
1344
1345#if !defined(umul_ppmm)
1346#define umul_ppmm(w1, w0, u, v) \
1347do { \
1348 UWtype __x0, __x1, __x2, __x3; \
1349 UHWtype __ul, __vl, __uh, __vh; \
1350 UWtype __u = (u), __v = (v); \
1351 \
1352 __ul = __ll_lowpart(__u); \
1353 __uh = __ll_highpart(__u); \
1354 __vl = __ll_lowpart(__v); \
1355 __vh = __ll_highpart(__v); \
1356 \
1357 __x0 = (UWtype) __ul * __vl; \
1358 __x1 = (UWtype) __ul * __vh; \
1359 __x2 = (UWtype) __uh * __vl; \
1360 __x3 = (UWtype) __uh * __vh; \
1361 \
1362 __x1 += __ll_highpart(__x0);/* this can't give carry */ \
1363 __x1 += __x2; /* but this indeed can */ \
1364 if (__x1 < __x2) /* did we get it? */ \
1365 __x3 += __ll_B; /* yes, add it in the proper pos. */ \
1366 \
1367 (w1) = __x3 + __ll_highpart(__x1); \
1368 (w0) = (__ll_lowpart(__x1) << W_TYPE_SIZE/2) + __ll_lowpart(__x0); \
1369} while (0)
1370#endif
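
The generic version is schoolbook multiplication on half-words: writing u = uh*B + ul and v = vh*B + vl with B = 2^(W_TYPE_SIZE/2), the product is uh*vh*B^2 + (uh*vl + ul*vh)*B + ul*vl, and only the middle sum can overflow a word, which is what the __x1 < __x2 test catches. A hedged numeric check, assuming a 32-bit UWtype:

/* Illustrative only: u = 0x00010002, v = 0x00030004, B = 2^16.
 * Half-words: uh = 1, ul = 2, vh = 3, vl = 4, so
 *   __x3 = uh*vh = 3, __x1 + __x2 = 2*3 + 1*4 = 10, __x0 = ul*vl = 8,
 * giving 3*B^2 + 10*B + 8, i.e. w1 == 0x00000003 and w0 == 0x000a0008. */
UWtype w1, w0;
umul_ppmm(w1, w0, (UWtype)0x00010002, (UWtype)0x00030004);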
1371
1372#if !defined(smul_ppmm)
1373#define smul_ppmm(w1, w0, u, v) \
1374do { \
1375 UWtype __w1; \
1376 UWtype __m0 = (u), __m1 = (v); \
1377 umul_ppmm(__w1, w0, __m0, __m1); \
1378 (w1) = __w1 - (-(__m0 >> (W_TYPE_SIZE - 1)) & __m1) \
1379 - (-(__m1 >> (W_TYPE_SIZE - 1)) & __m0); \
1380} while (0)
1381#endif
1382
1383 /* Define this unconditionally, so it can be used for debugging. */
1384#define __udiv_qrnnd_c(q, r, n1, n0, d) \
1385do { \
1386 UWtype __d1, __d0, __q1, __q0, __r1, __r0, __m; \
1387 __d1 = __ll_highpart(d); \
1388 __d0 = __ll_lowpart(d); \
1389 \
1390 __r1 = (n1) % __d1; \
1391 __q1 = (n1) / __d1; \
1392 __m = (UWtype) __q1 * __d0; \
1393 __r1 = __r1 * __ll_B | __ll_highpart(n0); \
1394 if (__r1 < __m) { \
1395 __q1--, __r1 += (d); \
1396 if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */ \
1397 if (__r1 < __m) \
1398 __q1--, __r1 += (d); \
1399 } \
1400 __r1 -= __m; \
1401 \
1402 __r0 = __r1 % __d1; \
1403 __q0 = __r1 / __d1; \
1404 __m = (UWtype) __q0 * __d0; \
1405 __r0 = __r0 * __ll_B | __ll_lowpart(n0); \
1406 if (__r0 < __m) { \
1407 __q0--, __r0 += (d); \
1408 if (__r0 >= (d)) \
1409 if (__r0 < __m) \
1410 __q0--, __r0 += (d); \
1411 } \
1412 __r0 -= __m; \
1413 \
1414 (q) = (UWtype) __q1 * __ll_B | __q0; \
1415 (r) = __r0; \
1416} while (0)
1417
1418/* If the processor has no udiv_qrnnd but sdiv_qrnnd, go through
1419 __udiv_w_sdiv (defined in libgcc or elsewhere). */
1420#if !defined(udiv_qrnnd) && defined(sdiv_qrnnd)
1421#define udiv_qrnnd(q, r, nh, nl, d) \
1422do { \
1423 UWtype __r; \
1424 (q) = __MPN(udiv_w_sdiv) (&__r, nh, nl, d); \
1425 (r) = __r; \
1426} while (0)
1427#endif
1428
1429 /* If udiv_qrnnd was not defined for this processor, use __udiv_qrnnd_c. */
1430#if !defined(udiv_qrnnd)
1431#define UDIV_NEEDS_NORMALIZATION 1
1432#define udiv_qrnnd __udiv_qrnnd_c
1433#endif
1434
1435#undef count_leading_zeros
1436#if !defined(count_leading_zeros)
1437 extern
1438#ifdef __STDC__
1439 const
1440#endif
1441 unsigned char __clz_tab[];
1442#define count_leading_zeros(count, x) \
1443do { \
1444 UWtype __xr = (x); \
1445 UWtype __a; \
1446 \
1447 if (W_TYPE_SIZE <= 32) { \
1448 __a = __xr < ((UWtype) 1 << 2*__BITS4) \
1449 ? (__xr < ((UWtype) 1 << __BITS4) ? 0 : __BITS4) \
1450 : (__xr < ((UWtype) 1 << 3*__BITS4) ? 2*__BITS4 : 3*__BITS4); \
1451 } \
1452 else { \
1453 for (__a = W_TYPE_SIZE - 8; __a > 0; __a -= 8) \
1454 if (((__xr >> __a) & 0xff) != 0) \
1455 break; \
1456 } \
1457 \
1458 (count) = W_TYPE_SIZE - (__clz_tab[__xr >> __a] + __a); \
1459} while (0)
1460 /* This version gives a well-defined value for zero. */
1461#define COUNT_LEADING_ZEROS_0 W_TYPE_SIZE
1462#endif
1463
1464#if !defined(count_trailing_zeros)
1465/* Define count_trailing_zeros using count_leading_zeros. The latter might be
1466 defined in asm, but if it is not, the C version above is good enough. */
1467#define count_trailing_zeros(count, x) \
1468do { \
1469 UWtype __ctz_x = (x); \
1470 UWtype __ctz_c; \
1471 count_leading_zeros(__ctz_c, __ctz_x & -__ctz_x); \
1472 (count) = W_TYPE_SIZE - 1 - __ctz_c; \
1473} while (0)
1474#endif
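
The identity used here: in two's complement, __ctz_x & -__ctz_x isolates the lowest set bit, and count_leading_zeros then gives that bit's position from the top, which converts directly into the trailing-zero count. A hedged worked example with W_TYPE_SIZE == 32:

/* Illustrative only: x = 0x58 = 0b01011000.
 * x & -x == 0x08, the lowest set bit; clz(0x08) == 28 on 32 bits,
 * so count == 32 - 1 - 28 == 3, matching the three trailing zeros. */
UWtype tz;
count_trailing_zeros(tz, (UWtype)0x58);    /* tz == 3 */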
1475
1476#ifndef UDIV_NEEDS_NORMALIZATION
1477#define UDIV_NEEDS_NORMALIZATION 0
1478#endif
diff --git a/lib/mpi/mpi-inline.h b/lib/mpi/mpi-inline.h
new file mode 100644
index 000000000000..e2b39852b30a
--- /dev/null
+++ b/lib/mpi/mpi-inline.h
@@ -0,0 +1,122 @@
1/* mpi-inline.h - Internal to the Multi Precision Integers
2 * Copyright (C) 1994, 1996, 1998, 1999 Free Software Foundation, Inc.
3 *
4 * This file is part of GnuPG.
5 *
6 * GnuPG is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * GnuPG is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
19 *
20 * Note: This code is heavily based on the GNU MP Library.
21 * Actually it's the same code with only minor changes in the
22 * way the data is stored; this is to support the abstraction
23 * of an optional secure memory allocation which may be used
24 * to avoid revealing of sensitive data due to paging etc.
25 * The GNU MP Library itself is published under the LGPL;
26 * however I decided to publish this code under the plain GPL.
27 */
28
29#ifndef G10_MPI_INLINE_H
30#define G10_MPI_INLINE_H
31
32#ifndef G10_MPI_INLINE_DECL
33#define G10_MPI_INLINE_DECL extern inline
34#endif
35
36G10_MPI_INLINE_DECL mpi_limb_t
37mpihelp_add_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
38 mpi_size_t s1_size, mpi_limb_t s2_limb)
39{
40 mpi_limb_t x;
41
42 x = *s1_ptr++;
43 s2_limb += x;
44 *res_ptr++ = s2_limb;
45 if (s2_limb < x) { /* sum is less than the left operand: handle carry */
46 while (--s1_size) {
47 x = *s1_ptr++ + 1; /* add carry */
48 *res_ptr++ = x; /* and store */
49 if (x) /* not 0 (no overflow): we can stop */
50 goto leave;
51 }
52 return 1; /* return carry (size of s1 too small) */
53 }
54
55leave:
56 if (res_ptr != s1_ptr) { /* not the same variable */
57 mpi_size_t i; /* copy the rest */
58 for (i = 0; i < s1_size - 1; i++)
59 res_ptr[i] = s1_ptr[i];
60 }
61 return 0; /* no carry */
62}
63
64G10_MPI_INLINE_DECL mpi_limb_t
65mpihelp_add(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size,
66 mpi_ptr_t s2_ptr, mpi_size_t s2_size)
67{
68 mpi_limb_t cy = 0;
69
70 if (s2_size)
71 cy = mpihelp_add_n(res_ptr, s1_ptr, s2_ptr, s2_size);
72
73 if (s1_size - s2_size)
74 cy = mpihelp_add_1(res_ptr + s2_size, s1_ptr + s2_size,
75 s1_size - s2_size, cy);
76 return cy;
77}
78
79G10_MPI_INLINE_DECL mpi_limb_t
80mpihelp_sub_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
81 mpi_size_t s1_size, mpi_limb_t s2_limb)
82{
83 mpi_limb_t x;
84
85 x = *s1_ptr++;
86 s2_limb = x - s2_limb;
87 *res_ptr++ = s2_limb;
88 if (s2_limb > x) {
89 while (--s1_size) {
90 x = *s1_ptr++;
91 *res_ptr++ = x - 1;
92 if (x)
93 goto leave;
94 }
95 return 1;
96 }
97
98leave:
99 if (res_ptr != s1_ptr) {
100 mpi_size_t i;
101 for (i = 0; i < s1_size - 1; i++)
102 res_ptr[i] = s1_ptr[i];
103 }
104 return 0;
105}
106
107G10_MPI_INLINE_DECL mpi_limb_t
108mpihelp_sub(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size,
109 mpi_ptr_t s2_ptr, mpi_size_t s2_size)
110{
111 mpi_limb_t cy = 0;
112
113 if (s2_size)
114 cy = mpihelp_sub_n(res_ptr, s1_ptr, s2_ptr, s2_size);
115
116 if (s1_size - s2_size)
117 cy = mpihelp_sub_1(res_ptr + s2_size, s1_ptr + s2_size,
118 s1_size - s2_size, cy);
119 return cy;
120}
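
Both mpihelp_add() and mpihelp_sub() assume s1_size >= s2_size: they run the two-operand helper over the common prefix, then propagate the resulting carry or borrow through the tail of s1. A hedged usage sketch (limb values arbitrary):

/* Illustrative only: add a 2-limb number to a 4-limb one (low limb first). */
mpi_limb_t a[4] = { ~(mpi_limb_t)0, ~(mpi_limb_t)0, 5, 0 };
mpi_limb_t b[2] = { 1, 0 };
mpi_limb_t res[4];
mpi_limb_t cy = mpihelp_add(res, a, 4, b, 2);
/* res == { 0, 0, 6, 0 }, cy == 0: the carry out of the low limbs
 * rippled into a[2] via mpihelp_add_1(). */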
121
122#endif /*G10_MPI_INLINE_H */
diff --git a/lib/mpi/mpi-internal.h b/lib/mpi/mpi-internal.h
new file mode 100644
index 000000000000..77adcf6bc257
--- /dev/null
+++ b/lib/mpi/mpi-internal.h
@@ -0,0 +1,261 @@
1/* mpi-internal.h - Internal to the Multi Precision Integers
2 * Copyright (C) 1994, 1996 Free Software Foundation, Inc.
3 * Copyright (C) 1998, 2000 Free Software Foundation, Inc.
4 *
5 * This file is part of GnuPG.
6 *
7 * GnuPG is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * GnuPG is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
20 *
21 * Note: This code is heavily based on the GNU MP Library.
22 * Actually it's the same code with only minor changes in the
23 * way the data is stored; this is to support the abstraction
24 * of an optional secure memory allocation which may be used
25 * to avoid revealing of sensitive data due to paging etc.
26 * The GNU MP Library itself is published under the LGPL;
27 * however I decided to publish this code under the plain GPL.
28 */
29
30#ifndef G10_MPI_INTERNAL_H
31#define G10_MPI_INTERNAL_H
32
33#include <linux/module.h>
34#include <linux/kernel.h>
35#include <linux/slab.h>
36#include <linux/string.h>
37#include <linux/mpi.h>
38#include <linux/errno.h>
39
40#define log_debug printk
41#define log_bug printk
42
43#define assert(x) \
44 do { \
45 if (!(x)) \
46 log_bug("failed assertion\n"); \
47 } while (0)
48
49/* If KARATSUBA_THRESHOLD is not already defined, define it to a
50 * value which is good on most machines. */
51
52/* tested 4, 16, 32 and 64, where 16 gave the best performance when
53 * checking a 768 and a 1024 bit ElGamal signature.
54 * (wk 22.12.97) */
55#ifndef KARATSUBA_THRESHOLD
56#define KARATSUBA_THRESHOLD 16
57#endif
58
59/* The code can't handle KARATSUBA_THRESHOLD smaller than 2. */
60#if KARATSUBA_THRESHOLD < 2
61#undef KARATSUBA_THRESHOLD
62#define KARATSUBA_THRESHOLD 2
63#endif
64
65typedef mpi_limb_t *mpi_ptr_t; /* pointer to a limb */
66typedef int mpi_size_t; /* (must be a signed type) */
67
68#define ABS(x) ((x) >= 0 ? (x) : -(x))
69#define MIN(l, o) ((l) < (o) ? (l) : (o))
70#define MAX(h, i) ((h) > (i) ? (h) : (i))
71
72static inline int RESIZE_IF_NEEDED(MPI a, unsigned b)
73{
74 if (a->alloced < b)
75 return mpi_resize(a, b);
76 return 0;
77}
78
79/* Copy N limbs from S to D. */
80#define MPN_COPY(d, s, n) \
81 do { \
82 mpi_size_t _i; \
83 for (_i = 0; _i < (n); _i++) \
84 (d)[_i] = (s)[_i]; \
85 } while (0)
86
87#define MPN_COPY_INCR(d, s, n) \
88 do { \
89 mpi_size_t _i; \
90 for (_i = 0; _i < (n); _i++) \
91 (d)[_i] = (s)[_i]; \
92 } while (0)
93
94#define MPN_COPY_DECR(d, s, n) \
95 do { \
96 mpi_size_t _i; \
97 for (_i = (n)-1; _i >= 0; _i--) \
98 (d)[_i] = (s)[_i]; \
99 } while (0)
100
101/* Zero N limbs at D */
102#define MPN_ZERO(d, n) \
103 do { \
104 int _i; \
105 for (_i = 0; _i < (n); _i++) \
106 (d)[_i] = 0; \
107 } while (0)
108
109#define MPN_NORMALIZE(d, n) \
110 do { \
111 while ((n) > 0) { \
112 if ((d)[(n)-1]) \
113 break; \
114 (n)--; \
115 } \
116 } while (0)
117
118#define MPN_NORMALIZE_NOT_ZERO(d, n) \
119 do { \
120 for (;;) { \
121 if ((d)[(n)-1]) \
122 break; \
123 (n)--; \
124 } \
125 } while (0)
126
127#define MPN_MUL_N_RECURSE(prodp, up, vp, size, tspace) \
128 do { \
129 if ((size) < KARATSUBA_THRESHOLD) \
130 mul_n_basecase(prodp, up, vp, size); \
131 else \
132 mul_n(prodp, up, vp, size, tspace); \
133 } while (0)
134
135/* Divide the two-limb number in (NH,NL) by D, with DI being the largest
136 * limb not larger than (2**(2*BITS_PER_MPI_LIMB))/D - (2**BITS_PER_MPI_LIMB).
137 * If this would yield overflow, DI should be the largest possible number
138 * (i.e., only ones). For correct operation, the most significant bit of D
139 * has to be set. Put the quotient in Q and the remainder in R.
140 */
141#define UDIV_QRNND_PREINV(q, r, nh, nl, d, di) \
142 do { \
143 mpi_limb_t _q, _ql, _r; \
144 mpi_limb_t _xh, _xl; \
145 umul_ppmm(_q, _ql, (nh), (di)); \
146 _q += (nh); /* DI is 2**BITS_PER_MPI_LIMB too small */ \
147 umul_ppmm(_xh, _xl, _q, (d)); \
148 sub_ddmmss(_xh, _r, (nh), (nl), _xh, _xl); \
149 if (_xh) { \
150 sub_ddmmss(_xh, _r, _xh, _r, 0, (d)); \
151 _q++; \
152 if (_xh) { \
153 sub_ddmmss(_xh, _r, _xh, _r, 0, (d)); \
154 _q++; \
155 } \
156 } \
157 if (_r >= (d)) { \
158 _r -= (d); \
159 _q++; \
160 } \
161 (r) = _r; \
162 (q) = _q; \
163 } while (0)
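
UDIV_QRNND_PREINV replaces the hardware division with two umul_ppmm() calls against a precomputed reciprocal DI; the candidate quotient _q can undershoot by at most 2, which the two conditional corrections plus the final remainder check repair. A hedged sketch of how DI itself would be computed once per divisor (the helper name is hypothetical; no such helper is part of this patch):

/* Illustrative only: for a normalized divisor d (msb set), DI is
 * floor((B*B - 1)/d) - B with B = 2^BITS_PER_MPI_LIMB.  The two-limb
 * numerator (~d, ~0) equals B*B - 1 - d*B, so a single udiv_qrnnd()
 * yields exactly that value. */
static inline mpi_limb_t sketch_invert_limb(mpi_limb_t d)
{
	mpi_limb_t di, dummy;

	udiv_qrnnd(di, dummy, ~d, ~(mpi_limb_t)0, d);
	return di;
}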
164
165/*-- mpiutil.c --*/
166mpi_ptr_t mpi_alloc_limb_space(unsigned nlimbs);
167void mpi_free_limb_space(mpi_ptr_t a);
168void mpi_assign_limb_space(MPI a, mpi_ptr_t ap, unsigned nlimbs);
169
170/*-- mpi-bit.c --*/
171void mpi_rshift_limbs(MPI a, unsigned int count);
172int mpi_lshift_limbs(MPI a, unsigned int count);
173
174/*-- mpihelp-add.c --*/
175mpi_limb_t mpihelp_add_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
176 mpi_size_t s1_size, mpi_limb_t s2_limb);
177mpi_limb_t mpihelp_add_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
178 mpi_ptr_t s2_ptr, mpi_size_t size);
179mpi_limb_t mpihelp_add(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size,
180 mpi_ptr_t s2_ptr, mpi_size_t s2_size);
181
182/*-- mpihelp-sub.c --*/
183mpi_limb_t mpihelp_sub_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
184 mpi_size_t s1_size, mpi_limb_t s2_limb);
185mpi_limb_t mpihelp_sub_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
186 mpi_ptr_t s2_ptr, mpi_size_t size);
187mpi_limb_t mpihelp_sub(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size,
188 mpi_ptr_t s2_ptr, mpi_size_t s2_size);
189
190/*-- mpihelp-cmp.c --*/
191int mpihelp_cmp(mpi_ptr_t op1_ptr, mpi_ptr_t op2_ptr, mpi_size_t size);
192
193/*-- mpihelp-mul.c --*/
194
195struct karatsuba_ctx {
196 struct karatsuba_ctx *next;
197 mpi_ptr_t tspace;
198 mpi_size_t tspace_size;
199 mpi_ptr_t tp;
200 mpi_size_t tp_size;
201};
202
203void mpihelp_release_karatsuba_ctx(struct karatsuba_ctx *ctx);
204
205mpi_limb_t mpihelp_addmul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
206 mpi_size_t s1_size, mpi_limb_t s2_limb);
207mpi_limb_t mpihelp_submul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
208 mpi_size_t s1_size, mpi_limb_t s2_limb);
209int mpihelp_mul_n(mpi_ptr_t prodp, mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t size);
210int mpihelp_mul(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t usize,
211 mpi_ptr_t vp, mpi_size_t vsize, mpi_limb_t *_result);
212void mpih_sqr_n_basecase(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size);
213void mpih_sqr_n(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size,
214 mpi_ptr_t tspace);
215
216int mpihelp_mul_karatsuba_case(mpi_ptr_t prodp,
217 mpi_ptr_t up, mpi_size_t usize,
218 mpi_ptr_t vp, mpi_size_t vsize,
219 struct karatsuba_ctx *ctx);
220
221/*-- mpihelp-mul_1.c (or xxx/cpu/ *.S) --*/
222mpi_limb_t mpihelp_mul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
223 mpi_size_t s1_size, mpi_limb_t s2_limb);
224
225/*-- mpihelp-div.c --*/
226mpi_limb_t mpihelp_mod_1(mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
227 mpi_limb_t divisor_limb);
228mpi_limb_t mpihelp_divrem(mpi_ptr_t qp, mpi_size_t qextra_limbs,
229 mpi_ptr_t np, mpi_size_t nsize,
230 mpi_ptr_t dp, mpi_size_t dsize);
231mpi_limb_t mpihelp_divmod_1(mpi_ptr_t quot_ptr,
232 mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
233 mpi_limb_t divisor_limb);
234
235/*-- mpihelp-shift.c --*/
236mpi_limb_t mpihelp_lshift(mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize,
237 unsigned cnt);
238mpi_limb_t mpihelp_rshift(mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize,
239 unsigned cnt);
240
241/* Define stuff for longlong.h. */
242#define W_TYPE_SIZE BITS_PER_MPI_LIMB
243typedef mpi_limb_t UWtype;
244typedef unsigned int UHWtype;
245#if defined(__GNUC__)
246typedef unsigned int UQItype __attribute__ ((mode(QI)));
247typedef int SItype __attribute__ ((mode(SI)));
248typedef unsigned int USItype __attribute__ ((mode(SI)));
249typedef int DItype __attribute__ ((mode(DI)));
250typedef unsigned int UDItype __attribute__ ((mode(DI)));
251#else
252typedef unsigned char UQItype;
253typedef long SItype;
254typedef unsigned long USItype;
255#endif
256
257#ifdef __GNUC__
258#include "mpi-inline.h"
259#endif
260
261#endif /*G10_MPI_INTERNAL_H */