Diffstat (limited to 'lib')
-rw-r--r--	lib/mpi/mpi-bit.c	162
-rw-r--r--	lib/mpi/mpicoder.c	 75
-rw-r--r--	lib/mpi/mpih-div.c	309
-rw-r--r--	lib/mpi/mpih-mul.c	 30
-rw-r--r--	lib/mpi/mpiutil.c	 88
5 files changed, 0 insertions, 664 deletions
diff --git a/lib/mpi/mpi-bit.c b/lib/mpi/mpi-bit.c
index 0c505361da19..568724804f29 100644
--- a/lib/mpi/mpi-bit.c
+++ b/lib/mpi/mpi-bit.c
@@ -54,165 +54,3 @@ unsigned mpi_get_nbits(MPI a)
 	return n;
 }
 EXPORT_SYMBOL_GPL(mpi_get_nbits);
-
-/****************
- * Test whether bit N is set.
- */
-int mpi_test_bit(MPI a, unsigned n)
-{
-	unsigned limbno, bitno;
-	mpi_limb_t limb;
-
-	limbno = n / BITS_PER_MPI_LIMB;
-	bitno = n % BITS_PER_MPI_LIMB;
-
-	if (limbno >= a->nlimbs)
-		return 0; /* too far left: this is a 0 */
-	limb = a->d[limbno];
-	return (limb & (A_LIMB_1 << bitno)) ? 1 : 0;
-}
-
-/****************
- * Set bit N of A.
- */
-int mpi_set_bit(MPI a, unsigned n)
-{
-	unsigned limbno, bitno;
-
-	limbno = n / BITS_PER_MPI_LIMB;
-	bitno = n % BITS_PER_MPI_LIMB;
-
-	if (limbno >= a->nlimbs) { /* resize */
-		if (a->alloced >= limbno)
-			if (mpi_resize(a, limbno + 1) < 0)
-				return -ENOMEM;
-		a->nlimbs = limbno + 1;
-	}
-	a->d[limbno] |= (A_LIMB_1 << bitno);
-	return 0;
-}
-
-/****************
- * Set bit N of A. and clear all bits above
- */
-int mpi_set_highbit(MPI a, unsigned n)
-{
-	unsigned limbno, bitno;
-
-	limbno = n / BITS_PER_MPI_LIMB;
-	bitno = n % BITS_PER_MPI_LIMB;
-
-	if (limbno >= a->nlimbs) { /* resize */
-		if (a->alloced >= limbno)
-			if (mpi_resize(a, limbno + 1) < 0)
-				return -ENOMEM;
-		a->nlimbs = limbno + 1;
-	}
-	a->d[limbno] |= (A_LIMB_1 << bitno);
-	for (bitno++; bitno < BITS_PER_MPI_LIMB; bitno++)
-		a->d[limbno] &= ~(A_LIMB_1 << bitno);
-	a->nlimbs = limbno + 1;
-	return 0;
-}
-
-/****************
- * clear bit N of A and all bits above
- */
-void mpi_clear_highbit(MPI a, unsigned n)
-{
-	unsigned limbno, bitno;
-
-	limbno = n / BITS_PER_MPI_LIMB;
-	bitno = n % BITS_PER_MPI_LIMB;
-
-	if (limbno >= a->nlimbs)
-		return; /* not allocated, so need to clear bits :-) */
-
-	for (; bitno < BITS_PER_MPI_LIMB; bitno++)
-		a->d[limbno] &= ~(A_LIMB_1 << bitno);
-	a->nlimbs = limbno + 1;
-}
-
-/****************
- * Clear bit N of A.
- */
-void mpi_clear_bit(MPI a, unsigned n)
-{
-	unsigned limbno, bitno;
-
-	limbno = n / BITS_PER_MPI_LIMB;
-	bitno = n % BITS_PER_MPI_LIMB;
-
-	if (limbno >= a->nlimbs)
-		return; /* don't need to clear this bit, it's to far to left */
-	a->d[limbno] &= ~(A_LIMB_1 << bitno);
-}
-
-/****************
- * Shift A by N bits to the right
- * FIXME: should use alloc_limb if X and A are same.
- */
-int mpi_rshift(MPI x, MPI a, unsigned n)
-{
-	mpi_ptr_t xp;
-	mpi_size_t xsize;
-
-	xsize = a->nlimbs;
-	x->sign = a->sign;
-	if (RESIZE_IF_NEEDED(x, (size_t) xsize) < 0)
-		return -ENOMEM;
-	xp = x->d;
-
-	if (xsize) {
-		mpihelp_rshift(xp, a->d, xsize, n);
-		MPN_NORMALIZE(xp, xsize);
-	}
-	x->nlimbs = xsize;
-	return 0;
-}
-
-/****************
- * Shift A by COUNT limbs to the left
- * This is used only within the MPI library
- */
-int mpi_lshift_limbs(MPI a, unsigned int count)
-{
-	const int n = a->nlimbs;
-	mpi_ptr_t ap;
-	int i;
-
-	if (!count || !n)
-		return 0;
-
-	if (RESIZE_IF_NEEDED(a, n + count) < 0)
-		return -ENOMEM;
-
-	ap = a->d;
-	for (i = n - 1; i >= 0; i--)
-		ap[i + count] = ap[i];
-	for (i = 0; i < count; i++)
-		ap[i] = 0;
-	a->nlimbs += count;
-	return 0;
-}
-
-/****************
- * Shift A by COUNT limbs to the right
- * This is used only within the MPI library
- */
-void mpi_rshift_limbs(MPI a, unsigned int count)
-{
-	mpi_ptr_t ap = a->d;
-	mpi_size_t n = a->nlimbs;
-	unsigned int i;
-
-	if (count >= n) {
-		a->nlimbs = 0;
-		return;
-	}
-
-	for (i = 0; i < n - count; i++)
-		ap[i] = ap[i + count];
-	ap[i] = 0;
-	a->nlimbs -= count;
-}
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index f26b41fcb48c..f0fa65995800 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -74,81 +74,6 @@ leave:
 EXPORT_SYMBOL_GPL(mpi_read_from_buffer);
 
 /****************
- * Make an mpi from a character string.
- */
-int mpi_fromstr(MPI val, const char *str)
-{
-	int hexmode = 0, sign = 0, prepend_zero = 0, i, j, c, c1, c2;
-	unsigned nbits, nbytes, nlimbs;
-	mpi_limb_t a;
-
-	if (*str == '-') {
-		sign = 1;
-		str++;
-	}
-	if (*str == '0' && str[1] == 'x')
-		hexmode = 1;
-	else
-		return -EINVAL; /* other bases are not yet supported */
-	str += 2;
-
-	nbits = strlen(str) * 4;
-	if (nbits % 8)
-		prepend_zero = 1;
-	nbytes = (nbits + 7) / 8;
-	nlimbs = (nbytes + BYTES_PER_MPI_LIMB - 1) / BYTES_PER_MPI_LIMB;
-	if (val->alloced < nlimbs)
-		if (!mpi_resize(val, nlimbs))
-			return -ENOMEM;
-	i = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB;
-	i %= BYTES_PER_MPI_LIMB;
-	j = val->nlimbs = nlimbs;
-	val->sign = sign;
-	for (; j > 0; j--) {
-		a = 0;
-		for (; i < BYTES_PER_MPI_LIMB; i++) {
-			if (prepend_zero) {
-				c1 = '0';
-				prepend_zero = 0;
-			} else
-				c1 = *str++;
-			assert(c1);
-			c2 = *str++;
-			assert(c2);
-			if (c1 >= '0' && c1 <= '9')
-				c = c1 - '0';
-			else if (c1 >= 'a' && c1 <= 'f')
-				c = c1 - 'a' + 10;
-			else if (c1 >= 'A' && c1 <= 'F')
-				c = c1 - 'A' + 10;
-			else {
-				mpi_clear(val);
-				return 1;
-			}
-			c <<= 4;
-			if (c2 >= '0' && c2 <= '9')
-				c |= c2 - '0';
-			else if (c2 >= 'a' && c2 <= 'f')
-				c |= c2 - 'a' + 10;
-			else if (c2 >= 'A' && c2 <= 'F')
-				c |= c2 - 'A' + 10;
-			else {
-				mpi_clear(val);
-				return 1;
-			}
-			a <<= 8;
-			a |= c;
-		}
-		i = 0;
-
-		val->d[j - 1] = a;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(mpi_fromstr);
-
-/****************
  * Return an allocated buffer with the MPI (msb first).
  * NBYTES receives the length of this buffer. Caller must free the
  * return string (This function does return a 0 byte buffer with NBYTES
diff --git a/lib/mpi/mpih-div.c b/lib/mpi/mpih-div.c
index cde1aaec18da..c57d1d46295e 100644
--- a/lib/mpi/mpih-div.c
+++ b/lib/mpi/mpih-div.c
@@ -37,159 +37,6 @@
 #define UDIV_TIME UMUL_TIME
 #endif
 
-/* FIXME: We should be using invert_limb (or invert_normalized_limb)
- * here (not udiv_qrnnd).
- */
-
-mpi_limb_t
-mpihelp_mod_1(mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
-	      mpi_limb_t divisor_limb)
-{
-	mpi_size_t i;
-	mpi_limb_t n1, n0, r;
-	int dummy;
-
-	/* Botch: Should this be handled at all? Rely on callers? */
-	if (!dividend_size)
-		return 0;
-
-	/* If multiplication is much faster than division, and the
-	 * dividend is large, pre-invert the divisor, and use
-	 * only multiplications in the inner loop.
-	 *
-	 * This test should be read:
-	 * Does it ever help to use udiv_qrnnd_preinv?
-	 * && Does what we save compensate for the inversion overhead?
-	 */
-	if (UDIV_TIME > (2 * UMUL_TIME + 6)
-	    && (UDIV_TIME - (2 * UMUL_TIME + 6)) * dividend_size > UDIV_TIME) {
-		int normalization_steps;
-
-		count_leading_zeros(normalization_steps, divisor_limb);
-		if (normalization_steps) {
-			mpi_limb_t divisor_limb_inverted;
-
-			divisor_limb <<= normalization_steps;
-
-			/* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
-			 * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
-			 * most significant bit (with weight 2**N) implicit.
-			 *
-			 * Special case for DIVISOR_LIMB == 100...000.
-			 */
-			if (!(divisor_limb << 1))
-				divisor_limb_inverted = ~(mpi_limb_t) 0;
-			else
-				udiv_qrnnd(divisor_limb_inverted, dummy,
-					   -divisor_limb, 0, divisor_limb);
-
-			n1 = dividend_ptr[dividend_size - 1];
-			r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);
-
-			/* Possible optimization:
-			 * if (r == 0
-			 * && divisor_limb > ((n1 << normalization_steps)
-			 * | (dividend_ptr[dividend_size - 2] >> ...)))
-			 * ...one division less...
-			 */
-			for (i = dividend_size - 2; i >= 0; i--) {
-				n0 = dividend_ptr[i];
-				UDIV_QRNND_PREINV(dummy, r, r,
-						  ((n1 << normalization_steps)
-						   | (n0 >>
-						      (BITS_PER_MPI_LIMB -
-						       normalization_steps))),
-						  divisor_limb,
-						  divisor_limb_inverted);
-				n1 = n0;
-			}
-			UDIV_QRNND_PREINV(dummy, r, r,
-					  n1 << normalization_steps,
-					  divisor_limb, divisor_limb_inverted);
-			return r >> normalization_steps;
-		} else {
-			mpi_limb_t divisor_limb_inverted;
-
-			/* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
-			 * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
-			 * most significant bit (with weight 2**N) implicit.
-			 *
-			 * Special case for DIVISOR_LIMB == 100...000.
-			 */
-			if (!(divisor_limb << 1))
-				divisor_limb_inverted = ~(mpi_limb_t) 0;
-			else
-				udiv_qrnnd(divisor_limb_inverted, dummy,
-					   -divisor_limb, 0, divisor_limb);
-
-			i = dividend_size - 1;
-			r = dividend_ptr[i];
-
-			if (r >= divisor_limb)
-				r = 0;
-			else
-				i--;
-
-			for (; i >= 0; i--) {
-				n0 = dividend_ptr[i];
-				UDIV_QRNND_PREINV(dummy, r, r,
-						  n0, divisor_limb,
-						  divisor_limb_inverted);
-			}
-			return r;
-		}
-	} else {
-		if (UDIV_NEEDS_NORMALIZATION) {
-			int normalization_steps;
-
-			count_leading_zeros(normalization_steps, divisor_limb);
-			if (normalization_steps) {
-				divisor_limb <<= normalization_steps;
-
-				n1 = dividend_ptr[dividend_size - 1];
-				r = n1 >> (BITS_PER_MPI_LIMB -
-					   normalization_steps);
-
-				/* Possible optimization:
-				 * if (r == 0
-				 * && divisor_limb > ((n1 << normalization_steps)
-				 * | (dividend_ptr[dividend_size - 2] >> ...)))
-				 * ...one division less...
-				 */
-				for (i = dividend_size - 2; i >= 0; i--) {
-					n0 = dividend_ptr[i];
-					udiv_qrnnd(dummy, r, r,
-						   ((n1 << normalization_steps)
-						    | (n0 >>
-						       (BITS_PER_MPI_LIMB -
-							normalization_steps))),
-						   divisor_limb);
-					n1 = n0;
-				}
-				udiv_qrnnd(dummy, r, r,
-					   n1 << normalization_steps,
-					   divisor_limb);
-				return r >> normalization_steps;
-			}
-		}
-		/* No normalization needed, either because udiv_qrnnd doesn't require
-		 * it, or because DIVISOR_LIMB is already normalized. */
-		i = dividend_size - 1;
-		r = dividend_ptr[i];
-
-		if (r >= divisor_limb)
-			r = 0;
-		else
-			i--;
-
-		for (; i >= 0; i--) {
-			n0 = dividend_ptr[i];
-			udiv_qrnnd(dummy, r, r, n0, divisor_limb);
-		}
-		return r;
-	}
-}
-
 /* Divide num (NP/NSIZE) by den (DP/DSIZE) and write
  * the NSIZE-DSIZE least significant quotient limbs at QP
  * and the DSIZE long remainder at NP. If QEXTRA_LIMBS is
@@ -387,159 +234,3 @@ q_test:
 
 	return most_significant_q_limb;
 }
-
-/****************
- * Divide (DIVIDEND_PTR,,DIVIDEND_SIZE) by DIVISOR_LIMB.
- * Write DIVIDEND_SIZE limbs of quotient at QUOT_PTR.
- * Return the single-limb remainder.
- * There are no constraints on the value of the divisor.
- *
- * QUOT_PTR and DIVIDEND_PTR might point to the same limb.
- */
-
-mpi_limb_t
-mpihelp_divmod_1(mpi_ptr_t quot_ptr,
-		 mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
-		 mpi_limb_t divisor_limb)
-{
-	mpi_size_t i;
-	mpi_limb_t n1, n0, r;
-	int dummy;
-
-	if (!dividend_size)
-		return 0;
-
-	/* If multiplication is much faster than division, and the
-	 * dividend is large, pre-invert the divisor, and use
-	 * only multiplications in the inner loop.
-	 *
-	 * This test should be read:
-	 * Does it ever help to use udiv_qrnnd_preinv?
-	 * && Does what we save compensate for the inversion overhead?
-	 */
-	if (UDIV_TIME > (2 * UMUL_TIME + 6)
-	    && (UDIV_TIME - (2 * UMUL_TIME + 6)) * dividend_size > UDIV_TIME) {
-		int normalization_steps;
-
-		count_leading_zeros(normalization_steps, divisor_limb);
-		if (normalization_steps) {
-			mpi_limb_t divisor_limb_inverted;
-
-			divisor_limb <<= normalization_steps;
-
-			/* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
-			 * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
-			 * most significant bit (with weight 2**N) implicit.
-			 */
-			/* Special case for DIVISOR_LIMB == 100...000. */
-			if (!(divisor_limb << 1))
-				divisor_limb_inverted = ~(mpi_limb_t) 0;
-			else
-				udiv_qrnnd(divisor_limb_inverted, dummy,
-					   -divisor_limb, 0, divisor_limb);
-
-			n1 = dividend_ptr[dividend_size - 1];
-			r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);
-
-			/* Possible optimization:
-			 * if (r == 0
-			 * && divisor_limb > ((n1 << normalization_steps)
-			 * | (dividend_ptr[dividend_size - 2] >> ...)))
-			 * ...one division less...
-			 */
-			for (i = dividend_size - 2; i >= 0; i--) {
-				n0 = dividend_ptr[i];
-				UDIV_QRNND_PREINV(quot_ptr[i + 1], r, r,
-						  ((n1 << normalization_steps)
-						   | (n0 >>
-						      (BITS_PER_MPI_LIMB -
-						       normalization_steps))),
-						  divisor_limb,
-						  divisor_limb_inverted);
-				n1 = n0;
-			}
-			UDIV_QRNND_PREINV(quot_ptr[0], r, r,
-					  n1 << normalization_steps,
-					  divisor_limb, divisor_limb_inverted);
-			return r >> normalization_steps;
-		} else {
-			mpi_limb_t divisor_limb_inverted;
-
-			/* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
-			 * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
-			 * most significant bit (with weight 2**N) implicit.
-			 */
-			/* Special case for DIVISOR_LIMB == 100...000. */
-			if (!(divisor_limb << 1))
-				divisor_limb_inverted = ~(mpi_limb_t) 0;
-			else
-				udiv_qrnnd(divisor_limb_inverted, dummy,
-					   -divisor_limb, 0, divisor_limb);
-
-			i = dividend_size - 1;
-			r = dividend_ptr[i];
-
-			if (r >= divisor_limb)
-				r = 0;
-			else
-				quot_ptr[i--] = 0;
-
-			for (; i >= 0; i--) {
-				n0 = dividend_ptr[i];
-				UDIV_QRNND_PREINV(quot_ptr[i], r, r,
-						  n0, divisor_limb,
-						  divisor_limb_inverted);
-			}
-			return r;
-		}
-	} else {
-		if (UDIV_NEEDS_NORMALIZATION) {
-			int normalization_steps;
-
-			count_leading_zeros(normalization_steps, divisor_limb);
-			if (normalization_steps) {
-				divisor_limb <<= normalization_steps;
-
-				n1 = dividend_ptr[dividend_size - 1];
-				r = n1 >> (BITS_PER_MPI_LIMB -
-					   normalization_steps);
-
-				/* Possible optimization:
-				 * if (r == 0
-				 * && divisor_limb > ((n1 << normalization_steps)
-				 * | (dividend_ptr[dividend_size - 2] >> ...)))
-				 * ...one division less...
-				 */
-				for (i = dividend_size - 2; i >= 0; i--) {
-					n0 = dividend_ptr[i];
-					udiv_qrnnd(quot_ptr[i + 1], r, r,
-						   ((n1 << normalization_steps)
-						    | (n0 >>
-						       (BITS_PER_MPI_LIMB -
-							normalization_steps))),
-						   divisor_limb);
-					n1 = n0;
-				}
-				udiv_qrnnd(quot_ptr[0], r, r,
-					   n1 << normalization_steps,
-					   divisor_limb);
-				return r >> normalization_steps;
-			}
-		}
-		/* No normalization needed, either because udiv_qrnnd doesn't require
-		 * it, or because DIVISOR_LIMB is already normalized. */
-		i = dividend_size - 1;
-		r = dividend_ptr[i];
-
-		if (r >= divisor_limb)
-			r = 0;
-		else
-			quot_ptr[i--] = 0;
-
-		for (; i >= 0; i--) {
-			n0 = dividend_ptr[i];
-			udiv_qrnnd(quot_ptr[i], r, r, n0, divisor_limb);
-		}
-		return r;
-	}
-}
diff --git a/lib/mpi/mpih-mul.c b/lib/mpi/mpih-mul.c
index c69c5eef233b..7c841719fdfb 100644
--- a/lib/mpi/mpih-mul.c
+++ b/lib/mpi/mpih-mul.c
@@ -330,36 +330,6 @@ mpih_sqr_n(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size, mpi_ptr_t tspace)
 	}
 }
 
-/* This should be made into an inline function in gmp.h. */
-int mpihelp_mul_n(mpi_ptr_t prodp, mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t size)
-{
-	if (up == vp) {
-		if (size < KARATSUBA_THRESHOLD)
-			mpih_sqr_n_basecase(prodp, up, size);
-		else {
-			mpi_ptr_t tspace;
-			tspace = mpi_alloc_limb_space(2 * size);
-			if (!tspace)
-				return -ENOMEM;
-			mpih_sqr_n(prodp, up, size, tspace);
-			mpi_free_limb_space(tspace);
-		}
-	} else {
-		if (size < KARATSUBA_THRESHOLD)
-			mul_n_basecase(prodp, up, vp, size);
-		else {
-			mpi_ptr_t tspace;
-			tspace = mpi_alloc_limb_space(2 * size);
-			if (!tspace)
-				return -ENOMEM;
-			mul_n(prodp, up, vp, size, tspace);
-			mpi_free_limb_space(tspace);
-		}
-	}
-
-	return 0;
-}
-
 int
 mpihelp_mul_karatsuba_case(mpi_ptr_t prodp,
 			   mpi_ptr_t up, mpi_size_t usize,
diff --git a/lib/mpi/mpiutil.c b/lib/mpi/mpiutil.c
index 26e4ed31e256..657979f71bef 100644
--- a/lib/mpi/mpiutil.c
+++ b/lib/mpi/mpiutil.c
@@ -106,13 +106,6 @@ int mpi_resize(MPI a, unsigned nlimbs)
 	return 0;
 }
 
-void mpi_clear(MPI a)
-{
-	a->nlimbs = 0;
-	a->nbits = 0;
-	a->flags = 0;
-}
-
 void mpi_free(MPI a)
 {
 	if (!a)
@@ -128,84 +121,3 @@ void mpi_free(MPI a)
 	kfree(a);
 }
 EXPORT_SYMBOL_GPL(mpi_free);
-
-/****************
- * Note: This copy function should not interpret the MPI
- * but copy it transparently.
- */
-int mpi_copy(MPI *copied, const MPI a)
-{
-	size_t i;
-	MPI b;
-
-	*copied = NULL;
-
-	if (a) {
-		b = mpi_alloc(a->nlimbs);
-		if (!b)
-			return -ENOMEM;
-
-		b->nlimbs = a->nlimbs;
-		b->sign = a->sign;
-		b->flags = a->flags;
-		b->nbits = a->nbits;
-
-		for (i = 0; i < b->nlimbs; i++)
-			b->d[i] = a->d[i];
-
-		*copied = b;
-	}
-
-	return 0;
-}
-
-int mpi_set(MPI w, const MPI u)
-{
-	mpi_ptr_t wp, up;
-	mpi_size_t usize = u->nlimbs;
-	int usign = u->sign;
-
-	if (RESIZE_IF_NEEDED(w, (size_t) usize) < 0)
-		return -ENOMEM;
-
-	wp = w->d;
-	up = u->d;
-	MPN_COPY(wp, up, usize);
-	w->nlimbs = usize;
-	w->nbits = u->nbits;
-	w->flags = u->flags;
-	w->sign = usign;
-	return 0;
-}
-
-int mpi_set_ui(MPI w, unsigned long u)
-{
-	if (RESIZE_IF_NEEDED(w, 1) < 0)
-		return -ENOMEM;
-	w->d[0] = u;
-	w->nlimbs = u ? 1 : 0;
-	w->sign = 0;
-	w->nbits = 0;
-	w->flags = 0;
-	return 0;
-}
-
-MPI mpi_alloc_set_ui(unsigned long u)
-{
-	MPI w = mpi_alloc(1);
-	if (!w)
-		return w;
-	w->d[0] = u;
-	w->nlimbs = u ? 1 : 0;
-	w->sign = 0;
-	return w;
-}
-
-void mpi_swap(MPI a, MPI b)
-{
-	struct gcry_mpi tmp;
-
-	tmp = *a;
-	*a = *b;
-	*b = tmp;
-}