path: root/lib/mpi/mpih-div.c
author    Dmitry Kasatkin <dmitry.kasatkin@intel.com>    2012-05-09 10:37:56 -0400
committer James Morris <james.l.morris@oracle.com>       2012-05-25 21:51:03 -0400
commit    7cf4206a99d1b3e61bdbc7cbbf4a7bf6a9dfcc68 (patch)
tree      a77ad8cffa35427352cd18f94ed9339640e4d473 /lib/mpi/mpih-div.c
parent    9e235dcaf4f63d88a7e9ce5735ba5c2eb2719603 (diff)
Remove unused code from MPI library
The MPI library is used by the RSA verification implementation. A few files contain functions which are never called. James Morris has asked to remove all of them.

Signed-off-by: Dmitry Kasatkin <dmitry.kasatkin@intel.com>
Requested-by: James Morris <james.l.morris@oracle.com>
Signed-off-by: James Morris <james.l.morris@oracle.com>
Diffstat (limited to 'lib/mpi/mpih-div.c')
-rw-r--r--    lib/mpi/mpih-div.c    309
1 file changed, 0 insertions, 309 deletions
diff --git a/lib/mpi/mpih-div.c b/lib/mpi/mpih-div.c
index cde1aaec18d..c57d1d46295 100644
--- a/lib/mpi/mpih-div.c
+++ b/lib/mpi/mpih-div.c
@@ -37,159 +37,6 @@
 #define UDIV_TIME UMUL_TIME
 #endif
 
-/* FIXME: We should be using invert_limb (or invert_normalized_limb)
- * here (not udiv_qrnnd).
- */
-
-mpi_limb_t
-mpihelp_mod_1(mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
-              mpi_limb_t divisor_limb)
-{
-        mpi_size_t i;
-        mpi_limb_t n1, n0, r;
-        int dummy;
-
-        /* Botch: Should this be handled at all? Rely on callers? */
-        if (!dividend_size)
-                return 0;
-
-        /* If multiplication is much faster than division, and the
-         * dividend is large, pre-invert the divisor, and use
-         * only multiplications in the inner loop.
-         *
-         * This test should be read:
-         *      Does it ever help to use udiv_qrnnd_preinv?
-         *        && Does what we save compensate for the inversion overhead?
-         */
-        if (UDIV_TIME > (2 * UMUL_TIME + 6)
-            && (UDIV_TIME - (2 * UMUL_TIME + 6)) * dividend_size > UDIV_TIME) {
-                int normalization_steps;
-
-                count_leading_zeros(normalization_steps, divisor_limb);
-                if (normalization_steps) {
-                        mpi_limb_t divisor_limb_inverted;
-
-                        divisor_limb <<= normalization_steps;
-
-                        /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
-                         * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
-                         * most significant bit (with weight 2**N) implicit.
-                         *
-                         * Special case for DIVISOR_LIMB == 100...000.
-                         */
-                        if (!(divisor_limb << 1))
-                                divisor_limb_inverted = ~(mpi_limb_t) 0;
-                        else
-                                udiv_qrnnd(divisor_limb_inverted, dummy,
-                                           -divisor_limb, 0, divisor_limb);
-
-                        n1 = dividend_ptr[dividend_size - 1];
-                        r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);
-
-                        /* Possible optimization:
-                         * if (r == 0
-                         *     && divisor_limb > ((n1 << normalization_steps)
-                         *                        | (dividend_ptr[dividend_size - 2] >> ...)))
-                         * ...one division less...
-                         */
-                        for (i = dividend_size - 2; i >= 0; i--) {
-                                n0 = dividend_ptr[i];
-                                UDIV_QRNND_PREINV(dummy, r, r,
-                                                  ((n1 << normalization_steps)
-                                                   | (n0 >>
-                                                      (BITS_PER_MPI_LIMB -
-                                                       normalization_steps))),
-                                                  divisor_limb,
-                                                  divisor_limb_inverted);
-                                n1 = n0;
-                        }
-                        UDIV_QRNND_PREINV(dummy, r, r,
-                                          n1 << normalization_steps,
-                                          divisor_limb, divisor_limb_inverted);
-                        return r >> normalization_steps;
-                } else {
-                        mpi_limb_t divisor_limb_inverted;
-
-                        /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
-                         * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
-                         * most significant bit (with weight 2**N) implicit.
-                         *
-                         * Special case for DIVISOR_LIMB == 100...000.
-                         */
-                        if (!(divisor_limb << 1))
-                                divisor_limb_inverted = ~(mpi_limb_t) 0;
-                        else
-                                udiv_qrnnd(divisor_limb_inverted, dummy,
-                                           -divisor_limb, 0, divisor_limb);
-
-                        i = dividend_size - 1;
-                        r = dividend_ptr[i];
-
-                        if (r >= divisor_limb)
-                                r = 0;
-                        else
-                                i--;
-
-                        for (; i >= 0; i--) {
-                                n0 = dividend_ptr[i];
-                                UDIV_QRNND_PREINV(dummy, r, r,
-                                                  n0, divisor_limb,
-                                                  divisor_limb_inverted);
-                        }
-                        return r;
-                }
-        } else {
-                if (UDIV_NEEDS_NORMALIZATION) {
-                        int normalization_steps;
-
-                        count_leading_zeros(normalization_steps, divisor_limb);
-                        if (normalization_steps) {
-                                divisor_limb <<= normalization_steps;
-
-                                n1 = dividend_ptr[dividend_size - 1];
-                                r = n1 >> (BITS_PER_MPI_LIMB -
-                                           normalization_steps);
-
-                                /* Possible optimization:
-                                 * if (r == 0
-                                 *     && divisor_limb > ((n1 << normalization_steps)
-                                 *                        | (dividend_ptr[dividend_size - 2] >> ...)))
-                                 * ...one division less...
-                                 */
-                                for (i = dividend_size - 2; i >= 0; i--) {
-                                        n0 = dividend_ptr[i];
-                                        udiv_qrnnd(dummy, r, r,
-                                                   ((n1 << normalization_steps)
-                                                    | (n0 >>
-                                                       (BITS_PER_MPI_LIMB -
-                                                        normalization_steps))),
-                                                   divisor_limb);
-                                        n1 = n0;
-                                }
-                                udiv_qrnnd(dummy, r, r,
-                                           n1 << normalization_steps,
-                                           divisor_limb);
-                                return r >> normalization_steps;
-                        }
-                }
-                /* No normalization needed, either because udiv_qrnnd doesn't require
-                 * it, or because DIVISOR_LIMB is already normalized. */
-                i = dividend_size - 1;
-                r = dividend_ptr[i];
-
-                if (r >= divisor_limb)
-                        r = 0;
-                else
-                        i--;
-
-                for (; i >= 0; i--) {
-                        n0 = dividend_ptr[i];
-                        udiv_qrnnd(dummy, r, r, n0, divisor_limb);
-                }
-                return r;
-        }
-}
-
 /* Divide num (NP/NSIZE) by den (DP/DSIZE) and write
  * the NSIZE-DSIZE least significant quotient limbs at QP
  * and the DSIZE long remainder at NP. If QEXTRA_LIMBS is
@@ -387,159 +234,3 @@ q_test:
 
         return most_significant_q_limb;
 }
-
-/****************
- * Divide (DIVIDEND_PTR,,DIVIDEND_SIZE) by DIVISOR_LIMB.
- * Write DIVIDEND_SIZE limbs of quotient at QUOT_PTR.
- * Return the single-limb remainder.
- * There are no constraints on the value of the divisor.
- *
- * QUOT_PTR and DIVIDEND_PTR might point to the same limb.
- */
-
-mpi_limb_t
-mpihelp_divmod_1(mpi_ptr_t quot_ptr,
-                 mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
-                 mpi_limb_t divisor_limb)
-{
-        mpi_size_t i;
-        mpi_limb_t n1, n0, r;
-        int dummy;
-
-        if (!dividend_size)
-                return 0;
-
-        /* If multiplication is much faster than division, and the
-         * dividend is large, pre-invert the divisor, and use
-         * only multiplications in the inner loop.
-         *
-         * This test should be read:
-         *      Does it ever help to use udiv_qrnnd_preinv?
-         *        && Does what we save compensate for the inversion overhead?
-         */
-        if (UDIV_TIME > (2 * UMUL_TIME + 6)
-            && (UDIV_TIME - (2 * UMUL_TIME + 6)) * dividend_size > UDIV_TIME) {
-                int normalization_steps;
-
-                count_leading_zeros(normalization_steps, divisor_limb);
-                if (normalization_steps) {
-                        mpi_limb_t divisor_limb_inverted;
-
-                        divisor_limb <<= normalization_steps;
-
-                        /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
-                         * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
-                         * most significant bit (with weight 2**N) implicit.
-                         */
-                        /* Special case for DIVISOR_LIMB == 100...000. */
-                        if (!(divisor_limb << 1))
-                                divisor_limb_inverted = ~(mpi_limb_t) 0;
-                        else
-                                udiv_qrnnd(divisor_limb_inverted, dummy,
-                                           -divisor_limb, 0, divisor_limb);
-
-                        n1 = dividend_ptr[dividend_size - 1];
-                        r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);
-
-                        /* Possible optimization:
-                         * if (r == 0
-                         *     && divisor_limb > ((n1 << normalization_steps)
-                         *                        | (dividend_ptr[dividend_size - 2] >> ...)))
-                         * ...one division less...
-                         */
-                        for (i = dividend_size - 2; i >= 0; i--) {
-                                n0 = dividend_ptr[i];
-                                UDIV_QRNND_PREINV(quot_ptr[i + 1], r, r,
-                                                  ((n1 << normalization_steps)
-                                                   | (n0 >>
-                                                      (BITS_PER_MPI_LIMB -
-                                                       normalization_steps))),
-                                                  divisor_limb,
-                                                  divisor_limb_inverted);
-                                n1 = n0;
-                        }
-                        UDIV_QRNND_PREINV(quot_ptr[0], r, r,
-                                          n1 << normalization_steps,
-                                          divisor_limb, divisor_limb_inverted);
-                        return r >> normalization_steps;
-                } else {
-                        mpi_limb_t divisor_limb_inverted;
-
-                        /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
-                         * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
-                         * most significant bit (with weight 2**N) implicit.
-                         */
-                        /* Special case for DIVISOR_LIMB == 100...000. */
-                        if (!(divisor_limb << 1))
-                                divisor_limb_inverted = ~(mpi_limb_t) 0;
-                        else
-                                udiv_qrnnd(divisor_limb_inverted, dummy,
-                                           -divisor_limb, 0, divisor_limb);
-
-                        i = dividend_size - 1;
-                        r = dividend_ptr[i];
-
-                        if (r >= divisor_limb)
-                                r = 0;
-                        else
-                                quot_ptr[i--] = 0;
-
-                        for (; i >= 0; i--) {
-                                n0 = dividend_ptr[i];
-                                UDIV_QRNND_PREINV(quot_ptr[i], r, r,
-                                                  n0, divisor_limb,
-                                                  divisor_limb_inverted);
-                        }
-                        return r;
-                }
-        } else {
-                if (UDIV_NEEDS_NORMALIZATION) {
-                        int normalization_steps;
-
-                        count_leading_zeros(normalization_steps, divisor_limb);
-                        if (normalization_steps) {
-                                divisor_limb <<= normalization_steps;
-
-                                n1 = dividend_ptr[dividend_size - 1];
-                                r = n1 >> (BITS_PER_MPI_LIMB -
-                                           normalization_steps);
-
-                                /* Possible optimization:
-                                 * if (r == 0
-                                 *     && divisor_limb > ((n1 << normalization_steps)
-                                 *                        | (dividend_ptr[dividend_size - 2] >> ...)))
-                                 * ...one division less...
-                                 */
-                                for (i = dividend_size - 2; i >= 0; i--) {
-                                        n0 = dividend_ptr[i];
-                                        udiv_qrnnd(quot_ptr[i + 1], r, r,
-                                                   ((n1 << normalization_steps)
-                                                    | (n0 >>
-                                                       (BITS_PER_MPI_LIMB -
-                                                        normalization_steps))),
-                                                   divisor_limb);
-                                        n1 = n0;
-                                }
-                                udiv_qrnnd(quot_ptr[0], r, r,
-                                           n1 << normalization_steps,
-                                           divisor_limb);
-                                return r >> normalization_steps;
-                        }
-                }
-                /* No normalization needed, either because udiv_qrnnd doesn't require
-                 * it, or because DIVISOR_LIMB is already normalized. */
-                i = dividend_size - 1;
-                r = dividend_ptr[i];
-
-                if (r >= divisor_limb)
-                        r = 0;
-                else
-                        quot_ptr[i--] = 0;
-
-                for (; i >= 0; i--) {
-                        n0 = dividend_ptr[i];
-                        udiv_qrnnd(quot_ptr[i], r, r, n0, divisor_limb);
-                }
-                return r;
-        }
-}
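
For context on what the removed fast path was doing: both functions reduce a multi-limb number by a single limb, and when hardware division is slow they first normalize the divisor, compute a one-limb reciprocal with a single real division, and then replace every udiv_qrnnd in the inner loop with multiplications via UDIV_QRNND_PREINV. Below is a minimal standalone sketch of that pre-inversion technique, not the kernel's code: it assumes 32-bit limbs, uint64_t double-width arithmetic, and the GCC/Clang __builtin_clz builtin, and the names limb_t, invert_limb, udiv_qrnnd_preinv and mod_1_preinv are illustrative.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t limb_t;                        /* stand-in for mpi_limb_t */
#define LIMB_BITS 32

/* Reciprocal of a normalized divisor d (top bit set):
 * di = floor((2**64 - 2**32 * d) / d) = floor(2**64 / d) - 2**32,
 * a 33-bit approximation of 1/d with the leading 1 implicit.
 * This is the one real division; everything after uses multiplies. */
static limb_t invert_limb(limb_t d)
{
        assert(d >> (LIMB_BITS - 1));           /* must be normalized */
        if ((limb_t)(d << 1) == 0)              /* d == 100...000 special case */
                return ~(limb_t)0;
        return (limb_t)(((uint64_t)(limb_t)(0 - d) << LIMB_BITS) / d);
}

/* Divide the two-limb value (nh, nl), with nh < d, by normalized d
 * using the precomputed di; returns the quotient and stores the
 * remainder.  Same idea as the UDIV_QRNND_PREINV macro. */
static limb_t udiv_qrnnd_preinv(limb_t *rem, limb_t nh, limb_t nl,
                                limb_t d, limb_t di)
{
        /* Approximate quotient: high limb of nh * (2**32 + di). */
        limb_t q = nh + (limb_t)(((uint64_t)nh * di) >> LIMB_BITS);
        uint64_t n = ((uint64_t)nh << LIMB_BITS) | nl;
        uint64_t r = n - (uint64_t)q * d;       /* q never overshoots */

        while (r >= d) {                        /* q is at most a few too small */
                q++;
                r -= d;
        }
        *rem = (limb_t)r;
        return q;
}

/* Remainder of a little-endian limb array modulo one limb, in the
 * spirit of the removed mpihelp_mod_1() fast path: normalize, invert
 * once, then reduce the (virtually) left-shifted dividend limb by limb. */
static limb_t mod_1_preinv(const limb_t *np, int nsize, limb_t d)
{
        int shift, i;                           /* shift = normalization_steps */
        limb_t di, r, n1;

        assert(nsize > 0 && d != 0);
        shift = __builtin_clz(d);
        d <<= shift;
        di = invert_limb(d);

        if (shift == 0) {                       /* divisor already normalized */
                r = 0;
                for (i = nsize - 1; i >= 0; i--)
                        (void)udiv_qrnnd_preinv(&r, r, np[i], d, di);
                return r;
        }

        n1 = np[nsize - 1];
        r = n1 >> (LIMB_BITS - shift);          /* bits shifted out of the top limb */
        for (i = nsize - 2; i >= 0; i--) {
                limb_t n0 = np[i];
                (void)udiv_qrnnd_preinv(&r, r,
                                        (n1 << shift) | (n0 >> (LIMB_BITS - shift)),
                                        d, di);
                n1 = n0;
        }
        (void)udiv_qrnnd_preinv(&r, r, n1 << shift, d, di);
        return r >> shift;                      /* undo the normalization */
}

int main(void)
{
        /* 0x123456789abcdef0fedcba98, least significant limb first. */
        limb_t n[3] = { 0xfedcba98u, 0x9abcdef0u, 0x12345678u };
        limb_t d = 1000000007u;                 /* arbitrary unnormalized divisor */
        uint64_t r = 0;
        int i;

        /* Cross-check against plain per-limb long division. */
        for (i = 2; i >= 0; i--)
                r = (((uint64_t)(limb_t)r << LIMB_BITS) | n[i]) % d;

        printf("preinv %u, plain %u\n", mod_1_preinv(n, 3, d), (limb_t)r);
        return 0;
}

Storing the per-limb quotients that udiv_qrnnd_preinv returns, instead of discarding them, turns the same loop into the divmod variant that mpihelp_divmod_1() implemented.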