author     Theodore Ts'o <tytso@mit.edu>    2014-06-10 23:09:20 -0400
committer  Theodore Ts'o <tytso@mit.edu>    2014-07-15 04:49:39 -0400
commit     85608f8e16c28f818f6bb9918958d231afa8bec2 (patch)
tree       506408b80b020f5856874812045f7c826513a78e /drivers/char
parent     91fcb532efe366d79b93a3c8c368b9dca6176a55 (diff)
random: remove unneeded hash of a portion of the entropy pool
We previously extracted a portion of the entropy pool in
mix_pool_bytes() and hashed it in to prevent racing CPUs from returning
duplicate random values. Now that we are using a spinlock to prevent
this from happening, this is no longer necessary, so remove it to
simplify the code a bit.

Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Cc: George Spelvin <linux@horizon.com>
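For context, a minimal sketch of the locking idea this commit relies on: once every
call that mixes into a pool runs under that pool's lock, two CPUs can no longer
interleave their updates and then read back identical pool state, which is what the
removed out[64] feedback hash was guarding against. The sketch below is illustrative
only; a user-space pthread mutex stands in for the kernel spinlock, and toy_pool,
_toy_mix, and toy_mix are hypothetical names, not the kernel API.

/*
 * Illustrative sketch only -- not kernel code.  A pthread mutex stands
 * in for the kernel spinlock; the pool update is deliberately trivial.
 */
#include <pthread.h>
#include <stddef.h>

struct toy_pool {
	unsigned int    pool[128];	/* stand-in for r->pool */
	unsigned int    add_ptr;
	pthread_mutex_t lock;		/* plays the role of r->lock */
};

/* Unlocked worker: the caller must already hold p->lock. */
static void _toy_mix(struct toy_pool *p, const void *in, size_t nbytes)
{
	const unsigned char *bytes = in;
	size_t i;

	for (i = 0; i < nbytes; i++) {
		p->add_ptr = (p->add_ptr + 1) & 127;
		p->pool[p->add_ptr] ^= bytes[i];
	}
}

/*
 * Locked wrapper: because the whole mix runs under the lock, concurrent
 * callers are serialized and cannot observe (and later hand back) the
 * same intermediate pool contents -- the race the removed out[64]
 * feedback hash used to defend against.
 */
static void toy_mix(struct toy_pool *p, const void *in, size_t nbytes)
{
	pthread_mutex_lock(&p->lock);
	_toy_mix(p, in, nbytes);
	pthread_mutex_unlock(&p->lock);
}

int main(void)
{
	struct toy_pool p = { .lock = PTHREAD_MUTEX_INITIALIZER };
	static const unsigned char sample[] = "some device-specific bytes";

	toy_mix(&p, sample, sizeof(sample));
	return 0;
}

The same split appears in the patch as _mix_pool_bytes() (lock already held) versus
mix_pool_bytes() (takes r->lock itself), which is why the extra out parameter can be
dropped from all three variants below.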
Diffstat (limited to 'drivers/char')
-rw-r--r--    drivers/char/random.c    51
1 file changed, 20 insertions(+), 31 deletions(-)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 922a2e4089f9..bc0de22f31f4 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -481,9 +481,9 @@ static __u32 const twist_table[8] = {
  * the entropy is concentrated in the low-order bits.
  */
 static void _mix_pool_bytes(struct entropy_store *r, const void *in,
-			    int nbytes, __u8 out[64])
+			    int nbytes)
 {
-	unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
+	unsigned long i, tap1, tap2, tap3, tap4, tap5;
 	int input_rotate;
 	int wordmask = r->poolinfo->poolwords - 1;
 	const char *bytes = in;
@@ -525,27 +525,23 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
 
 	r->input_rotate = input_rotate;
 	r->add_ptr = i;
-
-	if (out)
-		for (j = 0; j < 16; j++)
-			((__u32 *)out)[j] = r->pool[(i - j) & wordmask];
 }
 
 static void __mix_pool_bytes(struct entropy_store *r, const void *in,
-			     int nbytes, __u8 out[64])
+			     int nbytes)
 {
 	trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
-	_mix_pool_bytes(r, in, nbytes, out);
+	_mix_pool_bytes(r, in, nbytes);
 }
 
 static void mix_pool_bytes(struct entropy_store *r, const void *in,
-			   int nbytes, __u8 out[64])
+			   int nbytes)
 {
 	unsigned long flags;
 
 	trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
 	spin_lock_irqsave(&r->lock, flags);
-	_mix_pool_bytes(r, in, nbytes, out);
+	_mix_pool_bytes(r, in, nbytes);
 	spin_unlock_irqrestore(&r->lock, flags);
 }
 
@@ -737,13 +733,13 @@ void add_device_randomness(const void *buf, unsigned int size)
 
 	trace_add_device_randomness(size, _RET_IP_);
 	spin_lock_irqsave(&input_pool.lock, flags);
-	_mix_pool_bytes(&input_pool, buf, size, NULL);
-	_mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
+	_mix_pool_bytes(&input_pool, buf, size);
+	_mix_pool_bytes(&input_pool, &time, sizeof(time));
 	spin_unlock_irqrestore(&input_pool.lock, flags);
 
 	spin_lock_irqsave(&nonblocking_pool.lock, flags);
-	_mix_pool_bytes(&nonblocking_pool, buf, size, NULL);
-	_mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL);
+	_mix_pool_bytes(&nonblocking_pool, buf, size);
+	_mix_pool_bytes(&nonblocking_pool, &time, sizeof(time));
 	spin_unlock_irqrestore(&nonblocking_pool.lock, flags);
 }
 EXPORT_SYMBOL(add_device_randomness);
@@ -776,7 +772,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
 	sample.cycles = random_get_entropy();
 	sample.num = num;
 	r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
-	mix_pool_bytes(r, &sample, sizeof(sample), NULL);
+	mix_pool_bytes(r, &sample, sizeof(sample));
 
 	/*
 	 * Calculate number of bits of randomness we probably added.
@@ -864,7 +860,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
 		return;
 	}
 	fast_pool->last = now;
-	__mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
+	__mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));
 
 	/*
 	 * If we have architectural seed generator, produce a seed and
@@ -872,7 +868,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
 	 * 50% entropic.
 	 */
 	if (arch_get_random_seed_long(&seed)) {
-		__mix_pool_bytes(r, &seed, sizeof(seed), NULL);
+		__mix_pool_bytes(r, &seed, sizeof(seed));
 		credit += sizeof(seed) * 4;
 	}
 	spin_unlock(&r->lock);
@@ -955,7 +951,7 @@ static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
 		  ENTROPY_BITS(r), ENTROPY_BITS(r->pull));
 	bytes = extract_entropy(r->pull, tmp, bytes,
 				random_read_wakeup_bits / 8, rsvd_bytes);
-	mix_pool_bytes(r, tmp, bytes, NULL);
+	mix_pool_bytes(r, tmp, bytes);
 	credit_entropy_bits(r, bytes*8);
 }
 
@@ -1031,7 +1027,6 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
 		unsigned long l[LONGS(20)];
 	} hash;
 	__u32 workspace[SHA_WORKSPACE_WORDS];
-	__u8 extract[64];
 	unsigned long flags;
 
 	/*
@@ -1060,15 +1055,9 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
 	 * brute-forcing the feedback as hard as brute-forcing the
 	 * hash.
 	 */
-	__mix_pool_bytes(r, hash.w, sizeof(hash.w), extract);
+	__mix_pool_bytes(r, hash.w, sizeof(hash.w));
 	spin_unlock_irqrestore(&r->lock, flags);
 
-	/*
-	 * To avoid duplicates, we atomically extract a portion of the
-	 * pool while mixing, and hash one final time.
-	 */
-	sha_transform(hash.w, extract, workspace);
-	memset(extract, 0, sizeof(extract));
 	memset(workspace, 0, sizeof(workspace));
 
 	/*
@@ -1255,14 +1244,14 @@ static void init_std_data(struct entropy_store *r)
 	unsigned long rv;
 
 	r->last_pulled = jiffies;
-	mix_pool_bytes(r, &now, sizeof(now), NULL);
+	mix_pool_bytes(r, &now, sizeof(now));
 	for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
 		if (!arch_get_random_seed_long(&rv) &&
 		    !arch_get_random_long(&rv))
 			rv = random_get_entropy();
-		mix_pool_bytes(r, &rv, sizeof(rv), NULL);
+		mix_pool_bytes(r, &rv, sizeof(rv));
 	}
-	mix_pool_bytes(r, utsname(), sizeof(*(utsname())), NULL);
+	mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
 }
 
 /*
@@ -1325,7 +1314,7 @@ static int arch_random_refill(void)
 	if (n) {
 		unsigned int rand_bytes = n * sizeof(unsigned long);
 
-		mix_pool_bytes(&input_pool, buf, rand_bytes, NULL);
+		mix_pool_bytes(&input_pool, buf, rand_bytes);
 		credit_entropy_bits(&input_pool, rand_bytes*4);
 	}
 
@@ -1415,7 +1404,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
 		count -= bytes;
 		p += bytes;
 
-		mix_pool_bytes(r, buf, bytes, NULL);
+		mix_pool_bytes(r, buf, bytes);
 		cond_resched();
 	}
 