Diffstat (limited to 'drivers/char')
-rw-r--r--	drivers/char/random.c	118
1 file changed, 86 insertions(+), 32 deletions(-)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 6da3f250804c..84c576ec20e9 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -255,6 +255,7 @@
 #include <linux/fips.h>
 #include <linux/ptrace.h>
 #include <linux/kmemcheck.h>
+#include <linux/workqueue.h>

 #ifdef CONFIG_GENERIC_HARDIRQS
 # include <linux/irq.h>
@@ -302,7 +303,7 @@ static int random_read_wakeup_thresh = 64;
  * should wake up processes which are selecting or polling on write
  * access to /dev/random.
  */
-static int random_write_wakeup_thresh = 128;
+static int random_write_wakeup_thresh = 28 * OUTPUT_POOL_WORDS;

 /*
  * The minimum number of seconds between urandom pool reseeding.  We
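For scale: OUTPUT_POOL_WORDS is 32 in this file, so the new default evaluates to 28 * 32 = 896 instead of the old fixed 128. A throwaway user-space check of that arithmetic, with the constant hard-coded here as an assumption rather than pulled from the kernel headers:

/* Throwaway arithmetic check -- not part of the patch. */
#include <stdio.h>

#define OUTPUT_POOL_WORDS 32	/* assumed value from drivers/char/random.c */

int main(void)
{
	/* New default for random_write_wakeup_thresh vs. the old constant. */
	printf("new default: %d (old default: 128)\n", 28 * OUTPUT_POOL_WORDS);
	return 0;
}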
@@ -428,6 +429,7 @@ struct entropy_store {
 	__u32 *pool;
 	const char *name;
 	struct entropy_store *pull;
+	struct work_struct push_work;

 	/* read-write data: */
 	unsigned long last_pulled;
@@ -442,6 +444,7 @@ struct entropy_store {
 	__u8 last_data[EXTRACT_SIZE];
 };

+static void push_to_pool(struct work_struct *work);
 static __u32 input_pool_data[INPUT_POOL_WORDS];
 static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
 static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
@@ -460,7 +463,9 @@ static struct entropy_store blocking_pool = {
 	.limit = 1,
 	.pull = &input_pool,
 	.lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock),
-	.pool = blocking_pool_data
+	.pool = blocking_pool_data,
+	.push_work = __WORK_INITIALIZER(blocking_pool.push_work,
+					push_to_pool),
 };

466static struct entropy_store nonblocking_pool = { 471static struct entropy_store nonblocking_pool = {
@@ -468,7 +473,9 @@ static struct entropy_store nonblocking_pool = {
 	.name = "nonblocking",
 	.pull = &input_pool,
 	.lock = __SPIN_LOCK_UNLOCKED(nonblocking_pool.lock),
-	.pool = nonblocking_pool_data
+	.pool = nonblocking_pool_data,
+	.push_work = __WORK_INITIALIZER(nonblocking_pool.push_work,
+					push_to_pool),
 };

 static __u32 const twist_table[8] = {
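The two initializers above rely on the standard workqueue idiom of embedding a statically initialized work_struct in a containing object and recovering that object in the callback. A minimal, self-contained kernel-module sketch of that idiom, using hypothetical names (demo_store, demo_push) that are not part of this patch:

/* Hypothetical sketch of the static work_struct pattern; not part of the
 * patch.  The object embeds a work item, the callback recovers the object
 * with container_of(), and schedule_work() defers the callback to process
 * context on the system workqueue.
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/printk.h>

struct demo_store {
	const char *name;
	struct work_struct push_work;
};

static void demo_push(struct work_struct *work)
{
	struct demo_store *s = container_of(work, struct demo_store,
					    push_work);

	pr_info("push work running for %s\n", s->name);
}

static struct demo_store demo = {
	.name      = "demo",
	.push_work = __WORK_INITIALIZER(demo.push_work, demo_push),
};

static int __init demo_init(void)
{
	schedule_work(&demo.push_work);	/* demo_push() runs later */
	return 0;
}

static void __exit demo_exit(void)
{
	flush_work(&demo.push_work);	/* make sure it is not still pending */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");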
@@ -655,21 +662,48 @@ retry:
 	if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
 		goto retry;

+	r->entropy_total += nbits;
 	if (!r->initialized && nbits > 0) {
-		r->entropy_total += nbits;
-		if (r->entropy_total > 128)
+		if (r->entropy_total > 128) {
 			r->initialized = 1;
+			r->entropy_total = 0;
+		}
 	}

 	trace_credit_entropy_bits(r->name, nbits,
 				  entropy_count >> ENTROPY_SHIFT,
 				  r->entropy_total, _RET_IP_);

-	/* should we wake readers? */
-	if (r == &input_pool &&
-	    (entropy_count >> ENTROPY_SHIFT) >= random_read_wakeup_thresh) {
-		wake_up_interruptible(&random_read_wait);
-		kill_fasync(&fasync, SIGIO, POLL_IN);
+	if (r == &input_pool) {
+		int entropy_bytes = entropy_count >> ENTROPY_SHIFT;
+
+		/* should we wake readers? */
+		if (entropy_bytes >= random_read_wakeup_thresh) {
+			wake_up_interruptible(&random_read_wait);
+			kill_fasync(&fasync, SIGIO, POLL_IN);
+		}
+		/* If the input pool is getting full, send some
+		 * entropy to the two output pools, flipping back and
+		 * forth between them, until the output pools are 75%
+		 * full.
+		 */
+		if (entropy_bytes > random_write_wakeup_thresh &&
+		    r->initialized &&
+		    r->entropy_total >= 2*random_read_wakeup_thresh) {
+			static struct entropy_store *last = &blocking_pool;
+			struct entropy_store *other = &blocking_pool;
+
+			if (last == &blocking_pool)
+				other = &nonblocking_pool;
+			if (other->entropy_count <=
+			    3 * other->poolinfo->poolfracbits / 4)
+				last = other;
+			if (last->entropy_count <=
+			    3 * last->poolinfo->poolfracbits / 4) {
+				schedule_work(&last->push_work);
+				r->entropy_total = 0;
+			}
+		}
 	}
 }

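The selection logic added above alternates between the blocking and nonblocking pools and skips whichever target is already above three quarters of its capacity. A small user-space model of just that selection rule, with made-up pool names, sizes and fill levels (none of these values come from the kernel):

/* Hypothetical model of the "flip back and forth until 75% full" rule;
 * the numbers are invented purely to show the alternation.
 */
#include <stdio.h>

struct pool {
	const char *name;
	int entropy;	/* current fill level */
	int capacity;	/* maximum fill level */
};

static struct pool blocking    = { "blocking",    10, 100 };
static struct pool nonblocking = { "nonblocking", 20, 100 };

/* Alternate with the previous target, but never pick a pool that is
 * already above 75% of its capacity; return NULL if both are that full.
 */
static struct pool *pick_target(void)
{
	static struct pool *last = &blocking;
	struct pool *other = (last == &blocking) ? &nonblocking : &blocking;

	if (other->entropy <= 3 * other->capacity / 4)
		last = other;
	if (last->entropy <= 3 * last->capacity / 4)
		return last;
	return NULL;
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		struct pool *t = pick_target();

		if (!t) {
			puts("both pools above 75%, nothing to push");
			continue;
		}
		t->entropy += 20;	/* pretend some entropy was pushed */
		printf("pushed to %s (now %d/%d)\n",
		       t->name, t->entropy, t->capacity);
	}
	return 0;
}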
@@ -877,10 +911,9 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
  * from the primary pool to the secondary extraction pool. We make
  * sure we pull enough for a 'catastrophic reseed'.
  */
+static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes);
 static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
 {
-	__u32	tmp[OUTPUT_POOL_WORDS];
-
 	if (r->limit == 0 && random_min_urandom_seed) {
 		unsigned long now = jiffies;

@@ -891,26 +924,47 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
 	}
 	if (r->pull &&
 	    r->entropy_count < (nbytes << (ENTROPY_SHIFT + 3)) &&
-	    r->entropy_count < r->poolinfo->poolfracbits) {
-		/* If we're limited, always leave two wakeup worth's BITS */
-		int rsvd = r->limit ? 0 : random_read_wakeup_thresh/4;
-		int bytes = nbytes;
-
-		/* pull at least as many as BYTES as wakeup BITS */
-		bytes = max_t(int, bytes, random_read_wakeup_thresh / 8);
-		/* but never more than the buffer size */
-		bytes = min_t(int, bytes, sizeof(tmp));
-
-		DEBUG_ENT("going to reseed %s with %d bits "
-			  "(%zu of %d requested)\n",
-			  r->name, bytes * 8, nbytes * 8,
-			  r->entropy_count >> ENTROPY_SHIFT);
-
-		bytes = extract_entropy(r->pull, tmp, bytes,
-					random_read_wakeup_thresh / 8, rsvd);
-		mix_pool_bytes(r, tmp, bytes, NULL);
-		credit_entropy_bits(r, bytes*8);
-	}
+	    r->entropy_count < r->poolinfo->poolfracbits)
+		_xfer_secondary_pool(r, nbytes);
+}
+
+static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
+{
+	__u32	tmp[OUTPUT_POOL_WORDS];
+
+	/* For /dev/random's pool, always leave two wakeup worth's BITS */
+	int rsvd = r->limit ? 0 : random_read_wakeup_thresh/4;
+	int bytes = nbytes;
+
+	/* pull at least as many as BYTES as wakeup BITS */
+	bytes = max_t(int, bytes, random_read_wakeup_thresh / 8);
+	/* but never more than the buffer size */
+	bytes = min_t(int, bytes, sizeof(tmp));
+
+	DEBUG_ENT("going to reseed %s with %d bits (%zu of %d requested)\n",
+		  r->name, bytes * 8, nbytes * 8,
+		  r->entropy_count >> ENTROPY_SHIFT);
+
+	bytes = extract_entropy(r->pull, tmp, bytes,
+				random_read_wakeup_thresh / 8, rsvd);
+	mix_pool_bytes(r, tmp, bytes, NULL);
+	credit_entropy_bits(r, bytes*8);
+}
+
+/*
+ * Used as a workqueue function so that when the input pool is getting
+ * full, we can "spill over" some entropy to the output pools.  That
+ * way the output pools can store some of the excess entropy instead
+ * of letting it go to waste.
+ */
+static void push_to_pool(struct work_struct *work)
+{
+	struct entropy_store *r = container_of(work, struct entropy_store,
+					       push_work);
+	BUG_ON(!r);
+	_xfer_secondary_pool(r, random_read_wakeup_thresh/8);
+	trace_push_to_pool(r->name, r->entropy_count >> ENTROPY_SHIFT,
+			   r->pull->entropy_count >> ENTROPY_SHIFT);
 }

 /*
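The new _xfer_secondary_pool() sizes its pull by raising the request to at least one read-wakeup's worth of bytes and capping it at the size of its temporary buffer. A small user-space model of just that clamping, with the threshold and buffer size hard-coded below as assumptions rather than the kernel's tunables:

/* Hypothetical model of the byte-count clamping in _xfer_secondary_pool();
 * the threshold and buffer size are stand-ins for illustration only.
 */
#include <stdio.h>

#define READ_WAKEUP_BITS	64	/* stand-in for random_read_wakeup_thresh */
#define TMP_BYTES		(32 * (int)sizeof(unsigned int))	/* __u32 tmp[32] */

static int clamp_pull_bytes(int requested)
{
	int bytes = requested;

	if (bytes < READ_WAKEUP_BITS / 8)	/* pull at least a wakeup's worth */
		bytes = READ_WAKEUP_BITS / 8;
	if (bytes > TMP_BYTES)			/* but never more than the buffer */
		bytes = TMP_BYTES;
	return bytes;
}

int main(void)
{
	printf("%d %d %d\n",
	       clamp_pull_bytes(2),	/* raised to 8   */
	       clamp_pull_bytes(64),	/* stays at 64   */
	       clamp_pull_bytes(500));	/* capped at 128 */
	return 0;
}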