author    Jason A. Donenfeld <Jason@zx2c4.com>    2017-06-07 19:45:31 -0400
committer Theodore Ts'o <tytso@mit.edu>           2017-06-07 19:45:37 -0400
commit    b169c13de473a85b3c859bb36216a4cb5f00a54a (patch)
tree      4e6078fbf5f50be9fb113087f4affa3d19f1a25a /drivers/char/random.c
parent    92e75428ffc90e2a0321062379f883f3671cfebe (diff)
random: invalidate batched entropy after crng init
It's possible that get_random_{u32,u64} is used before the crng has initialized, in which case its output might not be cryptographically secure. This patch set addresses that directly by introducing the *_wait variety of functions, but even with those there's a subtle issue: what happens to the batched entropy that was generated before initialization? Prior to this commit, it would stick around, supplying bad numbers. After this commit, we force the entropy to be re-extracted after each phase of the crng has initialized.

In order to avoid a race condition with the position counter, we introduce a simple rwlock for this invalidation. It is only used during the awkward transition period; after things are all set up, we stop using it, so that it doesn't have an impact on performance.

Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Cc: stable@vger.kernel.org # v4.11+
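To make the lazy-invalidation idea above concrete, here is a minimal userspace sketch of the same pattern, not the kernel code: a thread-local batch stands in for the kernel's per-CPU batches, a pthreads rwlock stands in for the kernel rwlock, and the names fill_batch, get_batched_u64, invalidate_batch, and generator_ready are invented for illustration only.

/*
 * Illustrative userspace analogue only -- NOT the kernel code.  A
 * thread-local batch stands in for the per-CPU batch, pthreads stands
 * in for the kernel rwlock, and fill_batch() is a dummy placeholder
 * for extract_crng().
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define BATCH_WORDS 8

struct batch_u64 {
        uint64_t entropy[BATCH_WORDS];
        unsigned int position;
};

static pthread_rwlock_t reset_lock = PTHREAD_RWLOCK_INITIALIZER;
static __thread struct batch_u64 batch;     /* per-thread, like per-CPU */
static int generator_ready;                 /* plays the role of crng_init >= 2 */

/* Dummy stand-in for extract_crng(): deterministic, NOT random. */
static void fill_batch(struct batch_u64 *b)
{
        static uint64_t counter;

        for (int i = 0; i < BATCH_WORDS; i++)
                b->entropy[i] = ++counter;
}

static uint64_t get_batched_u64(void)
{
        uint64_t ret;
        int use_lock = !generator_ready;    /* take the lock only before full init */

        if (use_lock)
                pthread_rwlock_rdlock(&reset_lock);
        if (batch.position % BATCH_WORDS == 0) {
                fill_batch(&batch);         /* batch empty or just invalidated */
                batch.position = 0;
        }
        ret = batch.entropy[batch.position++];
        if (use_lock)
                pthread_rwlock_unlock(&reset_lock);
        return ret;
}

/* Lazy invalidation: position == 0 forces a refill on the next call. */
static void invalidate_batch(struct batch_u64 *b)
{
        pthread_rwlock_wrlock(&reset_lock);
        b->position = 0;
        pthread_rwlock_unlock(&reset_lock);
}

int main(void)
{
        printf("%llu\n", (unsigned long long)get_batched_u64());
        invalidate_batch(&batch);           /* as if the crng just finished initializing */
        generator_ready = 1;
        printf("%llu\n", (unsigned long long)get_batched_u64());
        return 0;
}

Resetting position to zero is enough because the consumer checks position modulo the batch size before handing out a word, so the very next call refills the batch from the (now initialized) generator; this mirrors how the patch below invalidates the kernel's batches without eagerly re-extracting on every CPU.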
Diffstat (limited to 'drivers/char/random.c')
-rw-r--r--  drivers/char/random.c  37
1 file changed, 37 insertions(+), 0 deletions(-)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 473ad34378f2..e870f329db88 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1,6 +1,9 @@
 /*
  * random.c -- A strong random number generator
  *
+ * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All
+ * Rights Reserved.
+ *
  * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
  *
  * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All
@@ -762,6 +765,8 @@ static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
 static struct crng_state **crng_node_pool __read_mostly;
 #endif
 
+static void invalidate_batched_entropy(void);
+
 static void crng_initialize(struct crng_state *crng)
 {
         int i;
@@ -799,6 +804,7 @@ static int crng_fast_load(const char *cp, size_t len)
                 cp++; crng_init_cnt++; len--;
         }
         if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
+                invalidate_batched_entropy();
                 crng_init = 1;
                 wake_up_interruptible(&crng_init_wait);
                 pr_notice("random: fast init done\n");
@@ -836,6 +842,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
         memzero_explicit(&buf, sizeof(buf));
         crng->init_time = jiffies;
         if (crng == &primary_crng && crng_init < 2) {
+                invalidate_batched_entropy();
                 crng_init = 2;
                 process_random_ready_list();
                 wake_up_interruptible(&crng_init_wait);
@@ -2023,6 +2030,7 @@ struct batched_entropy {
         };
         unsigned int position;
 };
+static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
 
 /*
  * Get a random word for internal kernel use only. The quality of the random
@@ -2033,6 +2041,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
 u64 get_random_u64(void)
 {
         u64 ret;
+        bool use_lock = crng_init < 2;
+        unsigned long flags;
         struct batched_entropy *batch;
 
 #if BITS_PER_LONG == 64
@@ -2045,11 +2055,15 @@ u64 get_random_u64(void)
 #endif
 
         batch = &get_cpu_var(batched_entropy_u64);
+        if (use_lock)
+                read_lock_irqsave(&batched_entropy_reset_lock, flags);
         if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
                 extract_crng((u8 *)batch->entropy_u64);
                 batch->position = 0;
         }
         ret = batch->entropy_u64[batch->position++];
+        if (use_lock)
+                read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
         put_cpu_var(batched_entropy_u64);
         return ret;
 }
@@ -2059,22 +2073,45 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
 u32 get_random_u32(void)
 {
         u32 ret;
+        bool use_lock = crng_init < 2;
+        unsigned long flags;
         struct batched_entropy *batch;
 
         if (arch_get_random_int(&ret))
                 return ret;
 
         batch = &get_cpu_var(batched_entropy_u32);
+        if (use_lock)
+                read_lock_irqsave(&batched_entropy_reset_lock, flags);
         if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
                 extract_crng((u8 *)batch->entropy_u32);
                 batch->position = 0;
         }
         ret = batch->entropy_u32[batch->position++];
+        if (use_lock)
+                read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
         put_cpu_var(batched_entropy_u32);
         return ret;
 }
 EXPORT_SYMBOL(get_random_u32);
 
+/* It's important to invalidate all potential batched entropy that might
+ * be stored before the crng is initialized, which we can do lazily by
+ * simply resetting the counter to zero so that it's re-extracted on the
+ * next usage. */
+static void invalidate_batched_entropy(void)
+{
+        int cpu;
+        unsigned long flags;
+
+        write_lock_irqsave(&batched_entropy_reset_lock, flags);
+        for_each_possible_cpu (cpu) {
+                per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
+                per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
+        }
+        write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
+}
+
 /**
  * randomize_page - Generate a random, page aligned address
  * @start: The smallest acceptable address the caller will take.