aboutsummaryrefslogtreecommitdiffstats
path: root/lib/random32.c
diff options
context:
space:
mode:
authorHannes Frederic Sowa <hannes@stressinduktion.org>2014-07-28 08:01:38 -0400
committerDavid S. Miller <davem@davemloft.net>2014-07-30 16:55:27 -0400
commit4ada97abe937cdb3fc029a871d5b0f21aa661a60 (patch)
tree74cd1b18f41a06e2c86e96565cd887206a28c87e /lib/random32.c
parentf139c74a8df071217dcd63f3ef06ae7be7071c4d (diff)
random32: mix in entropy from core to late initcall
Currently, we have a 3-stage seeding process in prandom(): Phase 1 is from the early actual initialization of prandom() subsystem which happens during core_initcall() and remains most likely until the beginning of late_initcall() phase. Here, the system might not have enough entropy available for seeding with strong randomness from the random driver. That means, we currently have a 32bit weak LCG() seeding the PRNG status register 1 and mixing that successively into the other 3 registers just to get it up and running. Phase 2 starts with late_initcall() phase resp. when the random driver has initialized its non-blocking pool with enough entropy. At that time, we throw away *all* inner state from its 4 registers and do a full reseed with strong randomness. Phase 3 starts right after that and does a periodic reseed with random slack of status register 1 by a strong random source again. A problem in phase 1 is that during bootup data structures can be initialized, e.g. on module load time, and thus access a weakly seeded prandom and are never changed for the rest of their lifetime, thus carrying along the results from a weak seed. Let's make sure that current but also future users access a possibly better early seeded prandom. This patch therefore improves phase 1 by trying to make it more 'unpredictable' through mixing in seed from a possible hardware source. Now, the mix-in xors inner state with the outcome of either of the two functions arch_get_random_{,seed}_int(), preferably arch_get_random_seed_int() as it likely represents a non-deterministic random bit generator in hw rather than a cryptographically secure PRNG in hw. However, not all might have the first one, so we use the PRNG as a fallback if available. As we xor the seed into the current state, the worst case would be that a hardware source could be unverifiably compromised or backdoored. 
In that case, it would nevertheless be as good as our original early seeding function prandom_seed_very_weak(), since we mix through xor, which is entropy preserving. Joint work with Daniel Borkmann. Signed-off-by: Daniel Borkmann <dborkman@redhat.com> Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'lib/random32.c')
-rw-r--r--lib/random32.c49
1 files changed, 28 insertions, 21 deletions
diff --git a/lib/random32.c b/lib/random32.c
index fa5da61ce7ad..c9b6bf3afe0c 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -40,6 +40,10 @@
40 40
41#ifdef CONFIG_RANDOM32_SELFTEST 41#ifdef CONFIG_RANDOM32_SELFTEST
42static void __init prandom_state_selftest(void); 42static void __init prandom_state_selftest(void);
43#else
44static inline void prandom_state_selftest(void)
45{
46}
43#endif 47#endif
44 48
45static DEFINE_PER_CPU(struct rnd_state, net_rand_state); 49static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
@@ -53,8 +57,7 @@ static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
53 */ 57 */
54u32 prandom_u32_state(struct rnd_state *state) 58u32 prandom_u32_state(struct rnd_state *state)
55{ 59{
56#define TAUSWORTHE(s,a,b,c,d) ((s&c)<<d) ^ (((s <<a) ^ s)>>b) 60#define TAUSWORTHE(s, a, b, c, d) ((s & c) << d) ^ (((s << a) ^ s) >> b)
57
58 state->s1 = TAUSWORTHE(state->s1, 6U, 13U, 4294967294U, 18U); 61 state->s1 = TAUSWORTHE(state->s1, 6U, 13U, 4294967294U, 18U);
59 state->s2 = TAUSWORTHE(state->s2, 2U, 27U, 4294967288U, 2U); 62 state->s2 = TAUSWORTHE(state->s2, 2U, 27U, 4294967288U, 2U);
60 state->s3 = TAUSWORTHE(state->s3, 13U, 21U, 4294967280U, 7U); 63 state->s3 = TAUSWORTHE(state->s3, 13U, 21U, 4294967280U, 7U);
@@ -147,21 +150,25 @@ static void prandom_warmup(struct rnd_state *state)
147 prandom_u32_state(state); 150 prandom_u32_state(state);
148} 151}
149 152
150static void prandom_seed_very_weak(struct rnd_state *state, u32 seed) 153static u32 __extract_hwseed(void)
151{ 154{
152 /* Note: This sort of seeding is ONLY used in test cases and 155 u32 val = 0;
153 * during boot at the time from core_initcall until late_initcall 156
154 * as we don't have a stronger entropy source available yet. 157 (void)(arch_get_random_seed_int(&val) ||
155 * After late_initcall, we reseed entire state, we have to (!), 158 arch_get_random_int(&val));
156 * otherwise an attacker just needs to search 32 bit space to 159
157 * probe for our internal 128 bit state if he knows a couple 160 return val;
158 * of prandom32 outputs! 161}
159 */ 162
160#define LCG(x) ((x) * 69069U) /* super-duper LCG */ 163static void prandom_seed_early(struct rnd_state *state, u32 seed,
161 state->s1 = __seed(LCG(seed), 2U); 164 bool mix_with_hwseed)
162 state->s2 = __seed(LCG(state->s1), 8U); 165{
163 state->s3 = __seed(LCG(state->s2), 16U); 166#define LCG(x) ((x) * 69069U) /* super-duper LCG */
164 state->s4 = __seed(LCG(state->s3), 128U); 167#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0)
168 state->s1 = __seed(HWSEED() ^ LCG(seed), 2U);
169 state->s2 = __seed(HWSEED() ^ LCG(state->s1), 8U);
170 state->s3 = __seed(HWSEED() ^ LCG(state->s2), 16U);
171 state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U);
165} 172}
166 173
167/** 174/**
@@ -194,14 +201,13 @@ static int __init prandom_init(void)
194{ 201{
195 int i; 202 int i;
196 203
197#ifdef CONFIG_RANDOM32_SELFTEST
198 prandom_state_selftest(); 204 prandom_state_selftest();
199#endif
200 205
201 for_each_possible_cpu(i) { 206 for_each_possible_cpu(i) {
202 struct rnd_state *state = &per_cpu(net_rand_state,i); 207 struct rnd_state *state = &per_cpu(net_rand_state,i);
208 u32 weak_seed = (i + jiffies) ^ random_get_entropy();
203 209
204 prandom_seed_very_weak(state, (i + jiffies) ^ random_get_entropy()); 210 prandom_seed_early(state, weak_seed, true);
205 prandom_warmup(state); 211 prandom_warmup(state);
206 } 212 }
207 213
@@ -210,6 +216,7 @@ static int __init prandom_init(void)
210core_initcall(prandom_init); 216core_initcall(prandom_init);
211 217
212static void __prandom_timer(unsigned long dontcare); 218static void __prandom_timer(unsigned long dontcare);
219
213static DEFINE_TIMER(seed_timer, __prandom_timer, 0, 0); 220static DEFINE_TIMER(seed_timer, __prandom_timer, 0, 0);
214 221
215static void __prandom_timer(unsigned long dontcare) 222static void __prandom_timer(unsigned long dontcare)
@@ -419,7 +426,7 @@ static void __init prandom_state_selftest(void)
419 for (i = 0; i < ARRAY_SIZE(test1); i++) { 426 for (i = 0; i < ARRAY_SIZE(test1); i++) {
420 struct rnd_state state; 427 struct rnd_state state;
421 428
422 prandom_seed_very_weak(&state, test1[i].seed); 429 prandom_seed_early(&state, test1[i].seed, false);
423 prandom_warmup(&state); 430 prandom_warmup(&state);
424 431
425 if (test1[i].result != prandom_u32_state(&state)) 432 if (test1[i].result != prandom_u32_state(&state))
@@ -434,7 +441,7 @@ static void __init prandom_state_selftest(void)
434 for (i = 0; i < ARRAY_SIZE(test2); i++) { 441 for (i = 0; i < ARRAY_SIZE(test2); i++) {
435 struct rnd_state state; 442 struct rnd_state state;
436 443
437 prandom_seed_very_weak(&state, test2[i].seed); 444 prandom_seed_early(&state, test2[i].seed, false);
438 prandom_warmup(&state); 445 prandom_warmup(&state);
439 446
440 for (j = 0; j < test2[i].iteration - 1; j++) 447 for (j = 0; j < test2[i].iteration - 1; j++)