path: root/drivers/char/random.c
author	Theodore Ts'o <tytso@mit.edu>	2017-06-08 04:16:59 -0400
committer	Theodore Ts'o <tytso@mit.edu>	2017-07-15 12:19:28 -0400
commit	eecabf567422eda02bd179f2707d8fe24f52d888 (patch)
tree	621859908bc9613fa913338d513785072d07ae93 /drivers/char/random.c
parent	d06bfd1989fe97623b32d6df4ffa6e4338c99dc8 (diff)
random: suppress spammy warnings about unseeded randomness
Unfortunately, on some models of some architectures getting a fully seeded CRNG is extremely difficult, and so this can result in dmesg getting spammed for a surprisingly long time. This is really bad from a security perspective, and so architecture maintainers really need to do what they can to get the CRNG seeded sooner after the system is booted. However, users can't do anything actionable to address this, and spamming the kernel message log will only annoy people.

For developers who want to work on improving this situation, CONFIG_WARN_UNSEEDED_RANDOM has been renamed to CONFIG_WARN_ALL_UNSEEDED_RANDOM. By default the kernel will always print the first use of unseeded randomness. This way, hopefully the security-obsessed will be happy that there is _some_ indication when the kernel boots that there may be a potential issue with that architecture or subarchitecture. To see all uses of unseeded randomness, developers can enable CONFIG_WARN_ALL_UNSEEDED_RANDOM.

Signed-off-by: Theodore Ts'o <tytso@mit.edu>
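To make the behaviour change concrete, here is a minimal, self-contained userspace sketch (illustration only, not part of the patch) of the warn-once-unless-verbose pattern: VERBOSE_WARNINGS, rng_seeded and warn_unseeded are hypothetical stand-ins for CONFIG_WARN_ALL_UNSEEDED_RANDOM, crng_ready() and _warn_unseeded_randomness(). The real patch additionally suppresses repeated warnings coming from the same call site via a per-caller 'previous' pointer, which this sketch omits.

/*
 * Illustrative sketch only: warn on the first unseeded use, then stay
 * quiet unless a verbose compile-time option is set.
 */
#include <stdbool.h>
#include <stdio.h>

static bool rng_seeded;			/* stands in for crng_ready() */

static void warn_unseeded(const char *caller)
{
#ifdef VERBOSE_WARNINGS			/* ~ CONFIG_WARN_ALL_UNSEEDED_RANDOM */
	const bool print_once = false;	/* never latch; warn on every call */
#else
	static bool print_once;		/* latch after the first warning */
#endif

	if (print_once || rng_seeded)
		return;
#ifndef VERBOSE_WARNINGS
	print_once = true;
#endif
	fprintf(stderr, "random: %s called before the RNG was seeded\n", caller);
}

int main(void)
{
	warn_unseeded("first_user");	/* prints */
	warn_unseeded("second_user");	/* silent unless built with -DVERBOSE_WARNINGS */
	rng_seeded = true;
	warn_unseeded("third_user");	/* silent: RNG is now seeded */
	return 0;
}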
Diffstat (limited to 'drivers/char/random.c')
-rw-r--r--	drivers/char/random.c	56
1 file changed, 39 insertions(+), 17 deletions(-)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index fa5bbd5a7ca0..799d37981d99 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -436,6 +436,7 @@ static void _extract_crng(struct crng_state *crng,
 static void _crng_backtrack_protect(struct crng_state *crng,
 				     __u8 tmp[CHACHA20_BLOCK_SIZE], int used);
 static void process_random_ready_list(void);
+static void _get_random_bytes(void *buf, int nbytes);
 
 /**********************************************************************
  *
@@ -776,7 +777,7 @@ static void crng_initialize(struct crng_state *crng)
 		_extract_entropy(&input_pool, &crng->state[4],
 				 sizeof(__u32) * 12, 0);
 	else
-		get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
+		_get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
 	for (i = 4; i < 16; i++) {
 		if (!arch_get_random_seed_long(&rv) &&
 		    !arch_get_random_long(&rv))
@@ -1466,6 +1467,30 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
 	return ret;
 }
 
+#define warn_unseeded_randomness(previous) \
+	_warn_unseeded_randomness(__func__, (void *) _RET_IP_, (previous))
+
+static void _warn_unseeded_randomness(const char *func_name, void *caller,
+				      void **previous)
+{
+#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
+	const bool print_once = false;
+#else
+	static bool print_once __read_mostly;
+#endif
+
+	if (print_once ||
+	    crng_ready() ||
+	    (previous && (caller == READ_ONCE(*previous))))
+		return;
+	WRITE_ONCE(*previous, caller);
+#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
+	print_once = true;
+#endif
+	pr_notice("random: %s called from %pF with crng_init=%d\n",
+		  func_name, caller, crng_init);
+}
+
 /*
  * This function is the exported kernel interface. It returns some
  * number of good random numbers, suitable for key generation, seeding
@@ -1476,15 +1501,10 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
  * wait_for_random_bytes() should be called and return 0 at least once
  * at any point prior.
  */
-void get_random_bytes(void *buf, int nbytes)
+static void _get_random_bytes(void *buf, int nbytes)
 {
 	__u8 tmp[CHACHA20_BLOCK_SIZE];
 
-#ifdef CONFIG_WARN_UNSEEDED_RANDOM
-	if (!crng_ready())
-		printk(KERN_NOTICE "random: %pF get_random_bytes called "
-		       "with crng_init = %d\n", (void *) _RET_IP_, crng_init);
-#endif
 	trace_get_random_bytes(nbytes, _RET_IP_);
 
 	while (nbytes >= CHACHA20_BLOCK_SIZE) {
@@ -1501,6 +1521,14 @@ void get_random_bytes(void *buf, int nbytes)
 	crng_backtrack_protect(tmp, CHACHA20_BLOCK_SIZE);
 	memzero_explicit(tmp, sizeof(tmp));
 }
+
+void get_random_bytes(void *buf, int nbytes)
+{
+	static void *previous;
+
+	warn_unseeded_randomness(&previous);
+	_get_random_bytes(buf, nbytes);
+}
 EXPORT_SYMBOL(get_random_bytes);
 
 /*
@@ -2064,6 +2092,7 @@ u64 get_random_u64(void)
 	bool use_lock = READ_ONCE(crng_init) < 2;
 	unsigned long flags = 0;
 	struct batched_entropy *batch;
+	static void *previous;
 
 #if BITS_PER_LONG == 64
 	if (arch_get_random_long((unsigned long *)&ret))
@@ -2074,11 +2103,7 @@ u64 get_random_u64(void)
 	return ret;
 #endif
 
-#ifdef CONFIG_WARN_UNSEEDED_RANDOM
-	if (!crng_ready())
-		printk(KERN_NOTICE "random: %pF get_random_u64 called "
-		       "with crng_init = %d\n", (void *) _RET_IP_, crng_init);
-#endif
+	warn_unseeded_randomness(&previous);
 
 	batch = &get_cpu_var(batched_entropy_u64);
 	if (use_lock)
@@ -2102,15 +2127,12 @@ u32 get_random_u32(void)
 	bool use_lock = READ_ONCE(crng_init) < 2;
 	unsigned long flags = 0;
 	struct batched_entropy *batch;
+	static void *previous;
 
 	if (arch_get_random_int(&ret))
 		return ret;
 
-#ifdef CONFIG_WARN_UNSEEDED_RANDOM
-	if (!crng_ready())
-		printk(KERN_NOTICE "random: %pF get_random_u32 called "
-		       "with crng_init = %d\n", (void *) _RET_IP_, crng_init);
-#endif
+	warn_unseeded_randomness(&previous);
 
 	batch = &get_cpu_var(batched_entropy_u32);
 	if (use_lock)
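For reference, any further randomness-returning helper added inside drivers/char/random.c would follow the same call pattern the patch establishes for get_random_bytes(), get_random_u32() and get_random_u64(): keep one static 'previous' slot per call site (so consecutive warnings triggered from the same caller are deduplicated), invoke the macro, then draw bytes through the unwarned internal path. The sketch below is hypothetical and not part of the patch; get_random_nonce() is an invented name, while warn_unseeded_randomness() and _get_random_bytes() are the helpers introduced above.

/* Hypothetical sketch, not part of this patch: a further exported helper
 * inside drivers/char/random.c mirroring the get_random_bytes() wrapper. */
void get_random_nonce(void *buf, int nbytes)
{
	static void *previous;		/* one warning-dedup slot per call site */

	warn_unseeded_randomness(&previous);
	_get_random_bytes(buf, nbytes);
}
EXPORT_SYMBOL(get_random_nonce);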