author		Linus Torvalds <torvalds@linux-foundation.org>	2017-07-15 15:44:02 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-07-15 15:44:02 -0400
commit		52f6c588c77b76d548201470c2a28263a41b462b (patch)
tree		cbb4207714e82f10932a546469bfb3db84051c33 /drivers/char/random.c
parent		78dcf73421a879d22319d3889119945b85954a68 (diff)
parent		72e5c740f6335e27253b8ff64d23d00337091535 (diff)
Merge tag 'random_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/random
Pull random updates from Ted Ts'o:
 "Add wait_for_random_bytes() and get_random_*_wait() functions so that
  callers can more safely get random bytes if they can block until the
  CRNG is initialized.

  Also print a warning if get_random_*() is called before the CRNG is
  initialized. By default, only one single-line warning will be printed
  per boot. If CONFIG_WARN_ALL_UNSEEDED_RANDOM is defined, then a
  warning will be printed for each function which tries to get random
  bytes before the CRNG is initialized. This can get spammy for certain
  architecture types, so it is not enabled by default"

* tag 'random_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/random:
  random: reorder READ_ONCE() in get_random_uXX
  random: suppress spammy warnings about unseeded randomness
  random: warn when kernel uses unseeded randomness
  net/route: use get_random_int for random counter
  net/neighbor: use get_random_u32 for 32-bit hash random
  rhashtable: use get_random_u32 for hash_rnd
  ceph: ensure RNG is seeded before using
  iscsi: ensure RNG is seeded before use
  cifs: use get_random_u32 for 32-bit lock random
  random: add get_random_{bytes,u32,u64,int,long,once}_wait family
  random: add wait_for_random_bytes() API
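In practice the new API lets sleepable callers guarantee seeded output before drawing bytes. A minimal caller-side sketch (hypothetical function and parameter names; assumes process context where blocking is permitted):

#include <linux/random.h>

/* Hypothetical example: derive key material only after the CRNG is
 * seeded. wait_for_random_bytes() may sleep and returns -ERESTARTSYS
 * if interrupted by a signal. */
static int example_gen_session_key(u8 *key, int nbytes)
{
	int ret = wait_for_random_bytes();

	if (unlikely(ret))
		return ret;
	get_random_bytes(key, nbytes);	/* CRNG guaranteed seeded now */
	return 0;
}

Callers that cannot block keep using get_random_bytes() directly; with this series they get the one-time boot warning if the CRNG is still unseeded.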
Diffstat (limited to 'drivers/char/random.c')
-rw-r--r--	drivers/char/random.c	96
1 file changed, 76 insertions(+), 20 deletions(-)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 23cab7a8c1c1..afa3ce7d3e72 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -288,7 +288,6 @@
 #define SEC_XFER_SIZE 512
 #define EXTRACT_SIZE 10
 
-#define DEBUG_RANDOM_BOOT 0
 
 #define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
 
@@ -437,6 +436,7 @@ static void _extract_crng(struct crng_state *crng,
 static void _crng_backtrack_protect(struct crng_state *crng,
 				    __u8 tmp[CHACHA20_BLOCK_SIZE], int used);
 static void process_random_ready_list(void);
+static void _get_random_bytes(void *buf, int nbytes);
 
 /**********************************************************************
  *
@@ -777,7 +777,7 @@ static void crng_initialize(struct crng_state *crng)
 		_extract_entropy(&input_pool, &crng->state[4],
 				 sizeof(__u32) * 12, 0);
 	else
-		get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
+		_get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
 	for (i = 4; i < 16; i++) {
 		if (!arch_get_random_seed_long(&rv) &&
 		    !arch_get_random_long(&rv))
@@ -851,11 +851,6 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
 	}
 }
 
-static inline void crng_wait_ready(void)
-{
-	wait_event_interruptible(crng_init_wait, crng_ready());
-}
-
 static void _extract_crng(struct crng_state *crng,
 			  __u8 out[CHACHA20_BLOCK_SIZE])
 {
@@ -1477,22 +1472,44 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
 	return ret;
 }
 
+#define warn_unseeded_randomness(previous) \
+	_warn_unseeded_randomness(__func__, (void *) _RET_IP_, (previous))
+
+static void _warn_unseeded_randomness(const char *func_name, void *caller,
+				      void **previous)
+{
+#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
+	const bool print_once = false;
+#else
+	static bool print_once __read_mostly;
+#endif
+
+	if (print_once ||
+	    crng_ready() ||
+	    (previous && (caller == READ_ONCE(*previous))))
+		return;
+	WRITE_ONCE(*previous, caller);
+#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
+	print_once = true;
+#endif
+	pr_notice("random: %s called from %pF with crng_init=%d\n",
+		  func_name, caller, crng_init);
+}
+
 /*
  * This function is the exported kernel interface. It returns some
  * number of good random numbers, suitable for key generation, seeding
  * TCP sequence numbers, etc. It does not rely on the hardware random
  * number generator. For random bytes direct from the hardware RNG
- * (when available), use get_random_bytes_arch().
+ * (when available), use get_random_bytes_arch(). In order to ensure
+ * that the randomness provided by this function is okay, the function
+ * wait_for_random_bytes() should be called and return 0 at least once
+ * at any point prior.
  */
-void get_random_bytes(void *buf, int nbytes)
+static void _get_random_bytes(void *buf, int nbytes)
 {
 	__u8 tmp[CHACHA20_BLOCK_SIZE];
 
-#if DEBUG_RANDOM_BOOT > 0
-	if (!crng_ready())
-		printk(KERN_NOTICE "random: %pF get_random_bytes called "
-		       "with crng_init = %d\n", (void *) _RET_IP_, crng_init);
-#endif
 	trace_get_random_bytes(nbytes, _RET_IP_);
 
 	while (nbytes >= CHACHA20_BLOCK_SIZE) {
@@ -1509,9 +1526,35 @@ void get_random_bytes(void *buf, int nbytes)
 	crng_backtrack_protect(tmp, CHACHA20_BLOCK_SIZE);
 	memzero_explicit(tmp, sizeof(tmp));
 }
+
+void get_random_bytes(void *buf, int nbytes)
+{
+	static void *previous;
+
+	warn_unseeded_randomness(&previous);
+	_get_random_bytes(buf, nbytes);
+}
 EXPORT_SYMBOL(get_random_bytes);
 
 /*
+ * Wait for the urandom pool to be seeded and thus guaranteed to supply
+ * cryptographically secure random numbers. This applies to: the /dev/urandom
+ * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
+ * family of functions. Using any of these functions without first calling
+ * this function forfeits the guarantee of security.
+ *
+ * Returns: 0 if the urandom pool has been seeded.
+ *          -ERESTARTSYS if the function was interrupted by a signal.
+ */
+int wait_for_random_bytes(void)
+{
+	if (likely(crng_ready()))
+		return 0;
+	return wait_event_interruptible(crng_init_wait, crng_ready());
+}
+EXPORT_SYMBOL(wait_for_random_bytes);
+
+/*
  * Add a callback function that will be invoked when the nonblocking
  * pool is initialised.
  *
@@ -1865,6 +1908,8 @@ const struct file_operations urandom_fops = {
 SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
 		unsigned int, flags)
 {
+	int ret;
+
 	if (flags & ~(GRND_NONBLOCK|GRND_RANDOM))
 		return -EINVAL;
 
@@ -1877,9 +1922,9 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
 	if (!crng_ready()) {
 		if (flags & GRND_NONBLOCK)
 			return -EAGAIN;
-		crng_wait_ready();
-		if (signal_pending(current))
-			return -ERESTARTSYS;
+		ret = wait_for_random_bytes();
+		if (unlikely(ret))
+			return ret;
 	}
 	return urandom_read(NULL, buf, count, NULL);
 }
@@ -2040,15 +2085,19 @@ static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_
 /*
  * Get a random word for internal kernel use only. The quality of the random
  * number is either as good as RDRAND or as good as /dev/urandom, with the
- * goal of being quite fast and not depleting entropy.
+ * goal of being quite fast and not depleting entropy. In order to ensure
+ * that the randomness provided by this function is okay, the function
+ * wait_for_random_bytes() should be called and return 0 at least once
+ * at any point prior.
  */
 static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
 u64 get_random_u64(void)
 {
 	u64 ret;
-	bool use_lock = READ_ONCE(crng_init) < 2;
+	bool use_lock;
 	unsigned long flags = 0;
 	struct batched_entropy *batch;
+	static void *previous;
 
 #if BITS_PER_LONG == 64
 	if (arch_get_random_long((unsigned long *)&ret))
@@ -2059,6 +2108,9 @@ u64 get_random_u64(void)
 	return ret;
 #endif
 
+	warn_unseeded_randomness(&previous);
+
+	use_lock = READ_ONCE(crng_init) < 2;
 	batch = &get_cpu_var(batched_entropy_u64);
 	if (use_lock)
 		read_lock_irqsave(&batched_entropy_reset_lock, flags);
@@ -2078,13 +2130,17 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
 u32 get_random_u32(void)
 {
 	u32 ret;
-	bool use_lock = READ_ONCE(crng_init) < 2;
+	bool use_lock;
 	unsigned long flags = 0;
 	struct batched_entropy *batch;
+	static void *previous;
 
 	if (arch_get_random_int(&ret))
 		return ret;
 
+	warn_unseeded_randomness(&previous);
+
+	use_lock = READ_ONCE(crng_init) < 2;
 	batch = &get_cpu_var(batched_entropy_u32);
 	if (use_lock)
 		read_lock_irqsave(&batched_entropy_reset_lock, flags);
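The get_random_{bytes,u32,u64,int,long}_wait() helpers named in the pull message are not part of this file's diff (the diffstat is limited to drivers/char/random.c; they presumably live in include/linux/random.h). A plausible sketch of how they compose with wait_for_random_bytes(), not the verbatim header:

/* Sketch only: hedged reconstruction of the _wait wrapper family.
 * Each helper blocks until the CRNG is seeded, then delegates to the
 * corresponding non-blocking getter. */
static inline int get_random_bytes_wait(void *buf, int nbytes)
{
	int ret = wait_for_random_bytes();
	if (unlikely(ret))
		return ret;	/* -ERESTARTSYS: interrupted by a signal */
	get_random_bytes(buf, nbytes);
	return 0;
}

#define declare_get_random_var_wait(var)			\
	static inline int get_random_ ## var ## _wait(var *out) { \
		int ret = wait_for_random_bytes();		\
		if (unlikely(ret))				\
			return ret;				\
		*out = get_random_ ## var();			\
		return 0;					\
	}
declare_get_random_var_wait(u32)
declare_get_random_var_wait(u64)
declare_get_random_var_wait(int)
declare_get_random_var_wait(long)

Returning the wait status lets callers propagate -ERESTARTSYS instead of silently consuming unseeded output.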