| author | Jason A. Donenfeld <Jason@zx2c4.com> | 2017-01-22 10:34:08 -0500 |
|---|---|---|
| committer | Theodore Ts'o <tytso@mit.edu> | 2017-01-27 14:25:06 -0500 |
| commit | c440408cf6901eeb2c09563397e24a9097907078 (patch) | |
| tree | 0b6333b3529b2d1608649fdbb2fa9217cdb020d7 /drivers/char/random.c | |
| parent | f5b98461cb8167ba362ad9f74c41d126b7becea7 (diff) | |
random: convert get_random_int/long into get_random_u32/u64
Many times, when a user wants a random number, they want a random number of
a guaranteed size. So, thinking of get_random_int and get_random_long in
terms of get_random_u32 and get_random_u64 makes it much easier to achieve
this. It also makes the code simpler.

On 32-bit platforms, get_random_int and get_random_long are both aliased to
get_random_u32. On 64-bit platforms, get_random_int is aliased to
get_random_u32 and get_random_long to get_random_u64.
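As a reader's aid, here is a minimal sketch of what that aliasing looks like.
This is an assumption: it mirrors the include/linux/random.h side of the
change, which the diffstat below does not cover.

```c
/*
 * Sketch of the aliasing described above. Assumption: this mirrors the
 * include/linux/random.h side of the change, which the diffstat below
 * does not cover.
 */
#include <linux/types.h>	/* u32, u64 */

u32 get_random_u32(void);
u64 get_random_u64(void);

/* int is 32 bits on all supported platforms, so this needs no #ifdef. */
static inline unsigned int get_random_int(void)
{
	return get_random_u32();
}

/* long is 64 bits on 64-bit platforms and 32 bits on 32-bit ones. */
static inline unsigned long get_random_long(void)
{
#if BITS_PER_LONG == 64
	return get_random_u64();
#else
	return get_random_u32();
#endif
}
```

Callers that need a guaranteed width can then call get_random_u32() or
get_random_u64() directly, while existing get_random_int() and
get_random_long() users keep their old semantics.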
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Diffstat (limited to 'drivers/char/random.c')
-rw-r--r-- | drivers/char/random.c | 55 |
1 file changed, 27 insertions(+), 28 deletions(-)
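For a sense of scale in the diff below: a ChaCha20 keystream block is 64
bytes, so each per-CPU batch serves eight u64 values or sixteen u32 values
before extract_crng() has to refill it. A standalone sketch of that
arithmetic, with uint64_t/uint32_t standing in for the kernel's u64/u32:

```c
#include <stdio.h>
#include <stdint.h>

#define CHACHA20_BLOCK_SIZE 64	/* one ChaCha20 keystream block, in bytes */

int main(void)
{
	/* Mirrors the union sizing in the diff below: one keystream block
	 * is carved into fixed-size slots and refilled once consumed. */
	printf("u64 slots per batch: %zu\n", CHACHA20_BLOCK_SIZE / sizeof(uint64_t)); /* 8 */
	printf("u32 slots per batch: %zu\n", CHACHA20_BLOCK_SIZE / sizeof(uint32_t)); /* 16 */
	return 0;
}
```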
```diff
diff --git a/drivers/char/random.c b/drivers/char/random.c
index b800e5479b7d..066ae125f2c8 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -2018,8 +2018,8 @@ struct ctl_table random_table[] = {
 
 struct batched_entropy {
 	union {
-		unsigned long entropy_long[CHACHA20_BLOCK_SIZE / sizeof(unsigned long)];
-		unsigned int entropy_int[CHACHA20_BLOCK_SIZE / sizeof(unsigned int)];
+		u64 entropy_u64[CHACHA20_BLOCK_SIZE / sizeof(u64)];
+		u32 entropy_u32[CHACHA20_BLOCK_SIZE / sizeof(u32)];
 	};
 	unsigned int position;
 };
@@ -2029,52 +2029,51 @@ struct batched_entropy {
  * number is either as good as RDRAND or as good as /dev/urandom, with the
  * goal of being quite fast and not depleting entropy.
  */
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_long);
-unsigned long get_random_long(void)
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
+u64 get_random_u64(void)
 {
-	unsigned long ret;
+	u64 ret;
 	struct batched_entropy *batch;
 
-	if (arch_get_random_long(&ret))
+#if BITS_PER_LONG == 64
+	if (arch_get_random_long((unsigned long *)&ret))
 		return ret;
+#else
+	if (arch_get_random_long((unsigned long *)&ret) &&
+	    arch_get_random_long((unsigned long *)&ret + 1))
+		return ret;
+#endif
 
-	batch = &get_cpu_var(batched_entropy_long);
-	if (batch->position % ARRAY_SIZE(batch->entropy_long) == 0) {
-		extract_crng((u8 *)batch->entropy_long);
+	batch = &get_cpu_var(batched_entropy_u64);
+	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
+		extract_crng((u8 *)batch->entropy_u64);
 		batch->position = 0;
 	}
-	ret = batch->entropy_long[batch->position++];
-	put_cpu_var(batched_entropy_long);
+	ret = batch->entropy_u64[batch->position++];
+	put_cpu_var(batched_entropy_u64);
 	return ret;
 }
-EXPORT_SYMBOL(get_random_long);
+EXPORT_SYMBOL(get_random_u64);
 
-#if BITS_PER_LONG == 32
-unsigned int get_random_int(void)
-{
-	return get_random_long();
-}
-#else
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_int);
-unsigned int get_random_int(void)
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
+u32 get_random_u32(void)
 {
-	unsigned int ret;
+	u32 ret;
 	struct batched_entropy *batch;
 
 	if (arch_get_random_int(&ret))
 		return ret;
 
-	batch = &get_cpu_var(batched_entropy_int);
-	if (batch->position % ARRAY_SIZE(batch->entropy_int) == 0) {
-		extract_crng((u8 *)batch->entropy_int);
+	batch = &get_cpu_var(batched_entropy_u32);
+	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
+		extract_crng((u8 *)batch->entropy_u32);
 		batch->position = 0;
 	}
-	ret = batch->entropy_int[batch->position++];
-	put_cpu_var(batched_entropy_int);
+	ret = batch->entropy_u32[batch->position++];
+	put_cpu_var(batched_entropy_u32);
 	return ret;
 }
-#endif
-EXPORT_SYMBOL(get_random_int);
+EXPORT_SYMBOL(get_random_u32);
 
 /**
  * randomize_page - Generate a random, page aligned address
```
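One detail worth spelling out in the new BITS_PER_LONG block: on 32-bit
builds, arch_get_random_long() produces only 32 bits per call, so
get_random_u64() treats its result as two adjacent unsigned long slots and
uses the value only if both calls succeed. A standalone sketch of the same
pattern; arch_fill_half() is a hypothetical stand-in for
arch_get_random_long() on a 32-bit build, stubbed so the sketch compiles:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

/* Hypothetical stand-in for arch_get_random_long() on a 32-bit build:
 * writes one 32-bit value and reports whether the hardware RNG worked.
 * Stubbed with rand() here purely so the sketch is self-contained. */
static bool arch_fill_half(uint32_t *out)
{
	*out = (uint32_t)rand();
	return true;
}

/* Same shape as the #else branch in the diff: fill the two 32-bit
 * halves of the u64 separately, and only trust the result when both
 * calls succeed, so a half-filled value is never returned. */
static bool arch_fill_u64(uint64_t *ret)
{
	uint32_t *half = (uint32_t *)ret;

	return arch_fill_half(half) && arch_fill_half(half + 1);
}

int main(void)
{
	uint64_t v;

	return arch_fill_u64(&v) ? 0 : 1;
}
```

Requiring both calls to succeed is the design point: falling through to the
batched-CRNG path is preferable to returning a u64 in which only one half
came from the hardware instruction.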