author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>   2017-06-30 10:37:13 -0400
committer  Theodore Ts'o <tytso@mit.edu>                       2017-07-15 12:33:22 -0400
commit     72e5c740f6335e27253b8ff64d23d00337091535 (patch)
tree       277b0d7fdd627c80c54deafae9ab10b27c3d39de /drivers/char/random.c
parent     eecabf567422eda02bd179f2707d8fe24f52d888 (diff)
random: reorder READ_ONCE() in get_random_uXX
Avoid the READ_ONCE() added in commit 4a072c71f49b ("random: silence
compiler warnings and fix race") when we can leave the function early
after arch_get_random_XXX() succeeds.
Cc: Jason A. Donenfeld <Jason@zx2c4.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
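
For context, here is a simplified sketch of the resulting flow in
get_random_u64() after this patch (not the verbatim kernel code: the
32-bit BITS_PER_LONG handling and the per-CPU batch refill are condensed
into comments). The point of the reorder is that when
arch_get_random_long() succeeds, the function returns before crng_init
is ever read, so the READ_ONCE() is only paid on the fallback path.

	u64 get_random_u64(void)
	{
		u64 ret;
		bool use_lock;
		unsigned long flags = 0;
		static void *previous;

		/* Fast path: if the architecture RNG can satisfy the
		 * request, return immediately -- crng_init is never read. */
		if (arch_get_random_long((unsigned long *)&ret))
			return ret;

		warn_unseeded_randomness(&previous);

		/* Fallback path only: decide here, after the early return
		 * above, whether the reset lock is still needed. */
		use_lock = READ_ONCE(crng_init) < 2;
		if (use_lock)
			read_lock_irqsave(&batched_entropy_reset_lock, flags);
		/* ... refill and consume the per-CPU entropy batch into ret ... */
		if (use_lock)
			read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
		return ret;
	}
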
Diffstat (limited to 'drivers/char/random.c')
 drivers/char/random.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 799d37981d99..05d255e1c112 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -2089,7 +2089,7 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
 u64 get_random_u64(void)
 {
 	u64 ret;
-	bool use_lock = READ_ONCE(crng_init) < 2;
+	bool use_lock;
 	unsigned long flags = 0;
 	struct batched_entropy *batch;
 	static void *previous;
@@ -2105,6 +2105,7 @@ u64 get_random_u64(void)
 
 	warn_unseeded_randomness(&previous);
 
+	use_lock = READ_ONCE(crng_init) < 2;
 	batch = &get_cpu_var(batched_entropy_u64);
 	if (use_lock)
 		read_lock_irqsave(&batched_entropy_reset_lock, flags);
@@ -2124,7 +2125,7 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
 u32 get_random_u32(void)
 {
 	u32 ret;
-	bool use_lock = READ_ONCE(crng_init) < 2;
+	bool use_lock;
 	unsigned long flags = 0;
 	struct batched_entropy *batch;
 	static void *previous;
@@ -2134,6 +2135,7 @@ u32 get_random_u32(void)
 
 	warn_unseeded_randomness(&previous);
 
+	use_lock = READ_ONCE(crng_init) < 2;
 	batch = &get_cpu_var(batched_entropy_u32);
 	if (use_lock)
 		read_lock_irqsave(&batched_entropy_reset_lock, flags);